Skip to content

Commit 5ca2265

Browse files
baileyforrest authored and davem330 committed
gve: adminq: DQO specific device descriptor logic
- In addition to TX and RX queues, DQO has TX completion and RX buffer queues.
- TX completions are received when the device has completed sending a packet on the wire.
- RX buffers are posted on a separate queue from the RX completions.
- DQO descriptor rings are allowed to be smaller than PAGE_SIZE.

Signed-off-by: Bailey Forrest <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Reviewed-by: Catherine Sullivan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent a5886ef commit 5ca2265

File tree

2 files changed

+55
-15
lines changed

2 files changed

+55
-15
lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -194,6 +194,11 @@ struct gve_qpl_config {
194194
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
195195
};
196196

197+
/* Ring-size options reported by the device for the DQO-RDA queue format.
 * Populated from the device descriptor in gve_set_desc_cnt_dqo().
 */
struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};
201+
197202
/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
198203
* when the entire configure_device_resources command is zeroed out and the
199204
* queue_format is not specified.
@@ -286,6 +291,8 @@ struct gve_priv {
286291
/* Gvnic device link speed from hypervisor. */
287292
u64 link_speed;
288293

294+
struct gve_options_dqo_rda options_dqo_rda;
295+
289296
enum gve_queue_format queue_format;
290297
};
291298

@@ -533,6 +540,12 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
533540
return DMA_FROM_DEVICE;
534541
}
535542

543+
static inline bool gve_is_gqi(struct gve_priv *priv)
544+
{
545+
return priv->queue_format == GVE_GQI_RDA_FORMAT ||
546+
priv->queue_format == GVE_GQI_QPL_FORMAT;
547+
}
548+
536549
/* buffers */
537550
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
538551
struct page **page, dma_addr_t *dma,

drivers/net/ethernet/google/gve/gve_adminq.c

Lines changed: 42 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -602,6 +602,40 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
602602
return gve_adminq_kick_and_wait(priv);
603603
}
604604

605+
static int gve_set_desc_cnt(struct gve_priv *priv,
606+
struct gve_device_descriptor *descriptor)
607+
{
608+
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
609+
if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
610+
dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
611+
priv->tx_desc_cnt);
612+
return -EINVAL;
613+
}
614+
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
615+
if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
616+
< PAGE_SIZE) {
617+
dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
618+
priv->rx_desc_cnt);
619+
return -EINVAL;
620+
}
621+
return 0;
622+
}
623+
624+
static int
625+
gve_set_desc_cnt_dqo(struct gve_priv *priv,
626+
const struct gve_device_descriptor *descriptor,
627+
const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
628+
{
629+
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
630+
priv->options_dqo_rda.tx_comp_ring_entries =
631+
be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
632+
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
633+
priv->options_dqo_rda.rx_buff_ring_entries =
634+
be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
635+
636+
return 0;
637+
}
638+
605639
int gve_adminq_describe_device(struct gve_priv *priv)
606640
{
607641
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
@@ -655,22 +689,14 @@ int gve_adminq_describe_device(struct gve_priv *priv)
655689
dev_info(&priv->pdev->dev,
656690
"Driver is running with GQI QPL queue format.\n");
657691
}
658-
659-
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
660-
if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
661-
dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
662-
err = -EINVAL;
663-
goto free_device_descriptor;
692+
if (gve_is_gqi(priv)) {
693+
err = gve_set_desc_cnt(priv, descriptor);
694+
} else {
695+
err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
664696
}
665-
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
666-
if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
667-
< PAGE_SIZE ||
668-
priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
669-
< PAGE_SIZE) {
670-
dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
671-
err = -EINVAL;
697+
if (err)
672698
goto free_device_descriptor;
673-
}
699+
674700
priv->max_registered_pages =
675701
be64_to_cpu(descriptor->max_registered_pages);
676702
mtu = be16_to_cpu(descriptor->mtu);
@@ -686,7 +712,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
686712
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
687713
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
688714
priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
689-
if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
715+
716+
if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
690717
dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
691718
priv->rx_data_slot_cnt);
692719
priv->rx_desc_cnt = priv->rx_data_slot_cnt;

0 commit comments

Comments
 (0)