Skip to content

Commit a6fb8d5

Browse files
rushil-google authored and davem330 committed
gve: Tx path for DQO-QPL
Each QPL page is divided into GVE_TX_BUFS_PER_PAGE_DQO buffers. When a packet needs to be transmitted, we break the packet into max GVE_TX_BUF_SIZE_DQO sized chunks and transmit each chunk using a TX descriptor. We allocate the TX buffers from the free list in dqo_tx. We store these TX buffer indices in an array in the pending_packet structure. The TX buffers are returned to the free list in dqo_compl after receiving packet completion or when removing packets from miss completions list. Signed-off-by: Rushil Gupta <[email protected]> Reviewed-by: Willem de Bruijn <[email protected]> Signed-off-by: Praveen Kaligineedi <[email protected]> Signed-off-by: Bailey Forrest <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 66ce8e6 commit a6fb8d5

File tree

2 files changed

+398
-83
lines changed

2 files changed

+398
-83
lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 75 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,20 @@
5858
/* Maximum TSO size supported on DQO */
5959
#define GVE_DQO_TX_MAX 0x3FFFF
6060

61+
#define GVE_TX_BUF_SHIFT_DQO 11
62+
63+
/* 2K buffers for DQO-QPL */
64+
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
65+
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
66+
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
67+
68+
/* If number of free/recyclable buffers are less than this threshold; driver
69+
* allocs and uses a non-qpl page on the receive path of DQO QPL to free
70+
* up buffers.
71+
* Value is set big enough to post at least 3 64K LRO packet via 2K buffer to NIC.
72+
*/
73+
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
74+
6175
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
6276
struct gve_rx_desc_queue {
6377
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -338,8 +352,14 @@ struct gve_tx_pending_packet_dqo {
338352
* All others correspond to `skb`'s frags and should be unmapped with
339353
* `dma_unmap_page`.
340354
*/
341-
DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
342-
DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
355+
union {
356+
struct {
357+
DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
358+
DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
359+
};
360+
s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
361+
};
362+
343363
u16 num_bufs;
344364

345365
/* Linked list index to next element in the list, or -1 if none */
@@ -394,6 +414,32 @@ struct gve_tx_ring {
394414
* set.
395415
*/
396416
u32 last_re_idx;
417+
418+
/* free running number of packet buf descriptors posted */
419+
u16 posted_packet_desc_cnt;
420+
/* free running number of packet buf descriptors completed */
421+
u16 completed_packet_desc_cnt;
422+
423+
/* QPL fields */
424+
struct {
425+
/* Linked list of gve_tx_buf_dqo. Index into
426+
* tx_qpl_buf_next, or -1 if empty.
427+
*
428+
* This is a consumer list owned by the TX path. When it
429+
* runs out, the producer list is stolen from the
430+
* completion handling path
431+
* (dqo_compl.free_tx_qpl_buf_head).
432+
*/
433+
s16 free_tx_qpl_buf_head;
434+
435+
/* Free running count of the number of QPL tx buffers
436+
* allocated
437+
*/
438+
u32 alloc_tx_qpl_buf_cnt;
439+
440+
/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
441+
u32 free_tx_qpl_buf_cnt;
442+
};
397443
} dqo_tx;
398444
};
399445

@@ -437,6 +483,24 @@ struct gve_tx_ring {
437483
* reached a specified timeout.
438484
*/
439485
struct gve_index_list timed_out_completions;
486+
487+
/* QPL fields */
488+
struct {
489+
/* Linked list of gve_tx_buf_dqo. Index into
490+
* tx_qpl_buf_next, or -1 if empty.
491+
*
492+
* This is the producer list, owned by the completion
493+
* handling path. When the consumer list
494+
* (dqo_tx.free_tx_qpl_buf_head) is runs out, this list
495+
* will be stolen.
496+
*/
497+
atomic_t free_tx_qpl_buf_head;
498+
499+
/* Free running count of the number of tx buffers
500+
* freed
501+
*/
502+
atomic_t free_tx_qpl_buf_cnt;
503+
};
440504
} dqo_compl;
441505
} ____cacheline_aligned;
442506
u64 pkt_done; /* free-running - total packets completed */
@@ -468,6 +532,15 @@ struct gve_tx_ring {
468532
struct {
469533
/* qpl assigned to this queue */
470534
struct gve_queue_page_list *qpl;
535+
536+
/* Each QPL page is divided into TX bounce buffers
537+
* of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
538+
* an array to manage linked lists of TX buffers.
539+
* An entry j at index i implies that j'th buffer
540+
* is next on the list after i
541+
*/
542+
s16 *tx_qpl_buf_next;
543+
u32 num_tx_qpl_bufs;
471544
};
472545
} dqo;
473546
} ____cacheline_aligned;

0 commit comments

Comments
 (0)