|
58 | 58 | /* Maximum TSO size supported on DQO */
|
59 | 59 | #define GVE_DQO_TX_MAX 0x3FFFF
|
60 | 60 |
|
| 61 | +#define GVE_TX_BUF_SHIFT_DQO 11 |
| 62 | + |
| 63 | +/* 2K buffers for DQO-QPL */ |
| 64 | +#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO) |
| 65 | +#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO) |
| 66 | +#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)) |
| 67 | + |
| 68 | +/* If the number of free/recyclable buffers is less than this threshold, the |
| 69 | + * driver allocs and uses a non-QPL page on the receive path of DQO QPL to |
| 70 | + * free up buffers. |
| 71 | + * The value is large enough to post at least 3 64K LRO packets via 2K buffers to the NIC. |
| 72 | + */ |
| 73 | +#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96 |
| 74 | + |
61 | 75 | /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
|
62 | 76 | struct gve_rx_desc_queue {
|
63 | 77 | struct gve_rx_desc *desc_ring; /* the descriptor ring */
|
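The sizing math above is compact enough to check directly. Below is a minimal user-space sanity sketch (not driver code) that assumes a 4K PAGE_SIZE and re-derives the constants, including why the on-demand threshold of 96 corresponds to exactly three 64K LRO packets' worth of 2K buffers:

```c
#include <assert.h>

/* Mirrors of the kernel macros above; PAGE_SIZE = 4K is an assumption
 * (it is architecture-dependent in the kernel).
 */
#define BIT(n)			(1UL << (n))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SIZE		4096UL

#define GVE_DQO_TX_MAX		0x3FFFF
#define GVE_TX_BUF_SHIFT_DQO	11
#define GVE_TX_BUF_SIZE_DQO	BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT	DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)

int main(void)
{
	assert(GVE_TX_BUF_SIZE_DQO == 2048);	/* 2K bounce buffers */
	assert(GVE_TX_BUFS_PER_PAGE_DQO == 2);	/* two buffers per 4K page */
	assert(GVE_MAX_TX_BUFS_PER_PKT == 128);	/* ceil(0x3FFFF / 2K) */
	/* Threshold of 96 = 3 LRO packets * (64K / 2K per buffer). */
	assert(3 * ((64 * 1024) / GVE_TX_BUF_SIZE_DQO) == 96);
	return 0;
}
```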
@@ -338,8 +352,14 @@ struct gve_tx_pending_packet_dqo {
|
338 | 352 | * All others correspond to `skb`'s frags and should be unmapped with
|
339 | 353 | * `dma_unmap_page`.
|
340 | 354 | */
|
341 |  | - DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); |
342 |  | - DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); |
| 355 | + union { |
| 356 | + struct { |
| 357 | + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); |
| 358 | + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); |
| 359 | + }; |
| 360 | + s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT]; |
| 361 | + }; |
| 362 | + |
343 | 363 | u16 num_bufs;
|
344 | 364 |
|
345 | 365 | /* Linked list index to next element in the list, or -1 if none */
|
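Because QPL mode copies each frag into pre-mapped bounce buffers, a pending packet in that mode has nothing to DMA-unmap; the union lets the same storage record which 2K buffer IDs the packet consumed, with `num_bufs` counting the valid entries under either interpretation. The sketch below shows how a completion handler might dispatch on the two arms; the `is_qpl` flag and the `free_tx_qpl_buf()` helper are illustrative assumptions, not the driver's actual API:

```c
#include <linux/dma-mapping.h>
#include "gve.h"

static void pending_packet_cleanup_sketch(struct device *dev,
					  struct gve_tx_pending_packet_dqo *pkt,
					  bool is_qpl)
{
	int i;

	if (is_qpl) {
		/* QPL mode: frags were copied into bounce buffers, so just
		 * return the recorded buffer IDs to the free list.
		 */
		for (i = 0; i < pkt->num_bufs; i++)
			free_tx_qpl_buf(pkt->tx_qpl_buf_ids[i]); /* hypothetical */
		return;
	}

	/* Raw-DMA mode: per the comment above, slot 0 was mapped with
	 * dma_map_single() and the rest with dma_map_page().
	 */
	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}
```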
@@ -394,6 +414,32 @@ struct gve_tx_ring {
|
394 | 414 | * set.
|
395 | 415 | */
|
396 | 416 | u32 last_re_idx;
|
| 417 | + |
| 418 | + /* free running number of packet buf descriptors posted */ |
| 419 | + u16 posted_packet_desc_cnt; |
| 420 | + /* free running number of packet buf descriptors completed */ |
| 421 | + u16 completed_packet_desc_cnt; |
| 422 | + |
| 423 | + /* QPL fields */ |
| 424 | + struct { |
| 425 | + /* Linked list of gve_tx_buf_dqo. Index into |
| 426 | + * tx_qpl_buf_next, or -1 if empty. |
| 427 | + * |
| 428 | + * This is a consumer list owned by the TX path. When it |
| 429 | + * runs out, the producer list is stolen from the |
| 430 | + * completion handling path |
| 431 | + * (dqo_compl.free_tx_qpl_buf_head). |
| 432 | + */ |
| 433 | + s16 free_tx_qpl_buf_head; |
| 434 | + |
| 435 | + /* Free running count of the number of QPL tx buffers |
| 436 | + * allocated |
| 437 | + */ |
| 438 | + u32 alloc_tx_qpl_buf_cnt; |
| 439 | + |
| 440 | + /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */ |
| 441 | + u32 free_tx_qpl_buf_cnt; |
| 442 | + }; |
397 | 443 | } dqo_tx;
|
398 | 444 | };
|
399 | 445 |
|
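This consumer/producer split keeps the TX hot path lock-free: the TX path pops buffer IDs from its private list and only touches the shared atomic when the private list runs dry. A sketch of what buffer allocation could look like under the field names above (the function name is an illustrative assumption):

```c
#include "gve.h"

static s16 alloc_tx_qpl_buf_sketch(struct gve_tx_ring *tx)
{
	s16 index = tx->dqo_tx.free_tx_qpl_buf_head;

	if (index == -1) {
		/* Consumer list is empty: steal the whole producer list
		 * from the completion path in one atomic exchange,
		 * leaving an empty (-1) list behind.
		 */
		index = (s16)atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
		if (index == -1)
			return -1;	/* no recyclable buffers right now */
	}

	/* Pop the head of the (now non-empty) consumer list. */
	tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index];
	return index;
}
```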
@@ -437,6 +483,24 @@ struct gve_tx_ring {
|
437 | 483 | * reached a specified timeout.
|
438 | 484 | */
|
439 | 485 | struct gve_index_list timed_out_completions;
|
| 486 | + |
| 487 | + /* QPL fields */ |
| 488 | + struct { |
| 489 | + /* Linked list of gve_tx_buf_dqo. Index into |
| 490 | + * tx_qpl_buf_next, or -1 if empty. |
| 491 | + * |
| 492 | + * This is the producer list, owned by the completion |
| 493 | + * handling path. When the consumer list |
| 494 | + * (dqo_tx.free_tx_qpl_buf_head) runs out, this list |
| 495 | + * will be stolen. |
| 496 | + */ |
| 497 | + atomic_t free_tx_qpl_buf_head; |
| 498 | + |
| 499 | + /* Free running count of the number of tx buffers |
| 500 | + * freed |
| 501 | + */ |
| 502 | + atomic_t free_tx_qpl_buf_cnt; |
| 503 | + }; |
440 | 504 | } dqo_compl;
|
441 | 505 | } ____cacheline_aligned;
|
442 | 506 | u64 pkt_done; /* free-running - total packets completed */
|
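On this side, freed buffers are pushed onto the producer list with a compare-and-swap loop, since the TX path may steal the entire list at any moment. An illustrative sketch (the function name and head/tail/count parameters are assumptions; a real caller would pass a chain of buffer IDs already linked through `tx_qpl_buf_next`):

```c
#include "gve.h"

static void free_tx_qpl_bufs_sketch(struct gve_tx_ring *tx,
				    s16 head, s16 tail, int count)
{
	int old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head);

	do {
		/* Splice the freed chain in front of the current list.
		 * On cmpxchg failure, old_head is refreshed and we retry.
		 */
		tx->dqo.tx_qpl_buf_next[tail] = old_head;
	} while (!atomic_try_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head,
				     &old_head, head));

	atomic_add(count, &tx->dqo_compl.free_tx_qpl_buf_cnt);
}
```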
@@ -468,6 +532,15 @@ struct gve_tx_ring {
|
468 | 532 | struct {
|
469 | 533 | /* qpl assigned to this queue */
|
470 | 534 | struct gve_queue_page_list *qpl;
|
| 535 | + |
| 536 | + /* Each QPL page is divided into TX bounce buffers |
| 537 | + * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is |
| 538 | + * an array to manage linked lists of TX buffers. |
| 539 | + * An entry with value j at index i implies that |
| 540 | + * buffer j is next on the list after buffer i. |
| 541 | + */ |
| 542 | + s16 *tx_qpl_buf_next; |
| 543 | + u32 num_tx_qpl_bufs; |
471 | 544 | };
|
472 | 545 | } dqo;
|
473 | 546 | } ____cacheline_aligned;
|
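Because `tx_qpl_buf_next` is a flat array of successor indices, both building the initial free list and translating a buffer ID back to a page and offset reduce to simple arithmetic. A sketch under the definitions above (function names are illustrative; `qpl->pages` is assumed to be the page array of struct gve_queue_page_list):

```c
#include <linux/mm.h>
#include "gve.h"

static void tx_qpl_buf_list_init_sketch(struct gve_tx_ring *tx, u32 num_pages)
{
	u32 i;

	tx->dqo.num_tx_qpl_bufs = num_pages * GVE_TX_BUFS_PER_PAGE_DQO;

	/* Chain each buffer to its successor; the last entry (-1)
	 * terminates the list.
	 */
	for (i = 0; i < tx->dqo.num_tx_qpl_bufs - 1; i++)
		tx->dqo.tx_qpl_buf_next[i] = i + 1;
	tx->dqo.tx_qpl_buf_next[tx->dqo.num_tx_qpl_bufs - 1] = -1;

	/* The consumer list starts out holding every buffer. */
	tx->dqo_tx.free_tx_qpl_buf_head = 0;
}

/* Buffer ID -> virtual address: two 2K buffers per 4K page. */
static void *tx_qpl_buf_addr_sketch(struct gve_tx_ring *tx, s16 index)
{
	u32 page = index / GVE_TX_BUFS_PER_PAGE_DQO;
	u32 offset = (index % GVE_TX_BUFS_PER_PAGE_DQO) << GVE_TX_BUF_SHIFT_DQO;

	return page_address(tx->dqo.qpl->pages[page]) + offset;
}
```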
|