@@ -55,6 +55,20 @@
 /* Maximum TSO size supported on DQO */
 #define GVE_DQO_TX_MAX 0x3FFFF
 
+#define GVE_TX_BUF_SHIFT_DQO 11
+
+/* 2K buffers for DQO-QPL */
+#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
+#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
+#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
+
+/* If the number of free/recyclable buffers is less than this threshold, the
+ * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
+ * free up buffers.
+ * Value is set big enough to post at least 3 64K LRO packets via 2K buffers to the NIC.
+ */
+#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
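To make the buffer geometry concrete: with 4K pages, each QPL page splits into two 2K bounce buffers, and a maximal TSO packet (GVE_DQO_TX_MAX = 0x3FFFF bytes) needs up to 128 of them; the on-demand threshold of 96 is exactly three 64K LRO packets' worth of 2K buffers (3 × 32). A standalone sketch of the same arithmetic, assuming 4K pages and re-deriving the kernel's BIT()/DIV_ROUND_UP() helpers rather than pulling in kernel headers:

```c
/* Standalone check of the DQO-QPL buffer geometry; assumes 4K pages. */
#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumption: 4K pages */
#define BIT(nr) (1u << (nr))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define GVE_DQO_TX_MAX 0x3FFFFu
#define GVE_TX_BUF_SHIFT_DQO 11
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))

int main(void)
{
	printf("buf size      : %u\n", GVE_TX_BUF_SIZE_DQO);      /* 2048 */
	printf("bufs per page : %u\n", GVE_TX_BUFS_PER_PAGE_DQO); /* 2 */
	printf("max bufs/pkt  : %u\n", GVE_MAX_TX_BUFS_PER_PKT);  /* 128 */
	/* three 64K LRO packets of 2K buffers -> the threshold of 96 */
	printf("3 x 64K LRO   : %u\n",
	       3u * DIV_ROUND_UP(65536u, GVE_TX_BUF_SIZE_DQO));   /* 96 */
	return 0;
}
```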
@@ -319,8 +333,14 @@ struct gve_tx_pending_packet_dqo {
 	 * All others correspond to `skb`'s frags and should be unmapped with
 	 * `dma_unmap_page`.
 	 */
-	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
-	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
+	union {
+		struct {
+			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
+		};
+		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
+	};
+
 	u16 num_bufs;
 
 	/* Linked list index to next element in the list, or -1 if none */
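The union works because a pending packet is only ever one of two kinds: on an ordinary DQO queue its buffers were DMA-mapped and must be unmapped on completion, while on a DQO-QPL queue they are bounce-buffer ids to recycle; num_bufs counts the valid entries of whichever arm is live. A toy user-space model of that either/or bookkeeping (every name and type below is ours, not the driver's):

```c
/* Toy model of the DMA-unmap vs. buffer-id union; names/types are ours. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_FRAGS    18		/* stand-in for MAX_SKB_FRAGS + 1 */
#define MAX_QPL_BUFS 128	/* stand-in for GVE_MAX_TX_BUFS_PER_PKT */

struct pending_pkt {
	union {
		struct {			/* ordinary DQO: mapped frags */
			uint64_t dma[MAX_FRAGS];
			uint32_t len[MAX_FRAGS];
		};
		int16_t tx_qpl_buf_ids[MAX_QPL_BUFS];	/* DQO-QPL: bounce bufs */
	};
	uint16_t num_bufs;	/* valid entries in whichever arm is live */
};

/* Whether a queue uses QPL is fixed at setup, so the live arm is known. */
static void complete_pkt(const struct pending_pkt *p, bool is_qpl)
{
	for (uint16_t i = 0; i < p->num_bufs; i++) {
		if (is_qpl)
			printf("recycle qpl buf %d\n", p->tx_qpl_buf_ids[i]);
		else
			printf("unmap dma %#llx len %u\n",
			       (unsigned long long)p->dma[i], p->len[i]);
	}
}

int main(void)
{
	struct pending_pkt p = { .num_bufs = 2 };

	p.tx_qpl_buf_ids[0] = 7;
	p.tx_qpl_buf_ids[1] = 8;
	complete_pkt(&p, true);		/* exercise the QPL arm */
	return 0;
}
```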
@@ -375,6 +395,32 @@ struct gve_tx_ring {
 		 * set.
 		 */
 		u32 last_re_idx;
+
+		/* free running number of packet buf descriptors posted */
+		u16 posted_packet_desc_cnt;
+		/* free running number of packet buf descriptors completed */
+		u16 completed_packet_desc_cnt;
+
+		/* QPL fields */
+		struct {
+			/* Linked list of gve_tx_buf_dqo. Index into
+			 * tx_qpl_buf_next, or -1 if empty.
+			 *
+			 * This is a consumer list owned by the TX path. When it
+			 * runs out, the producer list is stolen from the
+			 * completion handling path
+			 * (dqo_compl.free_tx_qpl_buf_head).
+			 */
+			s16 free_tx_qpl_buf_head;
+
+			/* Free running count of the number of QPL tx buffers
+			 * allocated
+			 */
+			u32 alloc_tx_qpl_buf_cnt;
+
+			/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
+			u32 free_tx_qpl_buf_cnt;
+		};
 	} dqo_tx;
 };
 
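posted_packet_desc_cnt and completed_packet_desc_cnt are free-running, i.e. never reset, so the number of buffer descriptors still in flight is their unsigned difference, which stays correct across u16 wraparound. A minimal sketch of that idiom (the helper name is hypothetical):

```c
/* Wraparound-safe in-flight count from two free-running u16 counters. */
#include <assert.h>
#include <stdint.h>

static uint16_t descs_in_flight(uint16_t posted, uint16_t completed)
{
	/* Unsigned subtraction is defined modulo 2^16, so the result
	 * stays correct even after either counter wraps.
	 */
	return (uint16_t)(posted - completed);
}

int main(void)
{
	assert(descs_in_flight(100, 90) == 10);
	/* posted wrapped past 0xFFFF; completed has not yet */
	assert(descs_in_flight(5, 0xFFFB) == 10);
	return 0;
}
```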
@@ -416,6 +462,24 @@ struct gve_tx_ring {
 		 * reached a specified timeout.
 		 */
 		struct gve_index_list timed_out_completions;
+
+		/* QPL fields */
+		struct {
+			/* Linked list of gve_tx_buf_dqo. Index into
+			 * tx_qpl_buf_next, or -1 if empty.
+			 *
+			 * This is the producer list, owned by the completion
+			 * handling path. When the consumer list
+			 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
+			 * will be stolen.
+			 */
+			atomic_t free_tx_qpl_buf_head;
+
+			/* Free running count of the number of tx buffers
+			 * freed
+			 */
+			atomic_t free_tx_qpl_buf_cnt;
+		};
 	} dqo_compl;
 } ____cacheline_aligned;
 u64 pkt_done; /* free-running - total packets completed */
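Together with dqo_tx.free_tx_qpl_buf_head above, this forms a split free list: the completion path pushes freed buffer ids onto the atomic producer head, and the TX path pops from its private consumer head, stealing the whole producer chain in one atomic exchange only when it runs dry. A user-space sketch of the pattern, using C11 atomics in place of the kernel's atomic_t (the helper names are ours; the driver's exact routines may differ):

```c
/* Split free list with O(1) steal; -1 is the "empty" sentinel throughout. */
#include <stdatomic.h>

struct qpl_free_lists {
	short *tx_qpl_buf_next;		/* next-index array, one slot per buffer */
	short consumer_head;		/* TX path's private list */
	atomic_int producer_head;	/* completion path's list */
};

/* Completion path: push one freed buffer id onto the producer list. */
static void free_buf(struct qpl_free_lists *fl, short id)
{
	int old = atomic_load(&fl->producer_head);

	do {
		fl->tx_qpl_buf_next[id] = (short)old;
	} while (!atomic_compare_exchange_weak(&fl->producer_head, &old, id));
}

/* TX path: pop one buffer id, stealing the producer list when dry. */
static short alloc_buf(struct qpl_free_lists *fl)
{
	short id;

	if (fl->consumer_head == -1)
		fl->consumer_head =
			(short)atomic_exchange(&fl->producer_head, -1);
	if (fl->consumer_head == -1)
		return -1;	/* nothing recyclable right now */

	id = fl->consumer_head;
	fl->consumer_head = fl->tx_qpl_buf_next[id];
	return id;
}
```

The exchange moves an arbitrarily long chain in O(1); on the completion side, the tx_qpl_buf_ids stored in each pending packet are the natural input to a push like free_buf().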
@@ -447,6 +511,15 @@ struct gve_tx_ring {
 		struct {
 			/* qpl assigned to this queue */
 			struct gve_queue_page_list *qpl;
+
+			/* Each QPL page is divided into TX bounce buffers
+			 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
+			 * an array to manage linked lists of TX buffers:
+			 * an entry j at index i implies that the j'th
+			 * buffer is next on the list after buffer i.
+			 */
+			s16 *tx_qpl_buf_next;
+			u32 num_tx_qpl_bufs;
 		};
 	} dqo;
 } ____cacheline_aligned;