
Commit 2cb0e27

Tariq Toukan authored and davem330 committed
net/mlx5e: RX, Prepare non-linear striding RQ for XDP multi-buffer support

In preparation for supporting XDP multi-buffer in striding RQ, use the
xdp_buff struct to describe the packet. Make its skb_shared_info collide
with the one of the allocated SKB, then add the fragments using the
xdp_buff API.

Signed-off-by: Tariq Toukan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 221c8c7 · commit 2cb0e27
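
The core trick named in the message — making the xdp_buff's skb_shared_info collide with the SKB's — comes down to choosing the XDP frame size so that both shared-info pointers resolve to the same address. Below is a minimal sketch of that overlay, based on the values the diff passes to mlx5e_fill_mxbuf(); it assumes the kernel's xdp_init_buff()/xdp_prepare_buff() helpers and mlx5's MLX5_SKB_FRAG_SZ() macro, and the function name itself is hypothetical, not from this commit:

/* Hypothetical sketch (not part of this commit): lay an xdp_buff over the
 * SKB's own linear buffer so that xdp_get_shared_info_from_buff() and
 * skb_shinfo() resolve to the same struct skb_shared_info.
 */
static void sketch_overlay_xdp_on_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
				      u16 headlen, struct xdp_buff *xdp)
{
	/* xdp_get_shared_info_from_buff() computes
	 *   data_hard_start + frame_sz - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 * while skb_shinfo(skb) is skb->head + skb_end_offset(skb). With
	 * data_hard_start = skb->head and the frame size below, both point at
	 * the same address, so the two shared infos collide.
	 */
	u32 frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));

	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	/* data = skb->head + headroom, data_len = headlen (the copied header) */
	xdp_prepare_buff(xdp, skb->head, skb_headroom(skb), headlen, true);
}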

File tree

  • drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

1 file changed: +47 -4 lines

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 47 additions & 4 deletions
@@ -1977,10 +1977,17 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
 	struct mlx5e_frag_page *head_page = frag_page;
-	u32 frag_offset = head_offset + headlen;
-	u32 byte_cnt = cqe_bcnt - headlen;
+	u32 frag_offset = head_offset;
+	u32 byte_cnt = cqe_bcnt;
+	struct skb_shared_info *sinfo;
+	struct mlx5e_xdp_buff mxbuf;
+	unsigned int truesize = 0;
 	struct sk_buff *skb;
+	u32 linear_frame_sz;
+	u16 linear_data_len;
 	dma_addr_t addr;
+	u16 linear_hr;
+	void *va;
 
 	skb = napi_alloc_skb(rq->cq.napi,
 			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
@@ -1989,16 +1996,52 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		return NULL;
 	}
 
+	va = skb->head;
 	net_prefetchw(skb->data);
 
-	/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
+	frag_offset += headlen;
+	byte_cnt -= headlen;
+	linear_hr = skb_headroom(skb);
+	linear_data_len = headlen;
+	linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
 	if (unlikely(frag_offset >= PAGE_SIZE)) {
 		frag_page++;
 		frag_offset -= PAGE_SIZE;
 	}
 
 	skb_mark_for_recycle(skb);
-	mlx5e_fill_skb_data(skb, rq, frag_page, byte_cnt, frag_offset);
+	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+	net_prefetch(mxbuf.xdp.data);
+
+	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+
+	while (byte_cnt) {
+		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
+		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+			truesize += pg_consumed_bytes;
+		else
+			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+
+		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+					       pg_consumed_bytes);
+		byte_cnt -= pg_consumed_bytes;
+		frag_offset = 0;
+		frag_page++;
+	}
+	if (xdp_buff_has_frags(&mxbuf.xdp)) {
+		struct mlx5e_frag_page *pagep;
+
+		xdp_update_skb_shared_info(skb, sinfo->nr_frags,
+					   sinfo->xdp_frags_size, truesize,
+					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+		pagep = frag_page - sinfo->nr_frags;
+		do
+			pagep->frags++;
+		while (++pagep < frag_page);
+	}
 	/* copy header */
 	addr = page_pool_get_dma_addr(head_page->page);
 	mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
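
The while loop above appends each stride's pages through mlx5e_add_skb_shared_info_frag(), whose body is outside this diff. Roughly, appending one fragment through the xdp_buff API looks like the sketch below; this is an assumed shape built from the generic XDP multi-buffer helpers current at the time of this commit, and sketch_add_frag is a hypothetical name:

/* Hypothetical sketch (not from this diff): append one page fragment to the
 * shared info that now backs both the xdp_buff and the SKB.
 */
static void sketch_add_frag(struct skb_shared_info *sinfo, struct xdp_buff *xdp,
			    struct page *page, u32 frag_offset, u32 len)
{
	skb_frag_t *frag;

	if (!xdp_buff_has_frags(xdp)) {
		/* First fragment: init the fields XDP multi-buffer relies on. */
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	frag = &sinfo->frags[sinfo->nr_frags++];
	__skb_frag_set_page(frag, page);
	skb_frag_off_set(frag, frag_offset);
	skb_frag_size_set(frag, len);

	/* Propagate pfmemalloc so xdp_update_skb_shared_info() can mark the SKB. */
	if (page_is_pfmemalloc(page))
		xdp_buff_set_frag_pfmemalloc(xdp);
	sinfo->xdp_frags_size += len;
}

After the loop, xdp_update_skb_shared_info() copies nr_frags, the total fragment size, and truesize back into the SKB, which is what the overlaid shared info makes cheap, and the trailing do/while bumps the driver's per-page fragment counter so the page-pool recycling logic knows each page is still referenced by the SKB.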
