@@ -1977,10 +1977,17 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
 	struct mlx5e_frag_page *head_page = frag_page;
-	u32 frag_offset = head_offset + headlen;
-	u32 byte_cnt = cqe_bcnt - headlen;
+	u32 frag_offset = head_offset;
+	u32 byte_cnt = cqe_bcnt;
+	struct skb_shared_info *sinfo;
+	struct mlx5e_xdp_buff mxbuf;
+	unsigned int truesize = 0;
 	struct sk_buff *skb;
+	u32 linear_frame_sz;
+	u16 linear_data_len;
 	dma_addr_t addr;
+	u16 linear_hr;
+	void *va;
 
 	skb = napi_alloc_skb(rq->cq.napi,
 			     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
@@ -1989,16 +1996,52 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		return NULL;
 	}
 
+	va = skb->head;
 	net_prefetchw(skb->data);
 
-	/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
+	frag_offset += headlen;
+	byte_cnt -= headlen;
+	linear_hr = skb_headroom(skb);
+	linear_data_len = headlen;
+	linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
 	if (unlikely(frag_offset >= PAGE_SIZE)) {
 		frag_page++;
 		frag_offset -= PAGE_SIZE;
 	}
 
 	skb_mark_for_recycle(skb);
-	mlx5e_fill_skb_data(skb, rq, frag_page, byte_cnt, frag_offset);
+	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+	net_prefetch(mxbuf.xdp.data);
+
+	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+
+	while (byte_cnt) {
+		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
+		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+			truesize += pg_consumed_bytes;
+		else
+			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+
+		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+					       pg_consumed_bytes);
+		byte_cnt -= pg_consumed_bytes;
+		frag_offset = 0;
+		frag_page++;
+	}
+	if (xdp_buff_has_frags(&mxbuf.xdp)) {
+		struct mlx5e_frag_page *pagep;
+
+		xdp_update_skb_shared_info(skb, sinfo->nr_frags,
+					   sinfo->xdp_frags_size, truesize,
+					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+		pagep = frag_page - sinfo->nr_frags;
+		do
+			pagep->frags++;
+		while (++pagep < frag_page);
+	}
 	/* copy header */
 	addr = page_pool_get_dma_addr(head_page->page);
 	mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
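
The new while (byte_cnt) loop distributes the payload that remains after the copied header across PAGE_SIZE pages: each iteration consumes at most the rest of the current page, then restarts at offset 0 of the next page. The following is a minimal, hypothetical userspace sketch of that arithmetic only; PAGE_SIZE_MODEL, split_frags and the example values (64/256/9000) are illustrative assumptions and not part of the driver or this commit.

/* Hypothetical model of the fragment-splitting arithmetic in the loop above. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_MODEL 4096u	/* stand-in for PAGE_SIZE */

/* headlen bytes are copied into the skb's linear part; the rest of the
 * cqe_bcnt payload is split into per-page fragments. */
static void split_frags(uint32_t head_offset, uint32_t headlen, uint32_t cqe_bcnt)
{
	uint32_t frag_offset = head_offset + headlen;
	uint32_t byte_cnt = cqe_bcnt - headlen;
	unsigned int page = 0;

	/* mirrors the "if (unlikely(frag_offset >= PAGE_SIZE))" adjustment */
	if (frag_offset >= PAGE_SIZE_MODEL) {
		page++;
		frag_offset -= PAGE_SIZE_MODEL;
	}

	while (byte_cnt) {
		uint32_t left = PAGE_SIZE_MODEL - frag_offset;
		uint32_t pg_consumed_bytes = byte_cnt < left ? byte_cnt : left;

		printf("frag: page %u, offset %u, len %u\n",
		       page, frag_offset, pg_consumed_bytes);
		byte_cnt -= pg_consumed_bytes;
		frag_offset = 0;
		page++;
	}
}

int main(void)
{
	/* e.g. a 9000-byte completion with a 256-byte head starting 64 bytes
	 * into the first page -> frags of 3776, 4096 and 872 bytes */
	split_frags(64, 256, 9000);
	return 0;
}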