@@ -2033,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 
 static void
 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-                    struct xdp_buff *xdp, int sync_len, bool napi)
+                    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+                    int sync_len)
 {
-        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
         int i;
 
         for (i = 0; i < sinfo->nr_frags; i++)
                 page_pool_put_full_page(rxq->page_pool,
                                         skb_frag_page(&sinfo->frags[i]), true);
         page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
                            sync_len, true);
 }
 
 static int
@@ -2179,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                struct bpf_prog *prog, struct xdp_buff *xdp,
                u32 frame_sz, struct mvneta_stats *stats)
 {
+        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
         unsigned int len, data_len, sync;
         u32 ret, act;
 
@@ -2199,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
                 err = xdp_do_redirect(pp->dev, xdp, prog);
                 if (unlikely(err)) {
-                        mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+                        mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
                         ret = MVNETA_XDP_DROPPED;
                 } else {
                         ret = MVNETA_XDP_REDIR;
@@ -2210,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
         case XDP_TX:
                 ret = mvneta_xdp_xmit_back(pp, xdp);
                 if (ret != MVNETA_XDP_TX)
-                        mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+                        mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
                 break;
         default:
                 bpf_warn_invalid_xdp_action(act);
@@ -2219,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                 trace_xdp_exception(pp->dev, prog, act);
                 fallthrough;
         case XDP_DROP:
-                mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+                mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
                 ret = MVNETA_XDP_DROPPED;
                 stats->xdp_drop++;
                 break;
@@ -2277,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
                             struct mvneta_rx_desc *rx_desc,
                             struct mvneta_rx_queue *rxq,
                             struct xdp_buff *xdp, int *size,
+                            struct skb_shared_info *xdp_sinfo,
                             struct page *page)
 {
-        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
         struct net_device *dev = pp->dev;
         enum dma_data_direction dma_dir;
         int data_len, len;
@@ -2297,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
                                 len, dma_dir);
         rx_desc->buf_phys_addr = 0;
 
-        if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
-                skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+        if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
+                skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
 
                 skb_frag_off_set(frag, pp->rx_offset_correction);
                 skb_frag_size_set(frag, data_len);
                 __skb_frag_set_page(frag, page);
-                sinfo->nr_frags++;
+
+                /* last fragment */
+                if (len == *size) {
+                        struct skb_shared_info *sinfo;
+
+                        sinfo = xdp_get_shared_info_from_buff(xdp);
+                        sinfo->nr_frags = xdp_sinfo->nr_frags;
+                        memcpy(sinfo->frags, xdp_sinfo->frags,
+                               sinfo->nr_frags * sizeof(skb_frag_t));
+                }
         } else {
                 page_pool_put_full_page(rxq->page_pool, page, true);
         }
@@ -2347,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
         int rx_proc = 0, rx_todo, refill, size = 0;
         struct net_device *dev = pp->dev;
-        struct xdp_buff xdp_buf = {
-                .frame_sz = PAGE_SIZE,
-                .rxq = &rxq->xdp_rxq,
-        };
+        struct skb_shared_info sinfo;
         struct mvneta_stats ps = {};
         struct bpf_prog *xdp_prog;
         u32 desc_status, frame_sz;
+        struct xdp_buff xdp_buf;
+
+        xdp_buf.data_hard_start = NULL;
+        xdp_buf.frame_sz = PAGE_SIZE;
+        xdp_buf.rxq = &rxq->xdp_rxq;
+
+        sinfo.nr_frags = 0;
 
         /* Get number of received packets */
         rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2393,19 +2407,19 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                                 rx_desc->buf_phys_addr = 0;
                                 page_pool_put_full_page(rxq->page_pool, page,
                                                         true);
-                                continue;
+                                goto next;
                         }
 
                         mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-                                                    &size, page);
+                                                    &size, &sinfo, page);
                 } /* Middle or Last descriptor */
 
                 if (!(rx_status & MVNETA_RXD_LAST_DESC))
                         /* no last descriptor this time */
                         continue;
 
                 if (size) {
-                        mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+                        mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
                         goto next;
                 }
 
@@ -2417,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                 if (IS_ERR(skb)) {
                         struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
-                        mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+                        mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
                         u64_stats_update_begin(&stats->syncp);
                         stats->es.skb_alloc_error++;
@@ -2434,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                 napi_gro_receive(napi, skb);
 next:
                 xdp_buf.data_hard_start = NULL;
+                sinfo.nr_frags = 0;
         }
         rcu_read_unlock();
 
         if (xdp_buf.data_hard_start)
-                mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+                mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
         if (ps.xdp_redirect)
                 xdp_do_flush_map();
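
Note (not part of the commit): the diff above moves fragment bookkeeping out of the xdp_buff's shared_info area and onto the caller's stack, and syncs it back into the buffer only when the last fragment of a frame has been added; the free paths then work from the caller-owned copy whether or not that sync ever happened. The userspace-only sketch below illustrates that pattern under those assumptions; every name in it (frag_info, rx_buffer, add_fragment) is invented for illustration and is not a kernel or driver API.

/* Illustrative analogue of "build shared info on the stack, sync on last fragment". */
#include <stdio.h>
#include <string.h>

#define MAX_FRAGS 4

struct frag_info {
        int nr_frags;
        size_t frag_len[MAX_FRAGS];
};

struct rx_buffer {
        struct frag_info shared;        /* plays the role of the buffer-resident shared_info */
};

/* Record one fragment in the caller-owned info; copy it into the buffer only
 * for the last fragment, so the buffer's shared area is written once per frame. */
static void add_fragment(struct rx_buffer *buf, struct frag_info *caller_info,
                         size_t len, int last)
{
        if (caller_info->nr_frags < MAX_FRAGS)
                caller_info->frag_len[caller_info->nr_frags++] = len;

        if (last)
                memcpy(&buf->shared, caller_info, sizeof(*caller_info));
}

int main(void)
{
        struct rx_buffer buf = { .shared = { .nr_frags = 0 } };
        struct frag_info info = { .nr_frags = 0 };
        size_t lens[] = { 1024, 1024, 512 };
        int i;

        for (i = 0; i < 3; i++)
                add_fragment(&buf, &info, lens[i], i == 2);

        printf("frags synced to buffer: %d\n", buf.shared.nr_frags);
        return 0;
}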