@@ -2278,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
 			    struct xdp_buff *xdp, int *size,
+			    struct skb_shared_info *xdp_sinfo,
 			    struct page *page)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
 	int data_len, len;
@@ -2298,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 				len, dma_dir);
 	rx_desc->buf_phys_addr = 0;
 
-	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
-		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
+		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
 
 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
 		__skb_frag_set_page(frag, page);
-		sinfo->nr_frags++;
+
+		/* last fragment */
+		if (len == *size) {
+			struct skb_shared_info *sinfo;
+
+			sinfo = xdp_get_shared_info_from_buff(xdp);
+			sinfo->nr_frags = xdp_sinfo->nr_frags;
+			memcpy(sinfo->frags, xdp_sinfo->frags,
+			       sinfo->nr_frags * sizeof(skb_frag_t));
+		}
 	} else {
 		page_pool_put_full_page(rxq->page_pool, page, true);
 	}
@@ -2348,6 +2357,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rx_proc = 0, rx_todo, refill, size = 0;
 	struct net_device *dev = pp->dev;
+	struct skb_shared_info sinfo;
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	u32 desc_status, frame_sz;
@@ -2357,6 +2367,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	xdp_buf.frame_sz = PAGE_SIZE;
 	xdp_buf.rxq = &rxq->xdp_rxq;
 
+	sinfo.nr_frags = 0;
+
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
 
@@ -2395,22 +2407,19 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			rx_desc->buf_phys_addr = 0;
 			page_pool_put_full_page(rxq->page_pool, page,
 						true);
-			continue;
+			goto next;
 		}
 
 		mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-					    &size, page);
+					    &size, &sinfo, page);
 	} /* Middle or Last descriptor */
 
 	if (!(rx_status & MVNETA_RXD_LAST_DESC))
 		/* no last descriptor this time */
 		continue;
 
 	if (size) {
-		struct skb_shared_info *sinfo;
-
-		sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
-		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, sinfo, -1);
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 		goto next;
 	}
 
@@ -2421,10 +2430,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-			struct skb_shared_info *sinfo;
 
-			sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, sinfo, -1);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
 			u64_stats_update_begin(&stats->syncp);
 			stats->es.skb_alloc_error++;
@@ -2441,15 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 next:
 		xdp_buf.data_hard_start = NULL;
+		sinfo.nr_frags = 0;
 	}
 	rcu_read_unlock();
 
-	if (xdp_buf.data_hard_start) {
-		struct skb_shared_info *sinfo;
-
-		sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
-		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, sinfo, -1);
-	}
+	if (xdp_buf.data_hard_start)
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
 	if (ps.xdp_redirect)
 		xdp_do_flush_map();