@@ -294,8 +294,14 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
 {
 	unsigned int totalsize = xdp->data_end - xdp->data_meta;
 	unsigned int metasize = xdp->data - xdp->data_meta;
+	struct skb_shared_info *sinfo = NULL;
 	struct sk_buff *skb;
+	u32 nr_frags = 0;
 
+	if (unlikely(xdp_buff_has_frags(xdp))) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+		nr_frags = sinfo->nr_frags;
+	}
 	net_prefetch(xdp->data_meta);
 
 	/* allocate a skb to store the frags */
@@ -312,6 +318,28 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
 		__skb_pull(skb, metasize);
 	}
 
+	if (likely(!xdp_buff_has_frags(xdp)))
+		goto out;
+
+	for (int i = 0; i < nr_frags; i++) {
+		struct skb_shared_info *skinfo = skb_shinfo(skb);
+		skb_frag_t *frag = &sinfo->frags[i];
+		struct page *page;
+		void *addr;
+
+		page = dev_alloc_page();
+		if (!page) {
+			dev_kfree_skb(skb);
+			return NULL;
+		}
+		addr = page_to_virt(page);
+
+		memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
+
+		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+					   addr, 0, skb_frag_size(frag));
+	}
+
 out:
 	xsk_buff_free(xdp);
 	return skb;
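The loop above copies each XSK fragment into a freshly allocated page rather than attaching the zero-copy buffer itself, because every XSK buffer must go back to the pool (`xsk_buff_free(xdp)` at `out:`) once the skb is built. A minimal userspace sketch of that copy-out-and-unwind pattern; all names here (`frag_t`, `copy_frags_out`) are illustrative, not kernel API:

```c
/* Userspace sketch of the copy-out pattern above: each source fragment
 * is duplicated into newly allocated memory so the originals can be
 * recycled, and everything is unwound on allocation failure.
 */
#include <stdlib.h>
#include <string.h>

typedef struct {
	void *addr;   /* start of fragment data */
	size_t size;  /* bytes used in the fragment */
} frag_t;

/* Returns 0 on success, -1 if an allocation fails. */
static int copy_frags_out(frag_t *dst, const frag_t *src, int nr)
{
	for (int i = 0; i < nr; i++) {
		void *copy = malloc(src[i].size);  /* stands in for dev_alloc_page() */

		if (!copy) {
			while (i--)                /* unwind, like dev_kfree_skb() */
				free(dst[i].addr);
			return -1;
		}
		memcpy(copy, src[i].addr, src[i].size);
		dst[i].addr = copy;
		dst[i].size = src[i].size;
	}
	return 0;
}
```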
@@ -322,14 +350,13 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 				      union i40e_rx_desc *rx_desc,
 				      unsigned int *rx_packets,
 				      unsigned int *rx_bytes,
-				      unsigned int size,
 				      unsigned int xdp_res,
 				      bool *failure)
 {
 	struct sk_buff *skb;
 
 	*rx_packets = 1;
-	*rx_bytes = size;
+	*rx_bytes = xdp_get_buff_len(xdp_buff);
 
 	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
 		return;
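Dropping the `size` parameter works because, with multi-buffer frames, the last descriptor's size covers only the tail fragment; `xdp_get_buff_len()` instead returns the linear length plus the frag bytes accumulated in the shared info. A simplified model of that accounting (the `buff_model` type is a stand-in, not the kernel's `struct xdp_buff`):

```c
/* Simplified model of the xdp_get_buff_len() accounting: linear bytes
 * plus the fragment bytes tracked in the shared info. Not kernel code.
 */
struct buff_model {
	unsigned char *data;          /* start of linear data */
	unsigned char *data_end;      /* end of linear data */
	int has_frags;                /* mirrors xdp_buff_has_frags() */
	unsigned int xdp_frags_size;  /* sum of frag sizes, as kept by
				       * i40e_add_xsk_frag() below */
};

static unsigned int buff_len(const struct buff_model *b)
{
	unsigned int len = b->data_end - b->data;

	if (b->has_frags)
		len += b->xdp_frags_size;
	return len;
}
```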
@@ -363,7 +390,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 		return;
 	}
 
-	*rx_bytes = skb->len;
 	i40e_process_skb_fields(rx_ring, rx_desc, skb);
 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
 	return;
@@ -374,6 +400,31 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 	WARN_ON_ONCE(1);
 }
 
+static int
+i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
+		  struct xdp_buff *xdp, const unsigned int size)
+{
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
+
+	if (!xdp_buff_has_frags(first)) {
+		sinfo->nr_frags = 0;
+		sinfo->xdp_frags_size = 0;
+		xdp_buff_set_frags_flag(first);
+	}
+
+	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+		xsk_buff_free(first);
+		return -ENOMEM;
+	}
+
+	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+				   virt_to_page(xdp->data_hard_start), 0, size);
+	sinfo->xdp_frags_size += size;
+	xsk_buff_add_frag(xdp);
+
+	return 0;
+}
+
 /**
  * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
  * @rx_ring: Rx ring
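The new `i40e_add_xsk_frag()` lazily initializes the first buffer's shared info and appends one frag per non-EOP descriptor, bailing out once `MAX_SKB_FRAGS` entries are in use. A compact userspace model of that accumulate-with-cap pattern; all names here are made up, and `MAX_FRAGS` merely stands in for `MAX_SKB_FRAGS`:

```c
/* Userspace model of the accumulate-with-cap pattern in
 * i40e_add_xsk_frag(): lazily initialize the frag list on first use,
 * fail cleanly when the cap is reached. Not kernel code.
 */
#include <errno.h>

#define MAX_FRAGS 17  /* illustrative cap, like MAX_SKB_FRAGS */

struct frag_list {
	int initialized;
	unsigned int nr_frags;
	unsigned int total_size;
	void *frags[MAX_FRAGS];
};

static int add_frag(struct frag_list *fl, void *buf, unsigned int size)
{
	if (!fl->initialized) {         /* first frag: start from a clean slate */
		fl->nr_frags = 0;
		fl->total_size = 0;
		fl->initialized = 1;
	}
	if (fl->nr_frags == MAX_FRAGS)  /* chain too long for a single skb */
		return -ENOMEM;

	fl->frags[fl->nr_frags++] = buf;
	fl->total_size += size;
	return 0;
}
```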
@@ -384,13 +435,18 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 next_to_process = rx_ring->next_to_process;
 	u16 next_to_clean = rx_ring->next_to_clean;
 	u16 count_mask = rx_ring->count - 1;
 	unsigned int xdp_res, xdp_xmit = 0;
+	struct xdp_buff *first = NULL;
 	struct bpf_prog *xdp_prog;
 	bool failure = false;
 	u16 cleaned_count;
 
+	if (next_to_process != next_to_clean)
+		first = *i40e_rx_bi(rx_ring, next_to_clean);
+
 	/* NB! xdp_prog will always be !NULL, due to the fact that
 	 * this path is enabled by setting an XDP program.
 	 */
@@ -404,7 +460,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		unsigned int size;
 		u64 qword;
 
-		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
+		rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 
 		/* This memory barrier is needed to keep us from reading
@@ -417,9 +473,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 			i40e_clean_programming_status(rx_ring,
 						      rx_desc->raw.qword[0],
 						      qword);
-			bi = *i40e_rx_bi(rx_ring, next_to_clean);
+			bi = *i40e_rx_bi(rx_ring, next_to_process);
 			xsk_buff_free(bi);
-			next_to_clean = (next_to_clean + 1) & count_mask;
+			next_to_process = (next_to_process + 1) & count_mask;
 			continue;
 		}
 
@@ -428,22 +484,35 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
-		bi = *i40e_rx_bi(rx_ring, next_to_clean);
+		bi = *i40e_rx_bi(rx_ring, next_to_process);
 		xsk_buff_set_size(bi, size);
 		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
 
-		xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
-		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
-					  &rx_bytes, size, xdp_res, &failure);
+		if (!first)
+			first = bi;
+		else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+			break;
+
+		next_to_process = (next_to_process + 1) & count_mask;
+
+		if (i40e_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
+		i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
+					  &rx_bytes, xdp_res, &failure);
+		first->flags = 0;
+		next_to_clean = next_to_process;
 		if (failure)
 			break;
 		total_rx_packets += rx_packets;
 		total_rx_bytes += rx_bytes;
 		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
-		next_to_clean = (next_to_clean + 1) & count_mask;
+		first = NULL;
 	}
 
 	rx_ring->next_to_clean = next_to_clean;
+	rx_ring->next_to_process = next_to_process;
 	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
 
 	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
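The ring now carries two cursors: `next_to_process` advances over every descriptor, including non-EOP fragments, while `next_to_clean` only moves once a complete frame has been handed off, which is why a partially received packet can be resumed via `first = *i40e_rx_bi(rx_ring, next_to_clean)` on the next poll. Since the ring size is a power of two, all cursor math wraps with `& count_mask`. A small runnable demo of that arithmetic, using made-up values:

```c
/* Demo of the masked ring-cursor arithmetic used above. The ring size
 * must be a power of two so "& (count - 1)" implements wraparound.
 */
#include <stdio.h>

int main(void)
{
	unsigned short count = 512;            /* ring size, power of two */
	unsigned short count_mask = count - 1;
	unsigned short next_to_clean = 510;
	unsigned short next_to_use = 100;

	/* Advancing past the end wraps to the start of the ring. */
	unsigned short ntc = (next_to_clean + 3) & count_mask;
	printf("510 + 3 wraps to %u\n", ntc);    /* prints 1 */

	/* Descriptors eligible for refill, valid across the wrap point. */
	unsigned short cleaned = (next_to_clean - next_to_use - 1) & count_mask;
	printf("cleaned_count = %u\n", cleaned); /* prints 409 */

	return 0;
}
```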