@@ -250,27 +250,68 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
 			       xdp->data_end - xdp->data_hard_start,
 			       GFP_ATOMIC | __GFP_NOWARN);
 	if (unlikely(!skb))
-		return NULL;
+		goto out;
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
+out:
 	xsk_buff_free(xdp);
 	return skb;
 }
 
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
+				      struct xdp_buff *xdp_buff,
+				      union i40e_rx_desc *rx_desc,
+				      unsigned int *rx_packets,
+				      unsigned int *rx_bytes,
+				      unsigned int size,
+				      unsigned int xdp_res)
 {
-	u32 ntc = rx_ring->next_to_clean + 1;
+	struct sk_buff *skb;
+
+	*rx_packets = 1;
+	*rx_bytes = size;
+
+	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
+		return;
+
+	if (xdp_res == I40E_XDP_CONSUMED) {
+		xsk_buff_free(xdp_buff);
+		return;
+	}
+
+	if (xdp_res == I40E_XDP_PASS) {
+		/* NB! We are not checking for errors using
+		 * i40e_test_staterr with
+		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
+		 * SBP is *not* set in PRT_SBPVSI (default not set).
+		 */
+		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
+		if (!skb) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			*rx_packets = 0;
+			*rx_bytes = 0;
+			return;
+		}
 
-	ntc = (ntc < rx_ring->count) ? ntc : 0;
-	rx_ring->next_to_clean = ntc;
+		if (eth_skb_pad(skb)) {
+			*rx_packets = 0;
+			*rx_bytes = 0;
+			return;
+		}
+
+		*rx_bytes = skb->len;
+		i40e_process_skb_fields(rx_ring, rx_desc, skb);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
+		return;
+	}
+
+	/* Should never get here, as all valid cases have been handled already.
+	 */
+	WARN_ON_ONCE(1);
 }
 
 /**
@@ -284,17 +325,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	u16 next_to_clean = rx_ring->next_to_clean;
+	u16 count_mask = rx_ring->count - 1;
 	unsigned int xdp_res, xdp_xmit = 0;
 	bool failure = false;
-	struct sk_buff *skb;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		union i40e_rx_desc *rx_desc;
-		struct xdp_buff **bi;
+		unsigned int rx_packets;
+		unsigned int rx_bytes;
+		struct xdp_buff *bi;
 		unsigned int size;
 		u64 qword;
 
-		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 
 		/* This memory barrier is needed to keep us from reading
@@ -307,11 +351,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 			i40e_clean_programming_status(rx_ring,
 						      rx_desc->raw.qword[0],
 						      qword);
-			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
-			xsk_buff_free(*bi);
-			*bi = NULL;
-			cleaned_count++;
-			i40e_inc_ntc(rx_ring);
+			bi = *i40e_rx_bi(rx_ring, next_to_clean);
+			xsk_buff_free(bi);
+			next_to_clean = (next_to_clean + 1) & count_mask;
 			continue;
 		}
 
@@ -320,61 +362,30 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
-		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
-		(*bi)->data_end = (*bi)->data + size;
-		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
-
-		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
-		if (xdp_res) {
-			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
-				xdp_xmit |= xdp_res;
-			else
-				xsk_buff_free(*bi);
-
-			*bi = NULL;
-			total_rx_bytes += size;
-			total_rx_packets++;
-
-			cleaned_count++;
-			i40e_inc_ntc(rx_ring);
-			continue;
-		}
-
-		/* XDP_PASS path */
-
-		/* NB! We are not checking for errors using
-		 * i40e_test_staterr with
-		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
-		 * SBP is *not* set in PRT_SBPVSI (default not set).
-		 */
-		skb = i40e_construct_skb_zc(rx_ring, *bi);
-		if (!skb) {
-			rx_ring->rx_stats.alloc_buff_failed++;
-			break;
-		}
-
-		*bi = NULL;
-		cleaned_count++;
-		i40e_inc_ntc(rx_ring);
-
-		if (eth_skb_pad(skb))
-			continue;
-
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		i40e_process_skb_fields(rx_ring, rx_desc, skb);
-		napi_gro_receive(&rx_ring->q_vector->napi, skb);
+		bi = *i40e_rx_bi(rx_ring, next_to_clean);
+		bi->data_end = bi->data + size;
+		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+
+		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
+					  &rx_bytes, size, xdp_res);
+		total_rx_packets += rx_packets;
+		total_rx_bytes += rx_bytes;
+		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+		next_to_clean = (next_to_clean + 1) & count_mask;
 	}
 
+	rx_ring->next_to_clean = next_to_clean;
+	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+
 	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
 		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
 
 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 
 	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
-		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+		if (failure || next_to_clean == rx_ring->next_to_use)
 			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 		else
 			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
@@ -604,16 +615,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 
 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i++) {
-		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
+	u16 count_mask = rx_ring->count - 1;
+	u16 ntc = rx_ring->next_to_clean;
+	u16 ntu = rx_ring->next_to_use;
 
-		if (!rx_bi)
-			continue;
+	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
 
 		xsk_buff_free(rx_bi);
-		rx_bi = NULL;
 	}
 }
 
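
The heart of the change to i40e_clean_rx_irq_zc() is index bookkeeping: the i40e_inc_ntc() helper and the per-descriptor cleaned_count++ are replaced by a local next_to_clean that wraps with "& count_mask" and is written back to the ring once per poll, with the refill count derived from the gap between next_to_clean and next_to_use. Below is a minimal stand-alone sketch of that arithmetic; the demo_ring, demo_advance and demo_unused names are invented for illustration, and the mask-based wrap assumes the ring size is a power of two.

/*
 * Illustrative sketch only, not part of the patch: a stand-alone model of
 * the ring-index bookkeeping the change switches to. All names here are
 * hypothetical; the "& (count - 1)" wrap is only equivalent to a modulo
 * when the descriptor count is a power of two.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint16_t count;		/* number of descriptors, power of two */
	uint16_t next_to_use;	/* producer index (refill side) */
	uint16_t next_to_clean;	/* consumer index (Rx clean side) */
};

/* Advance an index with a mask instead of a compare-and-reset helper,
 * mirroring "next_to_clean = (next_to_clean + 1) & count_mask".
 */
static uint16_t demo_advance(uint16_t idx, uint16_t count_mask)
{
	return (idx + 1) & count_mask;
}

/* Derive the refill budget from the two indices once per poll, mirroring
 * "(next_to_clean - next_to_use - 1) & count_mask", instead of bumping a
 * cleaned_count for every processed descriptor.
 */
static uint16_t demo_unused(const struct demo_ring *r)
{
	uint16_t count_mask = r->count - 1;

	return (r->next_to_clean - r->next_to_use - 1) & count_mask;
}

int main(void)
{
	struct demo_ring r = { .count = 512, .next_to_use = 508, .next_to_clean = 508 };
	uint16_t count_mask = r.count - 1;
	int i;

	/* Consume eight descriptors; the index wraps past 511 back to 0. */
	for (i = 0; i < 8; i++)
		r.next_to_clean = demo_advance(r.next_to_clean, count_mask);

	/* Prints next_to_clean=4 unused=7 (one slot is kept unused). */
	printf("next_to_clean=%u unused=%u\n",
	       (unsigned int)r.next_to_clean, (unsigned int)demo_unused(&r));
	return 0;
}

Deferring the write-back of next_to_clean and the refill-count calculation to the end of the poll keeps the hot loop free of extra stores, which is the point of the refactor.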