@@ -319,11 +319,21 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 	return err;
 }
 
+static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
+{
+#define CAN_RELEASE_MASK \
+	(BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
+
+#define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
+
+	return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
+}
+
 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 				     struct mlx5e_wqe_frag_info *frag,
 				     bool recycle)
 {
-	if (frag->flags & BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE))
+	if (mlx5e_frag_can_release(frag))
 		mlx5e_page_release_fragmented(rq, frag->frag_page, recycle);
 }
 
@@ -347,6 +357,8 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
 		if (unlikely(err))
 			goto free_frags;
 
+		frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+
 		headroom = i == 0 ? rq->buff.headroom : 0;
 		addr = page_pool_get_dma_addr(frag->frag_page->page);
 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
@@ -367,7 +379,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 {
 	int i;
 
-	if (rq->xsk_pool) {
+	if (rq->xsk_pool && !(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))) {
 		/* The `recycle` parameter is ignored, and the page is always
 		 * put into the Reuse Ring, because there is no way to return
 		 * the page to the userspace when the interface goes down.
@@ -387,6 +399,20 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 	mlx5e_free_rx_wqe(rq, wi, false);
 }
 
+static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+	int i;
+
+	for (i = 0; i < wqe_bulk; i++) {
+		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+		struct mlx5e_wqe_frag_info *wi;
+
+		wi = get_frag(rq, j);
+		mlx5e_free_rx_wqe(rq, wi, true);
+	}
+}
+
 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 {
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -792,6 +818,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	 */
 	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
 
+	mlx5e_free_rx_wqes(rq, head, wqe_bulk);
+
 	if (!rq->xsk_pool)
 		count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
 	else if (likely(!rq->xsk_pool->dma_need_sync))
@@ -1727,7 +1755,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
 		mlx5e_handle_rx_err_cqe(rq, cqe);
-		goto free_wqe;
+		goto wq_cyc_pop;
 	}
 
 	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
@@ -1741,23 +1769,21 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			/* do not return page to cache,
 			 * it will be returned on XDP_TX completion.
 			 */
-			goto wq_cyc_pop;
+			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
 		}
-		goto free_wqe;
+		goto wq_cyc_pop;
 	}
 
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
 	if (mlx5e_cqe_regb_chain(cqe))
 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
 			dev_kfree_skb_any(skb);
-			goto free_wqe;
+			goto wq_cyc_pop;
 		}
 
 	napi_gro_receive(rq->cq.napi, skb);
 
-free_wqe:
-	mlx5e_free_rx_wqe(rq, wi, true);
 wq_cyc_pop:
 	mlx5_wq_cyc_pop(wq);
 }
@@ -1781,7 +1807,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
 		mlx5e_handle_rx_err_cqe(rq, cqe);
-		goto free_wqe;
+		goto wq_cyc_pop;
 	}
 
 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
@@ -1794,9 +1820,9 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			/* do not return page to cache,
 			 * it will be returned on XDP_TX completion.
 			 */
-			goto wq_cyc_pop;
+			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
 		}
-		goto free_wqe;
+		goto wq_cyc_pop;
 	}
 
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
@@ -1806,8 +1832,6 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	mlx5e_rep_tc_receive(cqe, rq, skb);
 
-free_wqe:
-	mlx5e_free_rx_wqe(rq, wi, true);
 wq_cyc_pop:
 	mlx5_wq_cyc_pop(wq);
 }
@@ -2454,25 +2478,24 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
 		rq->stats->wqe_err++;
-		goto wq_free_wqe;
+		goto wq_cyc_pop;
 	}
 
 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
 			      mlx5e_skb_from_cqe_linear,
 			      mlx5e_skb_from_cqe_nonlinear,
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb)
-		goto wq_free_wqe;
+		goto wq_cyc_pop;
 
 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 	if (unlikely(!skb->dev)) {
 		dev_kfree_skb_any(skb);
-		goto wq_free_wqe;
+		goto wq_cyc_pop;
 	}
 	napi_gro_receive(rq->cq.napi, skb);
 
-wq_free_wqe:
-	mlx5e_free_rx_wqe(rq, wi, true);
+wq_cyc_pop:
 	mlx5_wq_cyc_pop(wq);
 }
 
@@ -2547,12 +2570,12 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
 
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
 		rq->stats->wqe_err++;
-		goto free_wqe;
+		goto wq_cyc_pop;
 	}
 
 	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
 	if (!skb)
-		goto free_wqe;
+		goto wq_cyc_pop;
 
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 	skb_push(skb, ETH_HLEN);
@@ -2561,8 +2584,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
 				 rq->netdev->devlink_port);
 	dev_kfree_skb_any(skb);
 
-free_wqe:
-	mlx5e_free_rx_wqe(rq, wi, false);
+wq_cyc_pop:
 	mlx5_wq_cyc_pop(wq);
 }
 
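For reference, a minimal standalone sketch (not part of the patch) of the mask/value test that mlx5e_frag_can_release() introduces above: release a fragment's page only when it is the last user of that page and nothing (e.g. a pending XDP_TX) has asked to keep it. The bit positions, the trimmed-down struct, and the demo main() are assumptions for illustration; the real definitions live in the mlx5e driver headers.

/* Standalone illustration of the CAN_RELEASE mask/value check.
 * Bit positions and the struct layout are assumed, not taken from the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

enum {
	MLX5E_WQE_FRAG_LAST_IN_PAGE,	/* assumed bit position */
	MLX5E_WQE_FRAG_SKIP_RELEASE,	/* assumed bit position */
};

struct wqe_frag_info {			/* trimmed stand-in for mlx5e_wqe_frag_info */
	unsigned int flags;
};

static bool frag_can_release(const struct wqe_frag_info *frag)
{
	unsigned int mask = BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) |
			    BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
	unsigned int value = BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);

	/* Release only if this frag is the last one in the page AND
	 * no one has flagged the page to be kept (SKIP_RELEASE clear).
	 */
	return (frag->flags & mask) == value;
}

int main(void)
{
	struct wqe_frag_info cases[] = {
		{ .flags = BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) },
		{ .flags = BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) |
			   BIT(MLX5E_WQE_FRAG_SKIP_RELEASE) },
		{ .flags = 0 },
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("flags=0x%x -> can_release=%d\n",
		       cases[i].flags, frag_can_release(&cases[i]));
	return 0;
}

Only the first case releases the page; the second models the XDP_TX path in the handlers above, which now sets MLX5E_WQE_FRAG_SKIP_RELEASE instead of skipping the free label.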