@@ -127,7 +127,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
 
	for (i = update_owner_only; i < cqe_count;
	     i++, cq->mini_arr_idx++, cqcc++) {
-		if (unlikely(cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE))
+		if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
			mlx5e_read_mini_arr_slot(cq, cqcc);
 
		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
@@ -212,6 +212,11 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
	return -ENOMEM;
 }
 
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+	return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
 static inline void
 mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
				struct mlx5e_mpw_info *wi,
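The new mlx5e_mpwqe_strides_per_page() helper replaces the former compile-time MLX5_MPWRQ_STRIDES_PER_PAGE constant with a per-RQ value derived from rq->mpwqe_num_strides. Assuming, as the split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER) call later in this patch suggests, that MLX5_MPWRQ_WQE_PAGE_ORDER is the log2 of the pages backing one multi-packet WQE, the helper is simply "total strides per WQE / pages per WQE" written as a right shift. A minimal standalone sketch of that arithmetic, with made-up example numbers (illustration only, not driver code):

#include <stdio.h>

/* Illustration only: strides per page for a WQE that spans
 * (1 << page_order) pages and holds num_strides strides in total.
 */
static unsigned int strides_per_page(unsigned int num_strides,
				     unsigned int page_order)
{
	return num_strides >> page_order;	/* num_strides / (1 << page_order) */
}

int main(void)
{
	/* Example values only; the driver reads rq->mpwqe_num_strides and
	 * MLX5_MPWRQ_WQE_PAGE_ORDER instead.
	 */
	printf("%u\n", strides_per_page(2048, 5));	/* 2048 strides / 32 pages = 64 */
	return 0;
}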
@@ -230,13 +235,13 @@ mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
 }
 
 static inline void
-mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev,
+mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
				struct sk_buff *skb,
				struct mlx5e_mpw_info *wi,
				u32 page_idx, u32 frag_offset,
				u32 len)
 {
-	unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE);
+	unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
 
	wi->skbs_frags[page_idx]++;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
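Here (and in the fragmented variant in the next hunk) the fragment's truesize is now rounded up to the runtime stride size rather than the fixed MLX5_MPWRQ_STRIDE_SIZE. The kernel's ALIGN() rounds a value up to the next multiple of a power-of-two boundary; below is a standalone sketch of that rounding, assuming rq->mpwqe_stride_sz is a power of two (example values, not driver code):

#include <stdio.h>

/* Same rounding as the kernel's ALIGN(x, a) for a power-of-two 'a':
 * round x up to the next multiple of a.
 */
#define EXAMPLE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int stride_sz = 64;	/* example stand-in for rq->mpwqe_stride_sz */

	printf("%u\n", EXAMPLE_ALIGN(100u, stride_sz));	/* 128: rounded up      */
	printf("%u\n", EXAMPLE_ALIGN(128u, stride_sz));	/* 128: already aligned */
	return 0;
}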
@@ -245,15 +250,15 @@ mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev,
 }
 
 static inline void
-mlx5e_add_skb_frag_fragmented_mpwqe(struct device *pdev,
+mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
				    struct sk_buff *skb,
				    struct mlx5e_mpw_info *wi,
				    u32 page_idx, u32 frag_offset,
				    u32 len)
 {
-	unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE);
+	unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
 
-	dma_sync_single_for_cpu(pdev,
+	dma_sync_single_for_cpu(rq->pdev,
				wi->umr.dma_info[page_idx].addr + frag_offset,
				len, DMA_FROM_DEVICE);
	wi->skbs_frags[page_idx]++;
@@ -293,7 +298,6 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
	skb_copy_to_linear_data_offset(skb, 0,
				       page_address(dma_info->page) + offset,
				       len);
-#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE)
	if (unlikely(offset + headlen > PAGE_SIZE)) {
		dma_info++;
		headlen_pg = len;
@@ -304,7 +308,6 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
					       page_address(dma_info->page),
					       len);
	}
-#endif
 }
 
 static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
@@ -430,7 +433,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
			goto err_unmap;
-		atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
+		atomic_add(mlx5e_mpwqe_strides_per_page(rq),
			   &wi->umr.dma_info[i].page->_count);
		wi->skbs_frags[i] = 0;
	}
@@ -449,7 +452,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
	while (--i >= 0) {
		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
-		atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE,
+		atomic_sub(mlx5e_mpwqe_strides_per_page(rq),
			   &wi->umr.dma_info[i].page->_count);
		put_page(wi->umr.dma_info[i].page);
	}
@@ -474,7 +477,7 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
-		atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
+		atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
			   &wi->umr.dma_info[i].page->_count);
		put_page(wi->umr.dma_info[i].page);
	}
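On the page-reference bookkeeping around these atomic_add()/atomic_sub() calls: the alloc path appears to pre-charge each page with one reference per stride it can serve, each fragment handed to an skb keeps one of those references (counted in wi->skbs_frags[i]), and the free path gives back only the references that were never handed out. A rough standalone model of that accounting with plain integers and example numbers (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	int refs             = 1;	/* stands in for page->_count of a fresh page   */
	int strides_per_page = 64;	/* example mlx5e_mpwqe_strides_per_page() value */
	int skbs_frags       = 3;	/* example wi->skbs_frags[i] at free time       */

	refs += strides_per_page;		/* alloc path: atomic_add()             */
	/* ...each of the 3 skb fragments drops one reference later, when the
	 * network stack frees its skb...
	 */
	refs -= strides_per_page - skbs_frags;	/* free path: atomic_sub() of the
						 * references never handed out         */

	printf("%d\n", refs);	/* 4 = the original ref + 3 held by in-flight skbs */
	return 0;
}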
@@ -524,7 +527,7 @@ static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
	 */
	split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
+		atomic_add(mlx5e_mpwqe_strides_per_page(rq),
			   &wi->dma_info.page[i]._count);
		wi->skbs_frags[i] = 0;
	}
@@ -548,7 +551,7 @@ void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
	dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
		       PCI_DMA_FROMDEVICE);
	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
+		atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
			   &wi->dma_info.page[i]._count);
		put_page(&wi->dma_info.page[i]);
	}
@@ -793,29 +796,27 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
					   u32 cqe_bcnt,
					   struct sk_buff *skb)
 {
-	u32 consumed_bytes = ALIGN(cqe_bcnt, MLX5_MPWRQ_STRIDE_SIZE);
+	u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
-	u32 wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
+	u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	u32 head_page_idx = page_idx;
	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
	u32 frag_offset = head_offset + headlen;
	u16 byte_cnt = cqe_bcnt - headlen;
 
-#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE)
	if (unlikely(frag_offset >= PAGE_SIZE)) {
		page_idx++;
		frag_offset -= PAGE_SIZE;
	}
-#endif
 
	wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
 
	while (byte_cnt) {
		u32 pg_consumed_bytes =
			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
 
-		wi->add_skb_frag(rq->pdev, skb, wi, page_idx, frag_offset,
+		wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
				 pg_consumed_bytes);
		byte_cnt -= pg_consumed_bytes;
		frag_offset = 0;
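For reference, the offset math at the top of mlx5e_mpwqe_fill_rx_skb(): the packet's byte offset inside the multi-packet WQE is its stride index times the (now runtime) stride size, which is then split into a page index and an offset within that page. A standalone sketch with example values standing in for rq->mpwqe_stride_sz and a 4 KB page (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int page_size  = 4096;	/* example PAGE_SIZE                           */
	unsigned int page_shift = 12;	/* example PAGE_SHIFT for 4 KB pages           */
	unsigned int stride_sz  = 64;	/* example stand-in for rq->mpwqe_stride_sz    */
	unsigned int stride_ix  = 70;	/* as returned by mpwrq_get_cqe_stride_index() */

	unsigned int wqe_offset  = stride_ix * stride_sz;		/* 4480             */
	unsigned int head_offset = wqe_offset & (page_size - 1);	/* 384, within page */
	unsigned int page_idx    = wqe_offset >> page_shift;		/* 1, second page   */

	printf("wqe_offset=%u head_offset=%u page_idx=%u\n",
	       wqe_offset, head_offset, page_idx);
	return 0;
}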
@@ -865,7 +866,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
 mpwrq_cqe_out:
-	if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
+	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
		return;
 
	wi->free_wqe(rq, wi);