@@ -103,7 +103,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
103
103
xdptxd -> dma_addr = dma_addr ;
104
104
105
105
if (unlikely (!INDIRECT_CALL_2 (sq -> xmit_xdp_frame , mlx5e_xmit_xdp_frame_mpwqe ,
106
- mlx5e_xmit_xdp_frame , sq , xdptxd , 0 )))
106
+ mlx5e_xmit_xdp_frame , sq , xdptxd , 0 , NULL )))
107
107
return false;
108
108
109
109
/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
@@ -145,7 +145,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
145
145
xdptxd -> dma_addr = dma_addr ;
146
146
147
147
if (unlikely (!INDIRECT_CALL_2 (sq -> xmit_xdp_frame , mlx5e_xmit_xdp_frame_mpwqe ,
148
- mlx5e_xmit_xdp_frame , sq , xdptxd , 0 )))
148
+ mlx5e_xmit_xdp_frame , sq , xdptxd , 0 , NULL )))
149
149
return false;
150
150
151
151
/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
@@ -261,6 +261,37 @@ const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
261
261
.xmo_rx_hash = mlx5e_xdp_rx_hash ,
262
262
};
263
263
264
/* Private context passed through the xsk_tx_metadata completion callback:
 * bundles the TX completion CQE with its CQ so the timestamp helper can
 * reach the device (priv->cq->mdev) and its clock.
 */
struct mlx5e_xsk_tx_complete {
	struct mlx5_cqe64 *cqe;
	struct mlx5e_cq *cq;
};
268
+
269
+ static u64 mlx5e_xsk_fill_timestamp (void * _priv )
270
+ {
271
+ struct mlx5e_xsk_tx_complete * priv = _priv ;
272
+ u64 ts ;
273
+
274
+ ts = get_cqe_ts (priv -> cqe );
275
+
276
+ if (mlx5_is_real_time_rq (priv -> cq -> mdev ) || mlx5_is_real_time_sq (priv -> cq -> mdev ))
277
+ return mlx5_real_time_cyc2time (& priv -> cq -> mdev -> clock , ts );
278
+
279
+ return mlx5_timecounter_cyc2time (& priv -> cq -> mdev -> clock , ts );
280
+ }
281
+
282
/* xsk_tx_metadata_ops::tmo_request_checksum callback.
 *
 * Requests L3 and L4 checksum offload for the frame being transmitted.
 * priv points at the WQE's Ethernet segment prepared by the xmit path.
 */
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
{
	struct mlx5_wqe_eth_seg *eseg = priv;

	/* HW/FW is doing parsing, so offsets are largely ignored. */
	eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
}
289
+
290
/* AF_XDP TX metadata offload hooks for mlx5e, wired into the XDP transmit
 * path via xsk_tx_metadata_request() and into the completion path via
 * xsk_tx_metadata_complete().
 */
const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = {
	.tmo_fill_timestamp = mlx5e_xsk_fill_timestamp,
	.tmo_request_checksum = mlx5e_xsk_request_checksum,
};
294
+
264
295
/* returns true if packet was consumed by xdp */
265
296
bool mlx5e_xdp_handle (struct mlx5e_rq * rq ,
266
297
struct bpf_prog * prog , struct mlx5e_xdp_buff * mxbuf )
@@ -398,11 +429,11 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
398
429
399
430
INDIRECT_CALLABLE_SCOPE bool
400
431
mlx5e_xmit_xdp_frame (struct mlx5e_xdpsq * sq , struct mlx5e_xmit_data * xdptxd ,
401
- int check_result );
432
+ int check_result , struct xsk_tx_metadata * meta );
402
433
403
434
INDIRECT_CALLABLE_SCOPE bool
404
435
mlx5e_xmit_xdp_frame_mpwqe (struct mlx5e_xdpsq * sq , struct mlx5e_xmit_data * xdptxd ,
405
- int check_result )
436
+ int check_result , struct xsk_tx_metadata * meta )
406
437
{
407
438
struct mlx5e_tx_mpwqe * session = & sq -> mpwqe ;
408
439
struct mlx5e_xdpsq_stats * stats = sq -> stats ;
@@ -420,7 +451,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
420
451
*/
421
452
if (unlikely (sq -> mpwqe .wqe ))
422
453
mlx5e_xdp_mpwqe_complete (sq );
423
- return mlx5e_xmit_xdp_frame (sq , xdptxd , 0 );
454
+ return mlx5e_xmit_xdp_frame (sq , xdptxd , 0 , meta );
424
455
}
425
456
if (!xdptxd -> len ) {
426
457
skb_frag_t * frag = & xdptxdf -> sinfo -> frags [0 ];
@@ -450,6 +481,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
450
481
* and it's safe to complete it at any time.
451
482
*/
452
483
mlx5e_xdp_mpwqe_session_start (sq );
484
+ xsk_tx_metadata_request (meta , & mlx5e_xsk_tx_metadata_ops , & session -> wqe -> eth );
453
485
}
454
486
455
487
mlx5e_xdp_mpwqe_add_dseg (sq , p , stats );
@@ -480,7 +512,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
480
512
481
513
INDIRECT_CALLABLE_SCOPE bool
482
514
mlx5e_xmit_xdp_frame (struct mlx5e_xdpsq * sq , struct mlx5e_xmit_data * xdptxd ,
483
- int check_result )
515
+ int check_result , struct xsk_tx_metadata * meta )
484
516
{
485
517
struct mlx5e_xmit_data_frags * xdptxdf =
486
518
container_of (xdptxd , struct mlx5e_xmit_data_frags , xd );
@@ -599,6 +631,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
599
631
sq -> pc ++ ;
600
632
}
601
633
634
+ xsk_tx_metadata_request (meta , & mlx5e_xsk_tx_metadata_ops , eseg );
635
+
602
636
sq -> doorbell_cseg = cseg ;
603
637
604
638
stats -> xmit ++ ;
@@ -608,7 +642,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
608
642
static void mlx5e_free_xdpsq_desc (struct mlx5e_xdpsq * sq ,
609
643
struct mlx5e_xdp_wqe_info * wi ,
610
644
u32 * xsk_frames ,
611
- struct xdp_frame_bulk * bq )
645
+ struct xdp_frame_bulk * bq ,
646
+ struct mlx5e_cq * cq ,
647
+ struct mlx5_cqe64 * cqe )
612
648
{
613
649
struct mlx5e_xdp_info_fifo * xdpi_fifo = & sq -> db .xdpi_fifo ;
614
650
u16 i ;
@@ -668,10 +704,24 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
668
704
669
705
break ;
670
706
}
671
- case MLX5E_XDP_XMIT_MODE_XSK :
707
+ case MLX5E_XDP_XMIT_MODE_XSK : {
672
708
/* AF_XDP send */
709
+ struct xsk_tx_metadata_compl * compl = NULL ;
710
+ struct mlx5e_xsk_tx_complete priv = {
711
+ .cqe = cqe ,
712
+ .cq = cq ,
713
+ };
714
+
715
+ if (xp_tx_metadata_enabled (sq -> xsk_pool )) {
716
+ xdpi = mlx5e_xdpi_fifo_pop (xdpi_fifo );
717
+ compl = & xdpi .xsk_meta ;
718
+
719
+ xsk_tx_metadata_complete (compl , & mlx5e_xsk_tx_metadata_ops , & priv );
720
+ }
721
+
673
722
(* xsk_frames )++ ;
674
723
break ;
724
+ }
675
725
default :
676
726
WARN_ON_ONCE (true);
677
727
}
@@ -720,7 +770,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
720
770
721
771
sqcc += wi -> num_wqebbs ;
722
772
723
- mlx5e_free_xdpsq_desc (sq , wi , & xsk_frames , & bq );
773
+ mlx5e_free_xdpsq_desc (sq , wi , & xsk_frames , & bq , cq , cqe );
724
774
} while (!last_wqe );
725
775
726
776
if (unlikely (get_cqe_opcode (cqe ) != MLX5_CQE_REQ )) {
@@ -767,7 +817,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
767
817
768
818
sq -> cc += wi -> num_wqebbs ;
769
819
770
- mlx5e_free_xdpsq_desc (sq , wi , & xsk_frames , & bq );
820
+ mlx5e_free_xdpsq_desc (sq , wi , & xsk_frames , & bq , NULL , NULL );
771
821
}
772
822
773
823
xdp_flush_frame_bulk (& bq );
@@ -840,7 +890,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
840
890
}
841
891
842
892
ret = INDIRECT_CALL_2 (sq -> xmit_xdp_frame , mlx5e_xmit_xdp_frame_mpwqe ,
843
- mlx5e_xmit_xdp_frame , sq , xdptxd , 0 );
893
+ mlx5e_xmit_xdp_frame , sq , xdptxd , 0 , NULL );
844
894
if (unlikely (!ret )) {
845
895
int j ;
846
896
0 commit comments