@@ -45,8 +45,8 @@ struct veth_stats {
 	u64			xdp_drops;
 	u64			xdp_tx;
 	u64			xdp_tx_err;
-	u64			xdp_xmit;
-	u64			xdp_xmit_err;
+	u64			peer_tq_xdp_xmit;
+	u64			peer_tq_xdp_xmit_err;
 };
 
 struct veth_rq_stats {
@@ -92,17 +92,22 @@ struct veth_q_stat_desc {
 static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
 	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
 	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
-	{ "rx_drops",		VETH_RQ_STAT(rx_drops) },
-	{ "rx_xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
-	{ "rx_xdp_drops",	VETH_RQ_STAT(xdp_drops) },
-	{ "rx_xdp_tx",		VETH_RQ_STAT(xdp_tx) },
-	{ "rx_xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
-	{ "tx_xdp_xmit",	VETH_RQ_STAT(xdp_xmit) },
-	{ "tx_xdp_xmit_errors",	VETH_RQ_STAT(xdp_xmit_err) },
+	{ "drops",		VETH_RQ_STAT(rx_drops) },
+	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
+	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
+	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
+	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
 };
 
 #define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)
 
+static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
+	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
+	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
+};
+
+#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
+
 static struct {
 	const char string[ETH_GSTRING_LEN];
 } ethtool_stats_keys[] = {
@@ -142,6 +147,14 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 				p += ETH_GSTRING_LEN;
 			}
 		}
+		for (i = 0; i < dev->real_num_tx_queues; i++) {
+			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "tx_queue_%u_%.18s",
+					 i, veth_tq_stats_desc[j].desc);
+				p += ETH_GSTRING_LEN;
+			}
+		}
 		break;
 	}
 }
@@ -151,7 +164,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
 	switch (sset) {
 	case ETH_SS_STATS:
 		return ARRAY_SIZE(ethtool_stats_keys) +
-		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
+		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
+		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -160,7 +174,7 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
 static void veth_get_ethtool_stats(struct net_device *dev,
 				   struct ethtool_stats *stats, u64 *data)
 {
-	struct veth_priv *priv = netdev_priv(dev);
+	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
 	struct net_device *peer = rtnl_dereference(priv->peer);
 	int i, j, idx;
 
@@ -181,6 +195,26 @@ static void veth_get_ethtool_stats(struct net_device *dev,
 		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
 		idx += VETH_RQ_STATS_LEN;
 	}
+
+	if (!peer)
+		return;
+
+	rcv_priv = netdev_priv(peer);
+	for (i = 0; i < peer->real_num_rx_queues; i++) {
+		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
+		const void *base = (void *)&rq_stats->vs;
+		unsigned int start, tx_idx = idx;
+		size_t offset;
+
+		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+		do {
+			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+				offset = veth_tq_stats_desc[j].offset;
+				data[tx_idx + j] += *(u64 *)(base + offset);
+			}
+		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+	}
 }
 
 static const struct ethtool_ops veth_ethtool_ops = {
@@ -301,25 +335,25 @@ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
 	struct veth_priv *priv = netdev_priv(dev);
 	int i;
 
-	result->xdp_xmit_err = 0;
+	result->peer_tq_xdp_xmit_err = 0;
 	result->xdp_packets = 0;
 	result->xdp_tx_err = 0;
 	result->xdp_bytes = 0;
 	result->rx_drops = 0;
 	for (i = 0; i < dev->num_rx_queues; i++) {
-		u64 packets, bytes, drops, xdp_tx_err, xdp_xmit_err;
+		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
 		struct veth_rq_stats *stats = &priv->rq[i].stats;
 		unsigned int start;
 
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
-			xdp_xmit_err = stats->vs.xdp_xmit_err;
+			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
 			xdp_tx_err = stats->vs.xdp_tx_err;
 			packets = stats->vs.xdp_packets;
 			bytes = stats->vs.xdp_bytes;
 			drops = stats->vs.rx_drops;
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-		result->xdp_xmit_err += xdp_xmit_err;
+		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
 		result->xdp_tx_err += xdp_tx_err;
 		result->xdp_packets += packets;
 		result->xdp_bytes += bytes;
@@ -340,8 +374,8 @@ static void veth_get_stats64(struct net_device *dev,
 	tot->tx_packets = packets;
 
 	veth_stats_rx(&rx, dev);
-	tot->tx_dropped += rx.xdp_xmit_err + rx.xdp_tx_err;
-	tot->rx_dropped = rx.rx_drops;
+	tot->tx_dropped += rx.xdp_tx_err;
+	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
 	tot->rx_bytes = rx.xdp_bytes;
 	tot->rx_packets = rx.xdp_packets;
 
@@ -353,7 +387,8 @@ static void veth_get_stats64(struct net_device *dev,
 		tot->rx_packets += packets;
 
 		veth_stats_rx(&rx, peer);
-		tot->rx_dropped += rx.xdp_xmit_err + rx.xdp_tx_err;
+		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+		tot->rx_dropped += rx.xdp_tx_err;
 		tot->tx_bytes += rx.xdp_bytes;
 		tot->tx_packets += rx.xdp_packets;
 	}
@@ -394,38 +429,28 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 			 u32 flags, bool ndo_xmit)
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
-	unsigned int qidx, max_len;
+	int i, ret = -ENXIO, drops = 0;
 	struct net_device *rcv;
-	int i, ret, drops = n;
+	unsigned int max_len;
 	struct veth_rq *rq;
 
-	rcu_read_lock();
-	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
-		rcu_read_unlock();
-		atomic64_add(drops, &priv->dropped);
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
-	}
 
+	rcu_read_lock();
 	rcv = rcu_dereference(priv->peer);
-	if (unlikely(!rcv)) {
-		rcu_read_unlock();
-		atomic64_add(drops, &priv->dropped);
-		return -ENXIO;
-	}
+	if (unlikely(!rcv))
+		goto out;
 
 	rcv_priv = netdev_priv(rcv);
-	qidx = veth_select_rxq(rcv);
-	rq = &rcv_priv->rq[qidx];
+	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
 	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
 	 * side. This means an XDP program is loaded on the peer and the peer
 	 * device is up.
 	 */
-	if (!rcu_access_pointer(rq->xdp_prog)) {
-		ret = -ENXIO;
-		goto drop;
-	}
+	if (!rcu_access_pointer(rq->xdp_prog))
+		goto out;
 
-	drops = 0;
 	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
 
 	spin_lock(&rq->xdp_ring.producer_lock);
@@ -445,18 +470,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 	__veth_xdp_flush(rq);
 
 	ret = n - drops;
-drop:
-	rq = &priv->rq[qidx];
-	u64_stats_update_begin(&rq->stats.syncp);
 	if (ndo_xmit) {
-		rq->stats.vs.xdp_xmit += n - drops;
-		rq->stats.vs.xdp_xmit_err += drops;
-	} else {
-		rq->stats.vs.xdp_tx += n - drops;
-		rq->stats.vs.xdp_tx_err += drops;
+		u64_stats_update_begin(&rq->stats.syncp);
+		rq->stats.vs.peer_tq_xdp_xmit += n - drops;
+		rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+		u64_stats_update_end(&rq->stats.syncp);
 	}
-	u64_stats_update_end(&rq->stats.syncp);
 
+out:
 	rcu_read_unlock();
 
 	return ret;
@@ -465,49 +486,63 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
 			     struct xdp_frame **frames, u32 flags)
 {
-	return veth_xdp_xmit(dev, n, frames, flags, true);
+	int err;
+
+	err = veth_xdp_xmit(dev, n, frames, flags, true);
+	if (err < 0) {
+		struct veth_priv *priv = netdev_priv(dev);
+
+		atomic64_add(n, &priv->dropped);
+	}
+
+	return err;
 }
 
-static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
 {
 	int sent, i, err = 0;
 
-	sent = veth_xdp_xmit(dev, bq->count, bq->q, 0, false);
+	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
 		for (i = 0; i < bq->count; i++)
 			xdp_return_frame(bq->q[i]);
 	}
-	trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);
+	trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
+
+	u64_stats_update_begin(&rq->stats.syncp);
+	rq->stats.vs.xdp_tx += sent;
+	rq->stats.vs.xdp_tx_err += bq->count - sent;
+	u64_stats_update_end(&rq->stats.syncp);
 
 	bq->count = 0;
 }
 
-static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
 {
-	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
 	struct net_device *rcv;
-	struct veth_rq *rq;
+	struct veth_rq *rcv_rq;
 
 	rcu_read_lock();
-	veth_xdp_flush_bq(dev, bq);
+	veth_xdp_flush_bq(rq, bq);
 	rcv = rcu_dereference(priv->peer);
 	if (unlikely(!rcv))
 		goto out;
 
 	rcv_priv = netdev_priv(rcv);
-	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
 	/* xdp_ring is initialized on receive side? */
-	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
 		goto out;
 
-	__veth_xdp_flush(rq);
+	__veth_xdp_flush(rcv_rq);
 out:
 	rcu_read_unlock();
 }
 
-static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
+static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
 		       struct veth_xdp_tx_bq *bq)
 {
 	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
@@ -516,7 +551,7 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
 		return -EOVERFLOW;
 
 	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
-		veth_xdp_flush_bq(dev, bq);
+		veth_xdp_flush_bq(rq, bq);
 
 	bq->q[bq->count++] = frame;
 
@@ -559,7 +594,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
 			orig_frame = *frame;
 			xdp.data_hard_start = head;
 			xdp.rxq->mem = frame->mem;
-			if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
 				trace_xdp_exception(rq->dev, xdp_prog, act);
 				frame = &orig_frame;
 				stats->rx_drops++;
@@ -692,7 +727,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
 		get_page(virt_to_page(xdp.data));
 		consume_skb(skb);
 		xdp.rxq->mem = rq->xdp_mem;
-		if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
 			trace_xdp_exception(rq->dev, xdp_prog, act);
 			stats->rx_drops++;
 			goto err_xdp;
@@ -817,7 +852,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (stats.xdp_tx > 0)
-		veth_xdp_flush(rq->dev, &bq);
+		veth_xdp_flush(rq, &bq);
 	if (stats.xdp_redirect > 0)
 		xdp_do_flush();
 	xdp_clear_return_frame_no_direct();
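
Note on the veth_get_ethtool_stats() hunk above: the xdp_xmit/xdp_xmit_err counters are now accounted on the peer's receive queues (hence the peer_tq_ prefix), and the ethtool path folds them back into per-tx-queue slots via (i % dev->real_num_tx_queues), accumulating with += because several peer rx queues can map to the same tx slot. The following is a minimal standalone sketch of that aggregation only — not kernel code; the queue counts and counter values are invented for illustration.

/* Userspace sketch of the "peer rx queue -> local tx slot" folding used in
 * the ethtool hunk above. Assumed setup: peer has 4 rx queues, local device
 * has 2 tx queues; the sample counter values are made up.
 */
#include <stdio.h>

#define VETH_TQ_STATS_LEN 2	/* xdp_xmit, xdp_xmit_errors */

int main(void)
{
	/* per-peer-rx-queue {xdp_xmit, xdp_xmit_err} counters */
	unsigned long long peer_rq[4][VETH_TQ_STATS_LEN] = {
		{ 100, 1 }, { 200, 2 }, { 300, 3 }, { 400, 4 },
	};
	unsigned int real_num_tx_queues = 2;	/* local tx queues */
	unsigned long long data[2][VETH_TQ_STATS_LEN] = { { 0, 0 }, { 0, 0 } };
	unsigned int i, j;

	/* same mapping as the kernel hunk: peer rx queue i contributes to
	 * local tx queue (i % real_num_tx_queues); values are accumulated
	 * with += because multiple rx queues may share one tx slot.
	 */
	for (i = 0; i < 4; i++) {
		unsigned int tx = i % real_num_tx_queues;

		for (j = 0; j < VETH_TQ_STATS_LEN; j++)
			data[tx][j] += peer_rq[i][j];
	}

	for (i = 0; i < real_num_tx_queues; i++)
		printf("tx_queue_%u_xdp_xmit=%llu tx_queue_%u_xdp_xmit_errors=%llu\n",
		       i, data[i][0], i, data[i][1]);

	return 0;
}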