@@ -344,6 +344,7 @@ enum {
 	ETHTOOL_XDP_REDIRECT,
 	ETHTOOL_XDP_PASS,
 	ETHTOOL_XDP_DROP,
+	ETHTOOL_XDP_XMIT,
 	ETHTOOL_XDP_TX,
 	ETHTOOL_MAX_STATS,
 };
@@ -399,10 +400,11 @@ static const struct mvneta_statistic mvneta_statistics[] = {
 	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
 	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
-	{ ETHTOOL_XDP_REDIRECT, T_SW, "xdp_redirect", },
-	{ ETHTOOL_XDP_PASS, T_SW, "xdp_pass", },
-	{ ETHTOOL_XDP_DROP, T_SW, "xdp_drop", },
-	{ ETHTOOL_XDP_TX, T_SW, "xdp_tx", },
+	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
+	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
+	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
+	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
 };
 
 struct mvneta_stats {
@@ -414,6 +416,7 @@ struct mvneta_stats {
 	u64	xdp_redirect;
 	u64	xdp_pass;
 	u64	xdp_drop;
+	u64	xdp_xmit;
 	u64	xdp_tx;
 };
 
@@ -2012,7 +2015,6 @@ static int
 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 			struct xdp_frame *xdpf, bool dma_map)
 {
-	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	struct mvneta_tx_desc *tx_desc;
 	struct mvneta_tx_buf *buf;
 	dma_addr_t dma_addr;
@@ -2047,12 +2049,6 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 	tx_desc->buf_phys_addr = dma_addr;
 	tx_desc->data_size = xdpf->len;
 
-	u64_stats_update_begin(&stats->syncp);
-	stats->es.ps.tx_bytes += xdpf->len;
-	stats->es.ps.tx_packets++;
-	stats->es.ps.xdp_tx++;
-	u64_stats_update_end(&stats->syncp);
-
 	mvneta_txq_inc_put(txq);
 	txq->pending++;
 	txq->count++;
@@ -2079,8 +2075,17 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
 
 	__netif_tx_lock(nq, cpu);
 	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
-	if (ret == MVNETA_XDP_TX)
+	if (ret == MVNETA_XDP_TX) {
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->es.ps.tx_bytes += xdpf->len;
+		stats->es.ps.tx_packets++;
+		stats->es.ps.xdp_tx++;
+		u64_stats_update_end(&stats->syncp);
+
 		mvneta_txq_pend_desc_add(pp, txq, 0);
+	}
 	__netif_tx_unlock(nq);
 
 	return ret;
@@ -2091,10 +2096,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
 		struct xdp_frame **frames, u32 flags)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+	int i, nxmit_byte = 0, nxmit = num_frame;
 	int cpu = smp_processor_id();
 	struct mvneta_tx_queue *txq;
 	struct netdev_queue *nq;
-	int i, drops = 0;
 	u32 ret;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -2106,17 +2112,25 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
 	__netif_tx_lock(nq, cpu);
 	for (i = 0; i < num_frame; i++) {
 		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
-		if (ret != MVNETA_XDP_TX) {
+		if (ret == MVNETA_XDP_TX) {
+			nxmit_byte += frames[i]->len;
+		} else {
 			xdp_return_frame_rx_napi(frames[i]);
-			drops++;
+			nxmit--;
 		}
 	}
 
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		mvneta_txq_pend_desc_add(pp, txq, 0);
 	__netif_tx_unlock(nq);
 
-	return num_frame - drops;
+	u64_stats_update_begin(&stats->syncp);
+	stats->es.ps.tx_bytes += nxmit_byte;
+	stats->es.ps.tx_packets += nxmit;
+	stats->es.ps.xdp_xmit += nxmit;
+	u64_stats_update_end(&stats->syncp);
+
+	return nxmit;
 }
 
 static int
@@ -4484,6 +4498,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
 		u64 xdp_redirect;
 		u64 xdp_pass;
 		u64 xdp_drop;
+		u64 xdp_xmit;
 		u64 xdp_tx;
 
 		stats = per_cpu_ptr(pp->stats, cpu);
@@ -4494,6 +4509,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
 			xdp_redirect = stats->es.ps.xdp_redirect;
 			xdp_pass = stats->es.ps.xdp_pass;
 			xdp_drop = stats->es.ps.xdp_drop;
+			xdp_xmit = stats->es.ps.xdp_xmit;
 			xdp_tx = stats->es.ps.xdp_tx;
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
@@ -4502,6 +4518,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
 		es->ps.xdp_redirect += xdp_redirect;
 		es->ps.xdp_pass += xdp_pass;
 		es->ps.xdp_drop += xdp_drop;
+		es->ps.xdp_xmit += xdp_xmit;
 		es->ps.xdp_tx += xdp_tx;
 	}
 }
@@ -4555,6 +4572,9 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 		case ETHTOOL_XDP_TX:
 			pp->ethtool_stats[i] = stats.ps.xdp_tx;
 			break;
+		case ETHTOOL_XDP_XMIT:
+			pp->ethtool_stats[i] = stats.ps.xdp_xmit;
+			break;
 		}
 		break;
 	}
0 commit comments