@@ -629,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 
 static void netcp_empty_rx_queue(struct netcp_intf *netcp)
 {
+	struct netcp_stats *rx_stats = &netcp->stats;
 	struct knav_dma_desc *desc;
 	unsigned int dma_sz;
 	dma_addr_t dma;
@@ -642,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp)
 		if (unlikely(!desc)) {
 			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
 				__func__);
-			netcp->ndev->stats.rx_errors++;
+			rx_stats->rx_errors++;
 			continue;
 		}
 		netcp_free_rx_desc_chain(netcp, desc);
-		netcp->ndev->stats.rx_dropped++;
+		rx_stats->rx_dropped++;
 	}
 }
 
 static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 {
+	struct netcp_stats *rx_stats = &netcp->stats;
 	unsigned int dma_sz, buf_len, org_buf_len;
 	struct knav_dma_desc *desc, *ndesc;
 	unsigned int pkt_sz = 0, accum_sz;
@@ -757,8 +759,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 		if (unlikely(ret)) {
 			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
-			netcp->ndev->stats.rx_errors++;
 			/* Free the primary descriptor */
+			rx_stats->rx_dropped++;
 			knav_pool_desc_put(netcp->rx_pool, desc);
 			dev_kfree_skb(skb);
 			return 0;
@@ -767,8 +769,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	/* Free the primary descriptor */
 	knav_pool_desc_put(netcp->rx_pool, desc);
 
-	netcp->ndev->stats.rx_packets++;
-	netcp->ndev->stats.rx_bytes += skb->len;
+	u64_stats_update_begin(&rx_stats->syncp_rx);
+	rx_stats->rx_packets++;
+	rx_stats->rx_bytes += skb->len;
+	u64_stats_update_end(&rx_stats->syncp_rx);
 
 	/* push skb up the stack */
 	skb->protocol = eth_type_trans(skb, netcp->ndev);
@@ -777,7 +781,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 
 free_desc:
 	netcp_free_rx_desc_chain(netcp, desc);
-	netcp->ndev->stats.rx_errors++;
+	rx_stats->rx_errors++;
 	return 0;
 }
 
@@ -1008,6 +1012,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
 static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					   unsigned int budget)
 {
+	struct netcp_stats *tx_stats = &netcp->stats;
 	struct knav_dma_desc *desc;
 	struct netcp_tx_cb *tx_cb;
 	struct sk_buff *skb;
@@ -1022,7 +1027,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
 		if (unlikely(!desc)) {
 			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
-			netcp->ndev->stats.tx_errors++;
+			tx_stats->tx_errors++;
 			continue;
 		}
 
@@ -1033,7 +1038,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
 		if (!skb) {
 			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
-			netcp->ndev->stats.tx_errors++;
+			tx_stats->tx_errors++;
 			continue;
 		}
 
@@ -1050,8 +1055,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 			netif_wake_subqueue(netcp->ndev, subqueue);
 		}
 
-		netcp->ndev->stats.tx_packets++;
-		netcp->ndev->stats.tx_bytes += skb->len;
+		u64_stats_update_begin(&tx_stats->syncp_tx);
+		tx_stats->tx_packets++;
+		tx_stats->tx_bytes += skb->len;
+		u64_stats_update_end(&tx_stats->syncp_tx);
 		dev_kfree_skb(skb);
 		pkts++;
 	}
@@ -1272,6 +1279,7 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
 static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_stats *tx_stats = &netcp->stats;
 	int subqueue = skb_get_queue_mapping(skb);
 	struct knav_dma_desc *desc;
 	int desc_count, ret = 0;
@@ -1287,7 +1295,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			/* If we get here, the skb has already been dropped */
 			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
-			ndev->stats.tx_dropped++;
+			tx_stats->tx_dropped++;
 			return ret;
 		}
 		skb->len = NETCP_MIN_PACKET_SIZE;
@@ -1315,7 +1323,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 
 drop:
-	ndev->stats.tx_dropped++;
+	tx_stats->tx_dropped++;
 	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
 	dev_kfree_skb(skb);
@@ -1897,12 +1905,46 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 	return 0;
 }
 
+static struct rtnl_link_stats64 *
+netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+{
+	struct netcp_intf *netcp = netdev_priv(ndev);
+	struct netcp_stats *p = &netcp->stats;
+	u64 rxpackets, rxbytes, txpackets, txbytes;
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
+		rxpackets = p->rx_packets;
+		rxbytes = p->rx_bytes;
+	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
+
+	do {
+		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
+		txpackets = p->tx_packets;
+		txbytes = p->tx_bytes;
+	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
+
+	stats->rx_packets = rxpackets;
+	stats->rx_bytes = rxbytes;
+	stats->tx_packets = txpackets;
+	stats->tx_bytes = txbytes;
+
+	/* The following are stored as 32 bit */
+	stats->rx_errors = p->rx_errors;
+	stats->rx_dropped = p->rx_dropped;
+	stats->tx_dropped = p->tx_dropped;
+
+	return stats;
+}
+
 static const struct net_device_ops netcp_netdev_ops = {
 	.ndo_open		= netcp_ndo_open,
 	.ndo_stop		= netcp_ndo_stop,
 	.ndo_start_xmit		= netcp_ndo_start_xmit,
 	.ndo_set_rx_mode	= netcp_set_rx_mode,
 	.ndo_do_ioctl		= netcp_ndo_ioctl,
+	.ndo_get_stats64	= netcp_get_stats,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
@@ -1949,6 +1991,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
 	INIT_LIST_HEAD(&netcp->txhook_list_head);
 	INIT_LIST_HEAD(&netcp->rxhook_list_head);
 	INIT_LIST_HEAD(&netcp->addr_list);
+	u64_stats_init(&netcp->stats.syncp_rx);
+	u64_stats_init(&netcp->stats.syncp_tx);
 	netcp->netcp_device = netcp_device;
 	netcp->dev = netcp_device->device;
 	netcp->ndev = ndev;