@@ -113,6 +113,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
 
 	res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
 				   FBNIC_TX_DESC_WAKEUP);
+	if (!res) {
+		u64_stats_update_begin(&ring->stats.syncp);
+		ring->stats.twq.stop++;
+		u64_stats_update_end(&ring->stats.syncp);
+	}
 
 	return !res;
 }
@@ -191,19 +196,25 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
 					skb->csum_offset / 2));
 
 	*meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+	u64_stats_update_begin(&ring->stats.syncp);
+	ring->stats.twq.csum_partial++;
+	u64_stats_update_end(&ring->stats.syncp);
 
 	*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
			     FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
 	return false;
 }
 
 static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+	      u64 *csum_cmpl, u64 *csum_none)
 {
 	skb_checksum_none_assert(skb);
 
-	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+		(*csum_none)++;
 		return;
+	}
 
 	if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +223,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
 
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = (__force __wsum)csum;
+		(*csum_cmpl)++;
 	}
 }
 
@@ -444,7 +456,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
 	if (unlikely(discard)) {
 		u64_stats_update_begin(&ring->stats.syncp);
 		ring->stats.dropped += total_packets;
-		ring->stats.ts_lost += ts_lost;
+		ring->stats.twq.ts_lost += ts_lost;
 		u64_stats_update_end(&ring->stats.syncp);
 
 		netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -456,9 +468,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
 	ring->stats.packets += total_packets;
 	u64_stats_update_end(&ring->stats.syncp);
 
-	netif_txq_completed_wake(txq, total_packets, total_bytes,
-				 fbnic_desc_unused(ring),
-				 FBNIC_TX_DESC_WAKEUP);
+	if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+				      fbnic_desc_unused(ring),
+				      FBNIC_TX_DESC_WAKEUP)) {
+		u64_stats_update_begin(&ring->stats.syncp);
+		ring->stats.twq.wake++;
+		u64_stats_update_end(&ring->stats.syncp);
+	}
 }
 
 static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -507,7 +523,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
 
 	skb_tstamp_tx(skb, &hwtstamp);
 	u64_stats_update_begin(&ring->stats.syncp);
-	ring->stats.ts_packets++;
+	ring->stats.twq.ts_packets++;
 	u64_stats_update_end(&ring->stats.syncp);
 }
 
@@ -661,8 +677,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
 		struct page *page;
 
 		page = page_pool_dev_alloc_pages(nv->page_pool);
-		if (!page)
+		if (!page) {
+			u64_stats_update_begin(&bdq->stats.syncp);
+			bdq->stats.rx.alloc_failed++;
+			u64_stats_update_end(&bdq->stats.syncp);
+
 			break;
+		}
 
 		fbnic_page_pool_init(bdq, i, page);
 		fbnic_bd_prep(bdq, i, page);
@@ -875,12 +896,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
 
 static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
				      u64 rcd, struct sk_buff *skb,
-				      struct fbnic_q_triad *qt)
+				      struct fbnic_q_triad *qt,
+				      u64 *csum_cmpl, u64 *csum_none)
 {
 	struct net_device *netdev = nv->napi.dev;
 	struct fbnic_ring *rcq = &qt->cmpl;
 
-	fbnic_rx_csum(rcd, skb, rcq);
+	fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
 
 	if (netdev->features & NETIF_F_RXHASH)
 		skb_set_hash(skb,
@@ -898,7 +920,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
 static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
			   struct fbnic_q_triad *qt, int budget)
 {
-	unsigned int packets = 0, bytes = 0, dropped = 0;
+	unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+	u64 csum_complete = 0, csum_none = 0;
 	struct fbnic_ring *rcq = &qt->cmpl;
 	struct fbnic_pkt_buff *pkt;
 	s32 head0 = -1, head1 = -1;
@@ -947,14 +970,22 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
 
 		/* Populate skb and invalidate XDP */
 		if (!IS_ERR_OR_NULL(skb)) {
-			fbnic_populate_skb_fields(nv, rcd, skb, qt);
+			fbnic_populate_skb_fields(nv, rcd, skb, qt,
+						  &csum_complete,
+						  &csum_none);
 
 			packets++;
 			bytes += skb->len;
 
 			napi_gro_receive(&nv->napi, skb);
 		} else {
-			dropped++;
+			if (!skb) {
+				alloc_failed++;
+				dropped++;
+			} else {
+				dropped++;
+			}
+
 			fbnic_put_pkt_buff(nv, pkt, 1);
 		}
 
@@ -977,6 +1008,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
 	/* Re-add ethernet header length (removed in fbnic_build_skb) */
 	rcq->stats.bytes += ETH_HLEN * packets;
 	rcq->stats.dropped += dropped;
+	rcq->stats.rx.alloc_failed += alloc_failed;
+	rcq->stats.rx.csum_complete += csum_complete;
+	rcq->stats.rx.csum_none += csum_none;
 	u64_stats_update_end(&rcq->stats.syncp);
 
 	/* Unmap and free processed buffers */
@@ -1054,6 +1088,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
 	fbn->rx_stats.bytes += stats->bytes;
 	fbn->rx_stats.packets += stats->packets;
 	fbn->rx_stats.dropped += stats->dropped;
+	fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+	fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+	fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+	/* Remember to add new stats here */
+	BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
 }
 
 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
@@ -1065,8 +1104,13 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
 	fbn->tx_stats.bytes += stats->bytes;
 	fbn->tx_stats.packets += stats->packets;
 	fbn->tx_stats.dropped += stats->dropped;
-	fbn->tx_stats.ts_lost += stats->ts_lost;
-	fbn->tx_stats.ts_packets += stats->ts_packets;
+	fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+	fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+	fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+	fbn->tx_stats.twq.stop += stats->twq.stop;
+	fbn->tx_stats.twq.wake += stats->twq.wake;
+	/* Remember to add new stats here */
+	BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 5);
 }
 
 static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
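Note on the stats layout: the two BUILD_BUG_ON checks above pin the new twq block at five u64 counters and the rx block at three, so anyone adding a counter is forced to revisit the aggregation functions. Below is a minimal sketch of a fbnic_queue_stats layout consistent with those checks and with the field accesses in this patch; the real definition lives elsewhere in the driver (presumably fbnic_txrx.h), and the union is an assumption based on a ring being either Tx or Rx, never both.

/* Sketch only -- field names taken from the accesses in this patch,
 * not from the driver header. Requires <linux/u64_stats_sync.h>.
 */
struct fbnic_queue_stats {
	u64 packets;
	u64 bytes;
	union {
		struct {
			u64 csum_partial;
			u64 ts_packets;
			u64 ts_lost;
			u64 stop;
			u64 wake;
		} twq;	/* sizeof / 8 == 5, matching the Tx BUILD_BUG_ON */
		struct {
			u64 alloc_failed;
			u64 csum_complete;
			u64 csum_none;
		} rx;	/* sizeof / 8 == 3, matching the Rx BUILD_BUG_ON */
	};
	u64 dropped;
	struct u64_stats_sync syncp;
};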
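Every counter bump in this patch sits inside a u64_stats_update_begin()/u64_stats_update_end() pair; that pair exists so 64-bit counters read coherently on 32-bit kernels, where a u64 load is not atomic. The `!res` and `!netif_txq_completed_wake(...)` tests work because those helpers return 0 precisely when the queue was actually stopped or woken, so stop/wake count transitions rather than polls. A reader of these counters would use the matching fetch/retry loop; the sketch below assumes the struct layout above, and fbnic_read_twq_stats() is a hypothetical helper name, not something this patch adds.

/* Hypothetical reader for the twq stop/wake counters; pairs with the
 * u64_stats_update_begin()/u64_stats_update_end() writers above.
 */
static void fbnic_read_twq_stats(struct fbnic_ring *ring,
				 u64 *stop, u64 *wake)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->stats.syncp);
		*stop = ring->stats.twq.stop;
		*wake = ring->stats.twq.wake;
	} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
}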