@@ -1811,7 +1811,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
 		if (!skb) {
 			bnxt_abort_tpa(cpr, idx, agg_bufs);
-			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+			cpr->sw_stats->rx.rx_oom_discards += 1;
 			return NULL;
 		}
 	} else {
@@ -1821,7 +1821,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
 		if (!new_data) {
 			bnxt_abort_tpa(cpr, idx, agg_bufs);
-			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+			cpr->sw_stats->rx.rx_oom_discards += 1;
 			return NULL;
 		}
 
@@ -1837,7 +1837,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		if (!skb) {
 			skb_free_frag(data);
 			bnxt_abort_tpa(cpr, idx, agg_bufs);
-			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+			cpr->sw_stats->rx.rx_oom_discards += 1;
 			return NULL;
 		}
 		skb_reserve(skb, bp->rx_offset);
@@ -1848,7 +1848,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
 		if (!skb) {
 			/* Page reuse already handled by bnxt_rx_pages(). */
-			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+			cpr->sw_stats->rx.rx_oom_discards += 1;
 			return NULL;
 		}
 	}
@@ -2106,7 +2106,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 		rc = -EIO;
 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
-			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
+			bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
 			if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
@@ -2222,7 +2222,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	} else {
 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
 			if (dev->features & NETIF_F_RXCSUM)
-				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
+				bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
 		}
 	}
 
@@ -2259,7 +2259,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	return rc;
 
 oom_next_rx:
-	cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+	cpr->sw_stats->rx.rx_oom_discards += 1;
 	rc = -ENOMEM;
 	goto next_rx;
 }
@@ -2308,7 +2308,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
 	}
 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
 	if (rc && rc != -EBUSY)
-		cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
+		cpr->sw_stats->rx.rx_netpoll_discards += 1;
 	return rc;
 }
 
@@ -3951,6 +3951,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 			if (rc)
 				return rc;
 			cpr2->bnapi = bnapi;
+			cpr2->sw_stats = cpr->sw_stats;
 			cpr2->cp_idx = k;
 			if (!k && rx) {
 				bp->rx_ring[i].rx_cpr = cpr2;
@@ -4792,6 +4793,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 
 		bnxt_free_stats_mem(bp, &cpr->stats);
+
+		kfree(cpr->sw_stats);
+		cpr->sw_stats = NULL;
 	}
 }
 
@@ -4806,6 +4810,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 
+		cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
+		if (!cpr->sw_stats)
+			return -ENOMEM;
+
 		cpr->stats.len = size;
 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
 		if (rc)
@@ -10811,9 +10819,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
10811
10819
10812
10820
cpr = & bnapi -> cp_ring ;
10813
10821
if (bnapi -> tx_fault )
10814
- cpr -> sw_stats . tx .tx_resets ++ ;
10822
+ cpr -> sw_stats -> tx .tx_resets ++ ;
10815
10823
if (bnapi -> in_reset )
10816
- cpr -> sw_stats . rx .rx_resets ++ ;
10824
+ cpr -> sw_stats -> rx .rx_resets ++ ;
10817
10825
napi_disable (& bnapi -> napi );
10818
10826
if (bnapi -> rx_ring )
10819
10827
cancel_work_sync (& cpr -> dim .work );
@@ -12338,8 +12346,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
 
 		stats->rx_dropped +=
-			cpr->sw_stats.rx.rx_netpoll_discards +
-			cpr->sw_stats.rx.rx_oom_discards;
+			cpr->sw_stats->rx.rx_netpoll_discards +
+			cpr->sw_stats->rx.rx_oom_discards;
 	}
 }
 
@@ -12406,7 +12414,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
 					struct bnxt_total_ring_err_stats *stats,
 					struct bnxt_cp_ring_info *cpr)
 {
-	struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+	struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
 	u64 *hw_stats = cpr->stats.sw_stats;
 
 	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
@@ -13249,7 +13257,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 		rxr->bnapi->in_reset = false;
 		bnxt_alloc_one_rx_ring(bp, i);
 		cpr = &rxr->bnapi->cp_ring;
-		cpr->sw_stats.rx.rx_resets++;
+		cpr->sw_stats->rx.rx_resets++;
 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
@@ -13461,7 +13469,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
 			bnxt_dbg_hwrm_ring_info_get(bp,
 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
 				fw_ring_id, &val[0], &val[1]);
-			cpr->sw_stats.cmn.missed_irqs++;
+			cpr->sw_stats->cmn.missed_irqs++;
 		}
 	}
 }
@@ -14769,7 +14777,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
 	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
 
-	stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+	stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
 }
 
 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
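Taken together, these hunks turn the per-completion-ring software statistics into a separately allocated block: bnxt_alloc_stats() kzalloc()s it, bnxt_free_ring_stats() frees and NULLs it, and each child completion ring in bnxt_alloc_cp_rings() shares the parent's pointer (cpr2->sw_stats = cpr->sw_stats), so every cpr->sw_stats-> increment lands in the same counters. Below is a minimal, self-contained sketch of that shared-pointer pattern; the struct and function names are hypothetical stand-ins written in plain userspace C, not the driver's actual types.

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for bnxt_sw_stats / bnxt_cp_ring_info. */
	struct sw_stats {
		unsigned long rx_oom_discards;
	};

	struct cp_ring {
		struct sw_stats *sw_stats;	/* a pointer now, not an embedded struct */
	};

	int main(void)
	{
		struct cp_ring parent = { 0 }, child = { 0 };

		/* The parent ring owns the allocation (kzalloc() in the driver). */
		parent.sw_stats = calloc(1, sizeof(*parent.sw_stats));
		if (!parent.sw_stats)
			return 1;

		/* A child ring shares the same block, like cpr2->sw_stats = cpr->sw_stats. */
		child.sw_stats = parent.sw_stats;

		/* Increments from either ring accumulate in one place. */
		parent.sw_stats->rx_oom_discards += 1;
		child.sw_stats->rx_oom_discards += 1;
		printf("rx_oom_discards = %lu\n", parent.sw_stats->rx_oom_discards); /* prints 2 */

		/* Freed once, by the owner (kfree() in bnxt_free_ring_stats()). */
		free(parent.sw_stats);
		parent.sw_stats = NULL;
		child.sw_stats = NULL;
		return 0;
	}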