@@ -1625,7 +1625,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
 			bnxt_sched_reset(bp, rxr);
 		}
-		goto next_rx;
+		goto next_rx_no_len;
 	}
 
 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1706,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	rc = 1;
 
 next_rx:
-	rxr->rx_prod = NEXT_RX(prod);
-	rxr->rx_next_cons = NEXT_RX(cons);
-
 	cpr->rx_packets += 1;
 	cpr->rx_bytes += len;
 
+next_rx_no_len:
+	rxr->rx_prod = NEXT_RX(prod);
+	rxr->rx_next_cons = NEXT_RX(cons);
+
 next_rx_no_prod_no_len:
 	*raw_cons = tmp_raw_cons;
 
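The two bnxt_rx_pkt() hunks above belong together: on an RX buffer error the old code jumped to next_rx, which updates cpr->rx_bytes from len before len has been read out of the completion, while the ring indices still need to advance. The new next_rx_no_len label keeps the index advancement for the error path but skips the packet and byte accounting. A minimal standalone sketch of that control flow follows; the types, the NEXT_IDX() helper and the error values are hypothetical stand-ins, not driver code.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's ring state. */
struct mock_ring { unsigned int prod, cons, packets, bytes; };

#define NEXT_IDX(i) (((i) + 1) & 0xffffu)	/* stand-in for NEXT_RX() */

static int mock_rx_pkt(struct mock_ring *r, int buffer_error, unsigned int pkt_len)
{
	unsigned int len = 0;
	int rc;

	if (buffer_error) {
		rc = -5;		/* -EIO in the driver */
		goto next_rx_no_len;	/* new label: skip len-based accounting */
	}

	len = pkt_len;			/* only known on the good path */
	rc = 1;
	goto next_rx;			/* explicit; other good paths land here too */

next_rx:
	r->packets += 1;
	r->bytes += len;

next_rx_no_len:
	r->prod = NEXT_IDX(r->prod);	/* ring indices advance on both paths */
	r->cons = NEXT_IDX(r->cons);
	return rc;
}

int main(void)
{
	struct mock_ring r = {0, 0, 0, 0};

	mock_rx_pkt(&r, 0, 100);	/* good packet: bytes counted */
	mock_rx_pkt(&r, 1, 0);		/* buffer error: indices still advance */
	printf("prod=%u cons=%u packets=%u bytes=%u\n",
	       r.prod, r.cons, r.packets, r.bytes);
	return 0;
}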
@@ -5135,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-		u32 cmpl_ring_id;
 
-		cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+
 			hwrm_ring_free_send_msg(bp, ring,
 						RING_FREE_REQ_RING_TYPE_TX,
 						close_path ? cmpl_ring_id :
@@ -5151,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
 		u32 grp_idx = rxr->bnapi->index;
-		u32 cmpl_ring_id;
 
-		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
 			hwrm_ring_free_send_msg(bp, ring,
 						RING_FREE_REQ_RING_TYPE_RX,
 						close_path ? cmpl_ring_id :
@@ -5173,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
 		u32 grp_idx = rxr->bnapi->index;
-		u32 cmpl_ring_id;
 
-		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
 			hwrm_ring_free_send_msg(bp, ring, type,
 						close_path ? cmpl_ring_id :
 						INVALID_HW_RING_ID);
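The three bnxt_hwrm_ring_free() hunks above apply the same pattern to the TX, RX and RX aggregation rings: the completion ring id lookup moves inside the fw_ring_id validity check, so bnxt_cp_ring_for_tx()/bnxt_cp_ring_for_rx() only run for rings that were actually brought up and cannot trip over uninitialized state in error paths. A hedged, self-contained sketch of the resulting shape; every name and type below is a made-up stand-in:

#include <stdio.h>
#include <stddef.h>

#define INVALID_HW_RING_ID 0xffffu

/* Hypothetical stand-ins: a ring plus the state the id lookup depends on. */
struct mock_ring {
	unsigned int fw_ring_id;
	unsigned int *cp_state;		/* NULL until the ring is fully set up */
};

static unsigned int mock_cp_ring_for_tx(const struct mock_ring *r)
{
	return *r->cp_state;		/* would crash for a never-initialized ring */
}

static void mock_ring_free(struct mock_ring *r)
{
	/* After the fix: only look up the completion ring for live rings. */
	if (r->fw_ring_id != INVALID_HW_RING_ID) {
		unsigned int cmpl_ring_id = mock_cp_ring_for_tx(r);

		printf("freeing ring %u via cmpl ring %u\n",
		       r->fw_ring_id, cmpl_ring_id);
		r->fw_ring_id = INVALID_HW_RING_ID;
	}
}

int main(void)
{
	unsigned int cp = 7;
	struct mock_ring live = { .fw_ring_id = 3, .cp_state = &cp };
	struct mock_ring dead = { .fw_ring_id = INVALID_HW_RING_ID, .cp_state = NULL };

	mock_ring_free(&live);	/* prints and frees */
	mock_ring_free(&dead);	/* safely skipped; the old order would have dereferenced NULL */
	return 0;
}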
@@ -5315,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
 	req->num_tx_rings = cpu_to_le16(tx_rings);
 	if (BNXT_NEW_RM(bp)) {
 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
 			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
 			enables |= tx_rings + ring_grps ?
-				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 			enables |= rx_rings ?
 				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
 		} else {
 			enables |= cp_rings ?
-				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 			enables |= ring_grps ?
 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
 				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5365,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
 	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
 	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
 			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		enables |= tx_rings + ring_grps ?
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 	} else {
 		enables |= cp_rings ?
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 		enables |= ring_grps ?
 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
 	}
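In both the PF and VF reservation hunks above, the NUM_STAT_CTXS enable bit is now driven by the dedicated stats argument instead of being bundled with the completion-ring bit, so statistics contexts are requested exactly when the caller asked for them. A small sketch of that bit-building pattern; the flag values and the function name are hypothetical, only the selection logic mirrors the diff:

#include <stdio.h>

/* Hypothetical flag values; the real FUNC_CFG_REQ_ENABLES_* constants differ. */
#define EN_NUM_RX_RINGS    0x01u
#define EN_NUM_CMPL_RINGS  0x02u
#define EN_NUM_STAT_CTXS   0x04u

static unsigned int build_enables(int rx_rings, int cp_rings, int stats)
{
	unsigned int enables = 0;

	enables |= rx_rings ? EN_NUM_RX_RINGS : 0;
	/* After the fix: stat contexts are enabled only when requested. */
	enables |= stats ? EN_NUM_STAT_CTXS : 0;
	enables |= cp_rings ? EN_NUM_CMPL_RINGS : 0;
	return enables;
}

int main(void)
{
	/* cp rings requested but no stat contexts: STAT_CTXS bit stays clear. */
	printf("enables=%#x\n", build_enables(4, 4, 0));
	/* stats requested as well: STAT_CTXS bit is set. */
	printf("enables=%#x\n", build_enables(4, 4, 2));
	return 0;
}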
@@ -6753,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
 	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
 	struct hwrm_port_qstats_ext_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
+	u32 tx_stat_size;
 	int rc;
 
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6762,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
 	req.port_id = cpu_to_le16(pf->port_id);
 	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
-	req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+	tx_stat_size = bp->hw_tx_port_stats_ext ?
+		       sizeof(*bp->hw_tx_port_stats_ext) : 0;
+	req.tx_stat_size = cpu_to_le16(tx_stat_size);
 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
 		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
-		bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+		bp->fw_tx_stats_ext_size = tx_stat_size ?
+			le16_to_cpu(resp->tx_stat_size) / 8 : 0;
 	} else {
 		bp->fw_rx_stats_ext_size = 0;
 		bp->fw_tx_stats_ext_size = 0;
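The two bnxt_hwrm_port_qstats_ext() hunks above guard the extended TX port statistics: tx_stat_size is reported to the firmware as 0 when hw_tx_port_stats_ext was never allocated, and fw_tx_stats_ext_size is forced to 0 in the same case, so the firmware is never given a size for a buffer that does not exist. A minimal sketch of the guard with hypothetical stand-in types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the extended TX port statistics block. */
struct tx_stats_ext { unsigned long long counters[16]; };

struct mock_dev {
	struct tx_stats_ext *hw_tx_port_stats_ext;	/* may be NULL */
	unsigned int fw_tx_stats_ext_size;
};

static void mock_query_port_stats(struct mock_dev *d, unsigned int fw_reported_size)
{
	/* Only advertise a TX stats buffer that actually exists. */
	unsigned int tx_stat_size = d->hw_tx_port_stats_ext ?
				    sizeof(*d->hw_tx_port_stats_ext) : 0;

	printf("tx_stat_size sent to fw: %u\n", tx_stat_size);

	/* Only trust the firmware's answer if TX stats were requested at all. */
	d->fw_tx_stats_ext_size = tx_stat_size ? fw_reported_size / 8 : 0;
}

int main(void)
{
	struct mock_dev with_buf = { .hw_tx_port_stats_ext = malloc(sizeof(struct tx_stats_ext)) };
	struct mock_dev without_buf = { .hw_tx_port_stats_ext = NULL };

	mock_query_port_stats(&with_buf, 128);		/* size taken from firmware */
	mock_query_port_stats(&without_buf, 128);	/* forced to 0 */
	printf("sizes: %u %u\n", with_buf.fw_tx_stats_ext_size,
	       without_buf.fw_tx_stats_ext_size);
	free(with_buf.hw_tx_port_stats_ext);
	return 0;
}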
@@ -8961,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 
 skip_uc:
 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc && vnic->mc_list_count) {
+		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+			    rc);
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	}
 	if (rc)
-		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
 			   rc);
 
 	return rc;
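The bnxt_cfg_rx_mode() hunk above adds a fallback when programming the multicast filter list fails: log the error, switch the VNIC to ALL_MCAST, drop the multicast list and retry the rx-mask call, so a filter-table failure degrades to accept-all-multicast instead of failing the whole rx-mode update (the error print also switches to %d for the signed rc). A rough standalone sketch of that retry-with-fallback shape; the helpers and the flag value are invented for illustration:

#include <stdio.h>

#define MASK_ALL_MCAST 0x10u		/* hypothetical stand-in flag */

struct mock_vnic {
	unsigned int rx_mask;
	unsigned int mc_list_count;
};

/* Hypothetical stand-in for the HWRM rx-mask call: pretend the firmware
 * rejects any request that carries an explicit multicast list. */
static int mock_set_rx_mask(struct mock_vnic *v)
{
	return v->mc_list_count ? -22 /* -EINVAL */ : 0;
}

static int mock_cfg_rx_mode(struct mock_vnic *v)
{
	int rc = mock_set_rx_mask(v);

	if (rc && v->mc_list_count) {
		printf("Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", rc);
		v->rx_mask |= MASK_ALL_MCAST;	/* fall back to accept-all-multicast */
		v->mc_list_count = 0;
		rc = mock_set_rx_mask(v);	/* retry without the MC list */
	}
	if (rc)
		printf("rx mask failure rc: %d\n", rc);
	return rc;
}

int main(void)
{
	struct mock_vnic v = { .rx_mask = 0, .mc_list_count = 3 };

	return mock_cfg_rx_mode(&v);	/* falls back to ALL_MCAST and succeeds */
}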
@@ -10685,6 +10695,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+	bnxt_free_hwrm_short_cmd_req(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);
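The final hunk releases the short FW command request buffer on bnxt_init_one()'s init_err_pci_clean unwind path, so it no longer leaks when probe fails after that buffer has been allocated. A toy sketch of the same goto-unwind pattern with hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for two probe-time allocations. */
struct mock_bp { void *short_cmd_req; void *hwrm_resources; };

static int mock_probe(struct mock_bp *bp, int fail_late)
{
	int rc = 0;

	bp->short_cmd_req = malloc(64);
	bp->hwrm_resources = malloc(64);
	if (!bp->short_cmd_req || !bp->hwrm_resources) {
		rc = -12;			/* -ENOMEM */
		goto init_err_pci_clean;
	}
	if (fail_late) {			/* some later probe step fails */
		rc = -5;
		goto init_err_pci_clean;
	}
	return 0;

init_err_pci_clean:
	free(bp->short_cmd_req);		/* the newly added release */
	bp->short_cmd_req = NULL;
	free(bp->hwrm_resources);
	bp->hwrm_resources = NULL;
	return rc;
}

int main(void)
{
	struct mock_bp bp = {0};

	printf("probe rc=%d\n", mock_probe(&bp, 1));	/* late failure: both freed */
	return 0;
}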