@@ -2086,6 +2086,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
         u32 data1 = le32_to_cpu(cmpl->event_data1);
         u32 data2 = le32_to_cpu(cmpl->event_data2);
 
+        netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
+                   event_id, data1, data2);
+
         /* TODO CHIMP_FW: Define event id's for link change, error etc */
         switch (event_id) {
         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
@@ -7691,19 +7694,6 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
                        BNXT_FW_HEALTH_WIN_MAP_OFF);
 }
 
-bool bnxt_is_fw_healthy(struct bnxt *bp)
-{
-        if (bp->fw_health && bp->fw_health->status_reliable) {
-                u32 fw_status;
-
-                fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
-                if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
-                        return false;
-        }
-
-        return true;
-}
-
 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
 {
         struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8031,6 +8021,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
         if (!bp->hwrm_cmd_timeout)
                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+        bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+        if (!bp->hwrm_cmd_max_timeout)
+                bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
+        else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
+                netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
+                            bp->hwrm_cmd_max_timeout / 1000);
 
         if (resp->hwrm_intf_maj_8b >= 1) {
                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -8634,7 +8630,10 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
         /* Filter for default vnic 0 */
         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
         if (rc) {
-                netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+                if (BNXT_VF(bp) && rc == -ENODEV)
+                        netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
+                else
+                        netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
                 goto err_out;
         }
         vnic->uc_filter_count = 1;
@@ -9427,6 +9426,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
         rc = hwrm_req_send(bp, req);
         if (rc) {
                 hwrm_req_drop(bp, req);
+                if (BNXT_VF(bp) && rc == -ENODEV) {
+                        netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
+                        rc = 0;
+                }
                 return rc;
         }
 
@@ -10825,12 +10828,21 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
                 if (rc) {
-                        netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
-                                   rc);
+                        if (BNXT_VF(bp) && rc == -ENODEV) {
+                                if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+                                        netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
+                                else
+                                        netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
+                                rc = 0;
+                        } else {
+                                netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+                        }
                         vnic->uc_filter_count = i;
                         return rc;
                 }
         }
+        if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+                netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
 
 skip_uc:
         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
@@ -11395,6 +11407,11 @@ static void bnxt_timer(struct timer_list *t)
                 }
         }
 
+        if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
+                set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+                bnxt_queue_sp_work(bp);
+        }
+
         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
             netif_carrier_ok(dev)) {
                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
@@ -13101,7 +13118,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 
         rc = __bnxt_reserve_rings(bp);
-        if (rc)
+        if (rc && rc != -ENODEV)
                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
         if (sh)
@@ -13110,7 +13127,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
         /* Rings may have been trimmed, re-reserve the trimmed rings. */
         if (bnxt_need_reserve_rings(bp)) {
                 rc = __bnxt_reserve_rings(bp);
-                if (rc)
+                if (rc && rc != -ENODEV)
                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
         }
@@ -13136,7 +13153,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
         bnxt_clear_int_mode(bp);
         rc = bnxt_set_dflt_rings(bp, true);
         if (rc) {
-                netdev_err(bp->dev, "Not enough rings available.\n");
+                if (BNXT_VF(bp) && rc == -ENODEV)
+                        netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+                else
+                        netdev_err(bp->dev, "Not enough rings available.\n");
                 goto init_dflt_ring_err;
         }
         rc = bnxt_init_int_mode(bp);
@@ -13424,8 +13444,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         bnxt_set_ring_params(bp);
         rc = bnxt_set_dflt_rings(bp, true);
         if (rc) {
-                netdev_err(bp->dev, "Not enough rings available.\n");
-                rc = -ENOMEM;
+                if (BNXT_VF(bp) && rc == -ENODEV) {
+                        netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+                } else {
+                        netdev_err(bp->dev, "Not enough rings available.\n");
+                        rc = -ENOMEM;
+                }
                 goto init_err_pci_clean;
         }
 