@@ -1766,7 +1766,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 		rc = -EIO;
 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
-			bnapi->cp_ring.rx_buf_errors++;
+			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
 			if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
 				netdev_warn(bp->dev, "RX buffer error %x\n",
 					    rx_err);
@@ -1849,7 +1849,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	} else {
 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
 			if (dev->features & NETIF_F_RXCSUM)
-				bnapi->cp_ring.rx_l4_csum_errors++;
+				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
 		}
 	}
 
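The two hunks above, together with the missed_irqs hunk further down, move per-ring counters under a new sw_stats container. Reconstructed only from the accesses visible in this diff, the grouping presumably looks something like the sketch below; the struct names and any layout beyond the fields referenced here are assumptions, not taken from the driver.

#include <linux/types.h>

/* Presumed regrouping, inferred from bnapi->cp_ring.sw_stats.rx.* and
 * cpr->sw_stats.cmn.missed_irqs in this diff; the exact field set is
 * an assumption.
 */
struct bnxt_rx_sw_stats {
	u64	rx_l4_csum_errors;
	u64	rx_buf_errors;
};

struct bnxt_cmn_sw_stats {
	u64	missed_irqs;
};

struct bnxt_sw_stats {
	struct bnxt_rx_sw_stats		rx;
	struct bnxt_cmn_sw_stats	cmn;
};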
@@ -5045,8 +5045,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
 	req.lb_rule = cpu_to_le16(0xffff);
 vnic_mru:
-	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
-			      VLAN_HLEN);
+	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
 
 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
 #ifdef CONFIG_BNXT_SRIOV
@@ -5356,9 +5355,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
 {
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		if (BNXT_PF(bp))
-			db->doorbell = bp->bar1 + 0x10000;
+			db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
 		else
-			db->doorbell = bp->bar1 + 0x4000;
+			db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
 		switch (ring_type) {
 		case HWRM_RING_ALLOC_TX:
 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
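DB_PF_OFFSET_P5 and DB_VF_OFFSET_P5 give names to the magic offsets they replace, so the same values can be reused by the BAR-size check added in bnxt_hwrm_func_qcfg() below. Presumably they are defined in bnxt.h as part of this change, matching the old literals:

/* Presumed definitions, matching the literals they replace above. */
#define DB_PF_OFFSET_P5	0x10000
#define DB_VF_OFFSET_P5	0x4000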
@@ -6365,6 +6364,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 {
 	struct hwrm_func_qcfg_input req = {0};
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	u32 min_db_offset = 0;
 	u16 flags;
 	int rc;
 
@@ -6413,6 +6413,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	if (!bp->max_mtu)
 		bp->max_mtu = BNXT_MAX_MTU;
 
+	if (bp->db_size)
+		goto func_qcfg_exit;
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+		if (BNXT_PF(bp))
+			min_db_offset = DB_PF_OFFSET_P5;
+		else
+			min_db_offset = DB_VF_OFFSET_P5;
+	}
+	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+				 1024);
+	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
+	    bp->db_size <= min_db_offset)
+		bp->db_size = pci_resource_len(bp->pdev, 2);
+
 func_qcfg_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
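The sizing added above is plain arithmetic: firmware reports the doorbell BAR size in KB, the driver page-aligns it, and falls back to the full length of BAR 2 whenever the report is zero, larger than the BAR, or not big enough to reach past the minimum doorbell offset. A standalone userspace sketch of that decision; db_bar_size() and all input values are hypothetical, and PAGE_ALIGN is approximated for 4 KB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_ALIGN_4K(x) (((x) + 4095u) & ~4095u)

static uint32_t db_bar_size(uint32_t fw_size_kb, uint32_t bar_len,
			    uint32_t min_db_offset)
{
	uint32_t db_size = PAGE_ALIGN_4K(fw_size_kb * 1024u);

	/* Same three conditions as bnxt_hwrm_func_qcfg(): a zero, oversized,
	 * or too-small report means "map the whole BAR".
	 */
	if (!db_size || db_size > bar_len || db_size <= min_db_offset)
		db_size = bar_len;
	return db_size;
}

int main(void)
{
	/* PF on a P5 chip: min offset 0x10000, 1 MB doorbell BAR. */
	printf("%u\n", db_bar_size(64, 1u << 20, 0x10000));  /* 1048576: fallback */
	printf("%u\n", db_bar_size(128, 1u << 20, 0x10000)); /* 131072: accepted */
	return 0;
}

With a PF minimum offset of 0x10000, a reported 64 KB (exactly 0x10000 bytes) fails the db_size <= min_db_offset test and the whole BAR is used, while a 128 KB report is accepted.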
@@ -6434,23 +6449,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	if (!rc) {
 		struct bnxt_ctx_pg_info *ctx_pg;
 		struct bnxt_ctx_mem_info *ctx;
-		int i;
+		int i, tqm_rings;
 
 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 		if (!ctx) {
 			rc = -ENOMEM;
 			goto ctx_err;
 		}
-		ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
-		if (!ctx_pg) {
-			kfree(ctx);
-			rc = -ENOMEM;
-			goto ctx_err;
-		}
-		for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
-			ctx->tqm_mem[i] = ctx_pg;
-
-		bp->ctx = ctx;
 		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
 		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
 		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
@@ -6483,6 +6488,20 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
 		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
 		ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+		if (!ctx->tqm_fp_rings_count)
+			ctx->tqm_fp_rings_count = bp->max_q;
+
+		tqm_rings = ctx->tqm_fp_rings_count + 1;
+		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
+		if (!ctx_pg) {
+			kfree(ctx);
+			rc = -ENOMEM;
+			goto ctx_err;
+		}
+		for (i = 0; i < tqm_rings; i++, ctx_pg++)
+			ctx->tqm_mem[i] = ctx_pg;
+		bp->ctx = ctx;
 	} else {
 		rc = 0;
 	}
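Two points in the hunk above: firmware that predates the tqm_fp_rings_count field reports zero, so the driver keeps the old assumption of bp->max_q fast-path rings plus one slow-path ring, and the array allocation moves from an open-coded kzalloc(n * size) to kcalloc(), which fails cleanly on multiplication overflow. A minimal sketch of the allocation pattern; the stub struct stands in for struct bnxt_ctx_pg_info:

#include <linux/slab.h>
#include <linux/types.h>

/* Stand-in element type; the real array element is struct bnxt_ctx_pg_info. */
struct ctx_pg_stub {
	void *pages;
	u32 entries;
};

/* kcalloc(n, size, flags) is kzalloc(n * size, flags) plus an overflow
 * check on the product: it returns NULL rather than silently allocating
 * a truncated buffer when n * size wraps around.
 */
static struct ctx_pg_stub *alloc_tqm_array(int tqm_rings)
{
	return kcalloc(tqm_rings, sizeof(struct ctx_pg_stub), GFP_KERNEL);
}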
@@ -6735,7 +6754,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
 		return;
 
 	if (ctx->tqm_mem[0]) {
-		for (i = 0; i < bp->max_q + 1; i++)
+		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
 			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
 		kfree(ctx->tqm_mem[0]);
 		ctx->tqm_mem[0] = NULL;
@@ -6756,6 +6775,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	struct bnxt_ctx_pg_info *ctx_pg;
 	struct bnxt_ctx_mem_info *ctx;
 	u32 mem_size, ena, entries;
+	u32 entries_sp, min;
 	u32 num_mr, num_ah;
 	u32 extra_srqs = 0;
 	u32 extra_qps = 0;
@@ -6845,14 +6865,17 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
 
 skip_rdma:
-	entries = ctx->qp_max_l2_entries + extra_qps;
+	min = ctx->tqm_min_entries_per_ring;
+	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+	entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
 	entries = roundup(entries, ctx->tqm_entries_multiple);
-	entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
-			  ctx->tqm_max_entries_per_ring);
-	for (i = 0; i < bp->max_q + 1; i++) {
+	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
 		ctx_pg = ctx->tqm_mem[i];
-		ctx_pg->entries = entries;
-		mem_size = ctx->tqm_entry_size * entries;
+		ctx_pg->entries = i ? entries : entries_sp;
+		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
 		if (rc)
 			return rc;
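The skip_rdma block now produces two ring sizes: entries_sp for the slow-path TQM ring at index 0, which also has to absorb VNIC and QP1 work, and entries for the fast-path rings. A worked example of the arithmetic; every input value below is assumed for illustration, not taken from real hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_u32(uint32_t v, uint32_t m)
{
	return ((v + m - 1) / m) * m;
}

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Assumed firmware-reported limits. */
	uint32_t vnic_max_vnic_entries = 128;
	uint32_t qp_max_l2_entries = 4096;
	uint32_t qp_min_qp1_entries = 64;
	uint32_t tqm_min_entries_per_ring = 256;
	uint32_t tqm_max_entries_per_ring = 65536;
	uint32_t tqm_entries_multiple = 32;
	uint32_t extra_qps = 0;	/* no RDMA in this example */

	/* Slow-path ring (index 0): folds in VNIC and QP1 entries. */
	uint32_t min = tqm_min_entries_per_ring;
	uint32_t entries_sp = vnic_max_vnic_entries + qp_max_l2_entries +
			      2 * (extra_qps + qp_min_qp1_entries) + min;
	entries_sp = roundup_u32(entries_sp, tqm_entries_multiple);

	/* Fast-path rings (index 1..N). */
	uint32_t entries = qp_max_l2_entries + extra_qps + qp_min_qp1_entries;
	entries = roundup_u32(entries, tqm_entries_multiple);
	entries = clamp_u32(entries, min, tqm_max_entries_per_ring);

	printf("slow path: %u, fast path: %u\n", entries_sp, entries);
	return 0;
}

With these inputs the slow-path ring gets 4608 entries and each fast-path ring 4160; both happen to be multiples of 32 already, so roundup() leaves them unchanged.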
@@ -10262,7 +10285,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
 			bnxt_dbg_hwrm_ring_info_get(bp,
 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
 				fw_ring_id, &val[0], &val[1]);
-			cpr->missed_irqs++;
+			cpr->sw_stats.cmn.missed_irqs++;
 		}
 	}
 }
@@ -10891,20 +10914,16 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->dev = dev;
 	bp->pdev = pdev;
 
+	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
+	 * determines the BAR size.
+	 */
 	bp->bar0 = pci_ioremap_bar(pdev, 0);
 	if (!bp->bar0) {
 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 		rc = -ENOMEM;
 		goto init_err_release;
 	}
 
-	bp->bar1 = pci_ioremap_bar(pdev, 2);
-	if (!bp->bar1) {
-		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
-		rc = -ENOMEM;
-		goto init_err_release;
-	}
-
 	bp->bar2 = pci_ioremap_bar(pdev, 4);
 	if (!bp->bar2) {
 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
@@ -11826,6 +11845,16 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
 	return 0;
 }
 
+static int bnxt_map_db_bar(struct bnxt *bp)
+{
+	if (!bp->db_size)
+		return -ENODEV;
+	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
+	if (!bp->bar1)
+		return -ENOMEM;
+	return 0;
+}
+
 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *dev;
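bnxt_map_db_bar() uses pci_iomap(), which maps at most the requested length of the BAR, rather than pci_ioremap_bar(), which always maps the entire BAR; on P5 chips the doorbell BAR can be much larger than the range the driver touches, so only bp->db_size bytes get mapped. A hedged sketch of the distinction; the wrapper function is illustrative, but pci_iomap() and pci_ioremap_bar() are the real kernel APIs:

#include <linux/pci.h>

/* Illustrative wrapper (not driver code): pci_iomap() with a non-zero
 * maxlen maps min(maxlen, BAR length) bytes, so a huge doorbell BAR is
 * only mapped up to the size the driver will actually use.
 */
static void __iomem *map_doorbells(struct pci_dev *pdev, u32 db_size)
{
	if (!db_size)		/* size not yet learned from firmware */
		return NULL;
	return pci_iomap(pdev, 2, db_size);	/* BAR 2, capped at db_size */
}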
@@ -11886,6 +11915,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;
 
+	rc = bnxt_map_db_bar(bp);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
+			rc);
+		goto init_err_pci_clean;
+	}
+
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 			   NETIF_F_TSO | NETIF_F_TSO6 |
 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |