@@ -4498,6 +4498,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
4498
4498
}
4499
4499
}
4500
4500
4501
+ static int bnxt_hwrm_get_rings (struct bnxt * bp )
4502
+ {
4503
+ struct hwrm_func_qcfg_output * resp = bp -> hwrm_cmd_resp_addr ;
4504
+ struct bnxt_hw_resc * hw_resc = & bp -> hw_resc ;
4505
+ struct hwrm_func_qcfg_input req = {0 };
4506
+ int rc ;
4507
+
4508
+ if (bp -> hwrm_spec_code < 0x10601 )
4509
+ return 0 ;
4510
+
4511
+ bnxt_hwrm_cmd_hdr_init (bp , & req , HWRM_FUNC_QCFG , -1 , -1 );
4512
+ req .fid = cpu_to_le16 (0xffff );
4513
+ mutex_lock (& bp -> hwrm_cmd_lock );
4514
+ rc = _hwrm_send_message (bp , & req , sizeof (req ), HWRM_CMD_TIMEOUT );
4515
+ if (rc ) {
4516
+ mutex_unlock (& bp -> hwrm_cmd_lock );
4517
+ return - EIO ;
4518
+ }
4519
+
4520
+ hw_resc -> resv_tx_rings = le16_to_cpu (resp -> alloc_tx_rings );
4521
+ if (bp -> flags & BNXT_FLAG_NEW_RM ) {
4522
+ u16 cp , stats ;
4523
+
4524
+ hw_resc -> resv_rx_rings = le16_to_cpu (resp -> alloc_rx_rings );
4525
+ hw_resc -> resv_hw_ring_grps =
4526
+ le32_to_cpu (resp -> alloc_hw_ring_grps );
4527
+ hw_resc -> resv_vnics = le16_to_cpu (resp -> alloc_vnics );
4528
+ cp = le16_to_cpu (resp -> alloc_cmpl_rings );
4529
+ stats = le16_to_cpu (resp -> alloc_stat_ctx );
4530
+ cp = min_t (u16 , cp , stats );
4531
+ hw_resc -> resv_cp_rings = cp ;
4532
+ }
4533
+ mutex_unlock (& bp -> hwrm_cmd_lock );
4534
+ return 0 ;
4535
+ }
4536
+
4501
4537
/* Caller must hold bp->hwrm_cmd_lock */
4502
4538
int __bnxt_hwrm_get_tx_rings (struct bnxt * bp , u16 fid , int * tx_rings )
4503
4539
{
@@ -4517,33 +4553,190 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4517
4553
return rc ;
4518
4554
}
4519
4555
4520
- static int bnxt_hwrm_reserve_tx_rings (struct bnxt * bp , int * tx_rings )
4556
+ static int
4557
+ bnxt_hwrm_reserve_pf_rings (struct bnxt * bp , int tx_rings , int rx_rings ,
4558
+ int ring_grps , int cp_rings , int vnics )
4521
4559
{
4522
4560
struct hwrm_func_cfg_input req = {0 };
4561
+ u32 enables = 0 ;
4523
4562
int rc ;
4524
4563
4525
- if (bp -> hwrm_spec_code < 0x10601 )
4564
+ bnxt_hwrm_cmd_hdr_init (bp , & req , HWRM_FUNC_CFG , -1 , -1 );
4565
+ req .fid = cpu_to_le16 (0xffff );
4566
+ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0 ;
4567
+ req .num_tx_rings = cpu_to_le16 (tx_rings );
4568
+ if (bp -> flags & BNXT_FLAG_NEW_RM ) {
4569
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0 ;
4570
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4571
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0 ;
4572
+ enables |= ring_grps ?
4573
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0 ;
4574
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0 ;
4575
+
4576
+ req .num_rx_rings = cpu_to_le16 (rx_rings );
4577
+ req .num_hw_ring_grps = cpu_to_le16 (ring_grps );
4578
+ req .num_cmpl_rings = cpu_to_le16 (cp_rings );
4579
+ req .num_stat_ctxs = req .num_cmpl_rings ;
4580
+ req .num_vnics = cpu_to_le16 (vnics );
4581
+ }
4582
+ if (!enables )
4526
4583
return 0 ;
4527
4584
4528
- if (BNXT_VF (bp ))
4585
+ req .enables = cpu_to_le32 (enables );
4586
+ rc = hwrm_send_message (bp , & req , sizeof (req ), HWRM_CMD_TIMEOUT );
4587
+ if (rc )
4588
+ return - ENOMEM ;
4589
+
4590
+ if (bp -> hwrm_spec_code < 0x10601 )
4591
+ bp -> hw_resc .resv_tx_rings = tx_rings ;
4592
+
4593
+ rc = bnxt_hwrm_get_rings (bp );
4594
+ return rc ;
4595
+ }
4596
+
4597
+ static int
4598
+ bnxt_hwrm_reserve_vf_rings (struct bnxt * bp , int tx_rings , int rx_rings ,
4599
+ int ring_grps , int cp_rings , int vnics )
4600
+ {
4601
+ struct hwrm_func_vf_cfg_input req = {0 };
4602
+ u32 enables = 0 ;
4603
+ int rc ;
4604
+
4605
+ if (!(bp -> flags & BNXT_FLAG_NEW_RM )) {
4606
+ bp -> hw_resc .resv_tx_rings = tx_rings ;
4529
4607
return 0 ;
4608
+ }
4530
4609
4531
- bnxt_hwrm_cmd_hdr_init (bp , & req , HWRM_FUNC_CFG , -1 , -1 );
4532
- req .fid = cpu_to_le16 (0xffff );
4533
- req .enables = cpu_to_le32 (FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS );
4534
- req .num_tx_rings = cpu_to_le16 (* tx_rings );
4610
+ bnxt_hwrm_cmd_hdr_init (bp , & req , HWRM_FUNC_VF_CFG , -1 , -1 );
4611
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0 ;
4612
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0 ;
4613
+ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4614
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0 ;
4615
+ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0 ;
4616
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0 ;
4617
+
4618
+ req .num_tx_rings = cpu_to_le16 (tx_rings );
4619
+ req .num_rx_rings = cpu_to_le16 (rx_rings );
4620
+ req .num_hw_ring_grps = cpu_to_le16 (ring_grps );
4621
+ req .num_cmpl_rings = cpu_to_le16 (cp_rings );
4622
+ req .num_stat_ctxs = req .num_cmpl_rings ;
4623
+ req .num_vnics = cpu_to_le16 (vnics );
4624
+
4625
+ req .enables = cpu_to_le32 (enables );
4535
4626
rc = hwrm_send_message (bp , & req , sizeof (req ), HWRM_CMD_TIMEOUT );
4627
+ if (rc )
4628
+ return - ENOMEM ;
4629
+
4630
+ rc = bnxt_hwrm_get_rings (bp );
4631
+ return rc ;
4632
+ }
4633
+
4634
/* Dispatch a ring reservation request to the PF or VF variant. */
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
				   int cp, int vnic)
{
	return BNXT_PF(bp) ?
	       bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic) :
	       bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
}
4642
+
4643
+ static int bnxt_trim_rings (struct bnxt * bp , int * rx , int * tx , int max ,
4644
+ bool shared );
4645
+
4646
/* Reserve the rings currently configured in bp (tx/rx/cp counts plus the
 * derived ring-group and vnic counts) with the firmware, then adjust the
 * bp ring counts downward to whatever the firmware actually granted.
 * May disable LRO/aggregation rings if too few RX rings were granted.
 * Returns 0 on success, -ENOMEM when the granted resources are unusable,
 * or an error from the HWRM exchange.
 */
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int cp = bp->cp_nr_rings;
	int grp, rx_rings, rc;
	bool sh = false;
	int vnic = 1;

	/* Pre-1.6.1 firmware has no ring reservation support. */
	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	/* RFS needs one vnic per RX ring plus the default vnic. */
	if (bp->flags & BNXT_FLAG_RFS)
		vnic = rx + 1;
	/* With aggregation, each logical RX ring uses two HW rings. */
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;

	grp = bp->rx_nr_rings;
	/* Skip the HWRM round trip if the current reservation already
	 * matches what we need (TX always; the rest only under NEW_RM).
	 */
	if (tx == hw_resc->resv_tx_rings &&
	    (!(bp->flags & BNXT_FLAG_NEW_RM) ||
	     (rx == hw_resc->resv_rx_rings &&
	      grp == hw_resc->resv_hw_ring_grps &&
	      cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
		return 0;

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
	if (rc)
		return rc;

	/* Work from what the firmware actually granted. */
	tx = hw_resc->resv_tx_rings;
	if (bp->flags & BNXT_FLAG_NEW_RM) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_cp_rings;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			/* Two HW rings per logical RX ring. */
			rx_rings = rx >> 1;
		} else {
			/* Not enough rings for aggregation; can only fall
			 * back while the device is down.
			 */
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			/* LRO requires aggregation rings. */
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	/* Each RX ring also needs a HW ring group. */
	rx_rings = min_t(int, rx_rings, grp);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	/* Shared mode: one cp ring serves a tx/rx pair. */
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	/* Any resource reduced to zero makes the device unusable. */
	if (!tx || !rx || !cp || !grp || !vnic)
		return -ENOMEM;

	return rc;
}
4546
4715
4716
+ static bool bnxt_need_reserve_rings (struct bnxt * bp )
4717
+ {
4718
+ struct bnxt_hw_resc * hw_resc = & bp -> hw_resc ;
4719
+ int rx = bp -> rx_nr_rings ;
4720
+ int vnic = 1 ;
4721
+
4722
+ if (bp -> hwrm_spec_code < 0x10601 )
4723
+ return false;
4724
+
4725
+ if (hw_resc -> resv_tx_rings != bp -> tx_nr_rings )
4726
+ return true;
4727
+
4728
+ if (bp -> flags & BNXT_FLAG_RFS )
4729
+ vnic = rx + 1 ;
4730
+ if (bp -> flags & BNXT_FLAG_AGG_RINGS )
4731
+ rx <<= 1 ;
4732
+ if ((bp -> flags & BNXT_FLAG_NEW_RM ) &&
4733
+ (hw_resc -> resv_rx_rings != rx ||
4734
+ hw_resc -> resv_cp_rings != bp -> cp_nr_rings ||
4735
+ hw_resc -> resv_vnics != vnic ))
4736
+ return true;
4737
+ return false;
4738
+ }
4739
+
4547
4740
static int bnxt_hwrm_check_tx_rings (struct bnxt * bp , int tx_rings )
4548
4741
{
4549
4742
struct hwrm_func_cfg_input req = {0 };
@@ -5270,15 +5463,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5270
5463
rc );
5271
5464
goto err_out ;
5272
5465
}
5273
- if (bp -> hw_resc .resv_tx_rings != bp -> tx_nr_rings ) {
5274
- int tx = bp -> tx_nr_rings ;
5275
-
5276
- if (bnxt_hwrm_reserve_tx_rings (bp , & tx ) ||
5277
- tx < bp -> tx_nr_rings ) {
5278
- rc = - ENOMEM ;
5279
- goto err_out ;
5280
- }
5281
- }
5282
5466
}
5283
5467
5284
5468
rc = bnxt_hwrm_ring_alloc (bp );
@@ -5637,6 +5821,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
5637
5821
bp -> flags &= ~BNXT_FLAG_USING_MSIX ;
5638
5822
}
5639
5823
5824
+ static int bnxt_reserve_rings (struct bnxt * bp )
5825
+ {
5826
+ int orig_cp = bp -> hw_resc .resv_cp_rings ;
5827
+ int tcs = netdev_get_num_tc (bp -> dev );
5828
+ int rc ;
5829
+
5830
+ if (!bnxt_need_reserve_rings (bp ))
5831
+ return 0 ;
5832
+
5833
+ rc = __bnxt_reserve_rings (bp );
5834
+ if (rc ) {
5835
+ netdev_err (bp -> dev , "ring reservation failure rc: %d\n" , rc );
5836
+ return rc ;
5837
+ }
5838
+ if ((bp -> flags & BNXT_FLAG_NEW_RM ) && bp -> cp_nr_rings > orig_cp ) {
5839
+ bnxt_clear_int_mode (bp );
5840
+ rc = bnxt_init_int_mode (bp );
5841
+ if (rc )
5842
+ return rc ;
5843
+ }
5844
+ if (tcs && (bp -> tx_nr_rings_per_tc * tcs != bp -> tx_nr_rings )) {
5845
+ netdev_err (bp -> dev , "tx ring reservation failure\n" );
5846
+ netdev_reset_tc (bp -> dev );
5847
+ bp -> tx_nr_rings_per_tc = bp -> tx_nr_rings ;
5848
+ return - ENOMEM ;
5849
+ }
5850
+ bp -> num_stat_ctxs = bp -> cp_nr_rings ;
5851
+ return 0 ;
5852
+ }
5853
+
5640
5854
static void bnxt_free_irq (struct bnxt * bp )
5641
5855
{
5642
5856
struct bnxt_irq * irq ;
@@ -6387,6 +6601,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6387
6601
bnxt_preset_reg_win (bp );
6388
6602
netif_carrier_off (bp -> dev );
6389
6603
if (irq_re_init ) {
6604
+ rc = bnxt_reserve_rings (bp );
6605
+ if (rc )
6606
+ return rc ;
6607
+
6390
6608
rc = bnxt_setup_int_mode (bp );
6391
6609
if (rc ) {
6392
6610
netdev_err (bp -> dev , "bnxt_setup_int_mode err: %x\n" ,
@@ -8062,16 +8280,20 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
8062
8280
bp -> cp_nr_rings = bp -> tx_nr_rings_per_tc + bp -> rx_nr_rings ;
8063
8281
bp -> tx_nr_rings = bp -> tx_nr_rings_per_tc ;
8064
8282
8065
- rc = bnxt_hwrm_reserve_tx_rings (bp , & bp -> tx_nr_rings_per_tc );
8283
+ rc = __bnxt_reserve_rings (bp );
8066
8284
if (rc )
8067
8285
netdev_warn (bp -> dev , "Unable to reserve tx rings\n" );
8068
8286
bp -> tx_nr_rings_per_tc = bp -> tx_nr_rings ;
8069
8287
if (sh )
8070
8288
bnxt_trim_dflt_sh_rings (bp );
8071
8289
8072
- bp -> tx_nr_rings = bp -> tx_nr_rings_per_tc ;
8073
- bp -> cp_nr_rings = sh ? max_t (int , bp -> tx_nr_rings , bp -> rx_nr_rings ) :
8074
- bp -> tx_nr_rings + bp -> rx_nr_rings ;
8290
+ /* Rings may have been trimmed, re-reserve the trimmed rings. */
8291
+ if (bnxt_need_reserve_rings (bp )) {
8292
+ rc = __bnxt_reserve_rings (bp );
8293
+ if (rc )
8294
+ netdev_warn (bp -> dev , "2nd rings reservation failed.\n" );
8295
+ bp -> tx_nr_rings_per_tc = bp -> tx_nr_rings ;
8296
+ }
8075
8297
bp -> num_stat_ctxs = bp -> cp_nr_rings ;
8076
8298
if (BNXT_CHIP_TYPE_NITRO_A0 (bp )) {
8077
8299
bp -> rx_nr_rings ++ ;
0 commit comments