@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-	/* Required last entry */
-	{0, }
-};
-
-static const struct pci_device_id roce_pci_tbl[] = {
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-	/* Required last entry */
+	/* required last entry */
 	{0, }
 };
 
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
-	if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
+	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->num_roce_msix =
 		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
 	tc_num = hclge_get_tc_num(hdev);
 	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
 
-	shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+	if (hnae3_dev_dcb_supported(hdev))
+		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+	else
+		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+
 	shared_buf_tc = pfc_enable_num * hdev->mps +
 			(tc_num - pfc_enable_num) * hdev->mps / 2 +
 			hdev->mps;
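
For reference, the branch added above reserves a smaller delay-value headroom when DCB is unsupported. A minimal standalone sketch of that selection, with assumed byte values standing in for HCLGE_DEFAULT_DV and HCLGE_DEFAULT_NON_DCB_DV (the real constants are defined in hclge_main.h):

	/* Minimal sketch (not driver code): how shared_buf_min is chosen.
	 * The two DV byte values below are assumptions for illustration.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define DEFAULT_DV		0xA000	/* assumed stand-in for HCLGE_DEFAULT_DV */
	#define DEFAULT_NON_DCB_DV	0x7800	/* assumed stand-in for HCLGE_DEFAULT_NON_DCB_DV */

	static unsigned int shared_buf_min(unsigned int mps, bool dcb)
	{
		/* headroom for two max-size packets plus the delay value */
		return 2 * mps + (dcb ? DEFAULT_DV : DEFAULT_NON_DCB_DV);
	}

	int main(void)
	{
		printf("dcb: %u bytes, non-dcb: %u bytes\n",
		       shared_buf_min(1518, true), shared_buf_min(1518, false));
		return 0;
	}
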
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
 	struct hclge_priv_buf *priv;
 	int i;
 
+	/* When DCB is not supported, rx private
+	 * buffer is not allocated.
+	 */
+	if (!hnae3_dev_dcb_supported(hdev)) {
+		if (!hclge_is_rx_buf_ok(hdev, rx_all))
+			return -ENOMEM;
+
+		return 0;
+	}
+
 	/* step 1, try to alloc private buffer for all enabled tc */
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		priv = &hdev->priv_buf[i];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
 				priv->wl.high = 2 * hdev->mps;
 				priv->buf_size = priv->wl.high;
 			}
+		} else {
+			priv->enable = 0;
+			priv->wl.low = 0;
+			priv->wl.high = 0;
+			priv->buf_size = 0;
 		}
 	}
 
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		priv = &hdev->priv_buf[i];
 
-		if (hdev->hw_tc_map & BIT(i))
-			priv->enable = 1;
+		priv->enable = 0;
+		priv->wl.low = 0;
+		priv->wl.high = 0;
+		priv->buf_size = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		priv->enable = 1;
 
 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
 			priv->wl.low = 128;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
 			cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
 	}
 
+	req->shared_buf =
+		cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
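
For reference, the shared_buf word added in this hunk packs the shared buffer size, scaled down to hardware buffer units, together with an enable bit before the cpu_to_le16() conversion. A standalone sketch of that packing; the shift of 7 (128-byte units) and bit position 15 are assumptions for illustration, not quoted from hclge_main.h:

	/* Sketch: pack the shared-buffer size and enable bit the way
	 * req->shared_buf is built above. Shift and bit position are
	 * assumed values.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define BUF_UNIT_S	 7	/* assumed: sizes counted in 128-byte units */
	#define TC0_PRI_BUF_EN_B 15	/* assumed: enable bit position */

	static uint16_t pack_shared_buf(uint32_t buf_size_bytes)
	{
		return (uint16_t)((buf_size_bytes >> BUF_UNIT_S) |
				  (1u << TC0_PRI_BUF_EN_B));
	}

	int main(void)
	{
		/* a 16 KiB shared buffer -> 128 units plus the enable bit */
		printf("0x%04x\n", pack_shared_buf(16384));	/* prints 0x8080 */
		return 0;
	}
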
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
 		return ret;
 	}
 
-	ret = hclge_rx_priv_wl_config(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"could not configure rx private waterline %d\n", ret);
-		return ret;
-	}
+	if (hnae3_dev_dcb_supported(hdev)) {
+		ret = hclge_rx_priv_wl_config(hdev);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"could not configure rx private waterline %d\n",
+				ret);
+			return ret;
+		}
 
-	ret = hclge_common_thrd_config(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"could not configure common threshold %d\n", ret);
-		return ret;
+		ret = hclge_common_thrd_config(hdev);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"could not configure common threshold %d\n",
+				ret);
+			return ret;
+		}
 	}
 
 	ret = hclge_common_wl_config(hdev);
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	u16 tc_valid[HCLGE_MAX_TC_NUM];
 	u16 tc_size[HCLGE_MAX_TC_NUM];
 	u32 *rss_indir = NULL;
+	u16 rss_size = 0, roundup_size;
 	const u8 *key;
 	int i, ret, j;
 
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
 			vport[j].rss_indirection_tbl[i] =
-				i % hdev->rss_size_max;
+				i % vport[j].alloc_rss_size;
+
+			/* vport 0 is for PF */
+			if (j != 0)
+				continue;
+
+			rss_size = vport[j].alloc_rss_size;
 			rss_indir[i] = vport[j].rss_indirection_tbl[i];
 		}
 	}
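
For reference, the loop above now spreads the indirection-table entries round-robin over each vport's alloc_rss_size queues rather than the global hdev->rss_size_max, and records the PF's rss_size from vport 0. A minimal sketch of the fill pattern, assuming a 512-entry table for HCLGE_RSS_IND_TBL_SIZE:

	/* Sketch: round-robin fill of an RSS indirection table over
	 * alloc_rss_size queues, matching the patched loop (table size
	 * of 512 is an assumption for HCLGE_RSS_IND_TBL_SIZE).
	 */
	#include <stdio.h>

	#define IND_TBL_SIZE 512

	int main(void)
	{
		unsigned int tbl[IND_TBL_SIZE], alloc_rss_size = 6, i;

		for (i = 0; i < IND_TBL_SIZE; i++)
			tbl[i] = i % alloc_rss_size;	/* queue ids 0..5, repeating */

		printf("entry 0 -> queue %u, entry 511 -> queue %u\n",
		       tbl[0], tbl[IND_TBL_SIZE - 1]);
		return 0;
	}
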
@@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	if (ret)
 		goto err;
 
+	/* Each TC has the same queue size, and the tc_size set to hardware is
+	 * the log2 of the roundup power of two of rss_size; the actual queue
+	 * size is limited by the indirection table.
+	 */
+	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+		dev_err(&hdev->pdev->dev,
+			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
+			rss_size);
+		return -EINVAL;
+	}
+
+	roundup_size = roundup_pow_of_two(rss_size);
+	roundup_size = ilog2(roundup_size);
+
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-		if (hdev->hw_tc_map & BIT(i))
-			tc_valid[i] = 1;
-		else
-			tc_valid[i] = 0;
+		tc_valid[i] = 0;
 
-		switch (hdev->rss_size_max) {
-		case HCLGE_RSS_TC_SIZE_0:
-			tc_size[i] = 0;
-			break;
-		case HCLGE_RSS_TC_SIZE_1:
-			tc_size[i] = 1;
-			break;
-		case HCLGE_RSS_TC_SIZE_2:
-			tc_size[i] = 2;
-			break;
-		case HCLGE_RSS_TC_SIZE_3:
-			tc_size[i] = 3;
-			break;
-		case HCLGE_RSS_TC_SIZE_4:
-			tc_size[i] = 4;
-			break;
-		case HCLGE_RSS_TC_SIZE_5:
-			tc_size[i] = 5;
-			break;
-		case HCLGE_RSS_TC_SIZE_6:
-			tc_size[i] = 6;
-			break;
-		case HCLGE_RSS_TC_SIZE_7:
-			tc_size[i] = 7;
-			break;
-		default:
-			break;
-		}
-		tc_offset[i] = hdev->rss_size_max * i;
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		tc_valid[i] = 1;
+		tc_size[i] = roundup_size;
+		tc_offset[i] = rss_size * i;
 	}
+
 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
 
 err:
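
For reference, the deleted switch statement that mapped rss_size_max onto the HCLGE_RSS_TC_SIZE_* constants is replaced by ilog2(roundup_pow_of_two(rss_size)). A userspace sketch of that mapping; the kernel provides these helpers in linux/log2.h, and they are reimplemented here only for illustration:

	/* Sketch: tc_size as computed above, i.e. the log2 of rss_size
	 * rounded up to a power of two.
	 */
	#include <stdio.h>

	static unsigned int roundup_pow_of_two(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	static unsigned int ilog2(unsigned int n)
	{
		unsigned int log = 0;

		while (n >>= 1)
			log++;
		return log;
	}

	int main(void)
	{
		/* rss_size 16 -> tc_size 4; rss_size 100 -> 128 -> tc_size 7 */
		printf("16 -> %u, 100 -> %u\n",
		       ilog2(roundup_pow_of_two(16)),
		       ilog2(roundup_pow_of_two(100)));
		return 0;
	}
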
@@ -3932,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 				goto err;
 
 			if (hdev->roce_client &&
-			    hnae_get_bit(hdev->ae_dev->flag,
-					 HNAE_DEV_SUPPORT_ROCE_B)) {
+			    hnae3_dev_roce_supported(hdev)) {
 				struct hnae3_client *rc = hdev->roce_client;
 
 				ret = hclge_init_roce_base_info(vport);
@@ -3956,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 
 			break;
 		case HNAE3_CLIENT_ROCE:
-			if (hnae_get_bit(hdev->ae_dev->flag,
-					 HNAE_DEV_SUPPORT_ROCE_B)) {
+			if (hnae3_dev_roce_supported(hdev)) {
 				hdev->roce_client = client;
 				vport->roce.client = client;
 			}
@@ -4069,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
-	const struct pci_device_id *id;
 	struct hclge_dev *hdev;
 	int ret;
 
@@ -4084,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->ae_dev = ae_dev;
 	ae_dev->priv = hdev;
 
-	id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
-	if (id)
-		hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
-
 	ret = hclge_pci_init(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "PCI init failed\n");
@@ -4150,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
-	ret = hclge_rss_init_hw(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
-		return ret;
-	}
-
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -4168,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	ret = hclge_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+		return ret;
+	}
+
 	setup_timer(&hdev->service_timer, hclge_service_timer,
 		    (unsigned long)hdev);
 	INIT_WORK(&hdev->service_task, hclge_service_task);