@@ -35,6 +35,7 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
 				     enum hclge_mta_dmac_sel_type mta_mac_sel,
 				     bool enable);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
 
 static struct hnae3_ae_algo ae_algo;
@@ -2446,8 +2447,212 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 	return ret;
 }
 
+static int hclge_notify_client(struct hclge_dev *hdev,
+			       enum hnae3_reset_notify_type type)
+{
+	struct hnae3_client *client = hdev->nic_client;
+	u16 i;
+
+	if (!client->ops->reset_notify)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+		struct hnae3_handle *handle = &hdev->vport[i].nic;
+		int ret;
+
+		ret = client->ops->reset_notify(handle, type);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int hclge_reset_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_RESET_WATI_MS	100
+#define HCLGE_RESET_WAIT_CNT	5
+	u32 val, reg, reg_bit;
+	u32 cnt = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_GLOBAL_RESET:
+		reg = HCLGE_GLOBAL_RESET_REG;
+		reg_bit = HCLGE_GLOBAL_RESET_BIT;
+		break;
+	case HNAE3_CORE_RESET:
+		reg = HCLGE_GLOBAL_RESET_REG;
+		reg_bit = HCLGE_CORE_RESET_BIT;
+		break;
+	case HNAE3_FUNC_RESET:
+		reg = HCLGE_FUN_RST_ING;
+		reg_bit = HCLGE_FUN_RST_ING_B;
+		break;
+	default:
+		dev_err(&hdev->pdev->dev,
+			"Wait for unsupported reset type: %d\n",
+			hdev->reset_type);
+		return -EINVAL;
+	}
+
+	val = hclge_read_dev(&hdev->hw, reg);
+	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+		msleep(HCLGE_RESET_WATI_MS);
+		val = hclge_read_dev(&hdev->hw, reg);
+		cnt++;
+	}
+
+	/* must clear reset status register to
+	 * prevent driver detect reset interrupt again
+	 */
+	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
+
+	if (cnt >= HCLGE_RESET_WAIT_CNT) {
+		dev_warn(&hdev->pdev->dev,
+			 "Wait for reset timeout: %d\n", hdev->reset_type);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
+{
+	struct hclge_desc desc;
+	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
+	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+	req->fun_reset_vfid = func_id;
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"send function reset cmd fail, status =%d\n", ret);
+
+	return ret;
+}
+
+static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	u32 val;
+
+	switch (type) {
+	case HNAE3_GLOBAL_RESET:
+		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
+		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
+		dev_info(&pdev->dev, "Global Reset requested\n");
+		break;
+	case HNAE3_CORE_RESET:
+		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
+		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
+		dev_info(&pdev->dev, "Core Reset requested\n");
+		break;
+	case HNAE3_FUNC_RESET:
+		dev_info(&pdev->dev, "PF Reset requested\n");
+		hclge_func_reset_cmd(hdev, 0);
+		break;
+	default:
+		dev_warn(&pdev->dev,
+			 "Unsupported reset type: %d\n", type);
+		break;
+	}
+}
+
+static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+{
+	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+	u32 rst_reg_val;
+
+	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+		rst_level = HNAE3_GLOBAL_RESET;
+	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+		rst_level = HNAE3_CORE_RESET;
+	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+		rst_level = HNAE3_IMP_RESET;
+
+	return rst_level;
+}
+
+static void hclge_reset_event(struct hnae3_handle *handle,
+			      enum hnae3_reset_type reset)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	dev_info(&hdev->pdev->dev,
+		 "Receive reset event , reset_type is %d", reset);
+
+	switch (reset) {
+	case HNAE3_FUNC_RESET:
+	case HNAE3_CORE_RESET:
+	case HNAE3_GLOBAL_RESET:
+		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
+			dev_err(&hdev->pdev->dev, "Already in reset state");
+			return;
+		}
+		hdev->reset_type = reset;
+		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+		schedule_work(&hdev->service_task);
+		break;
+	default:
+		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
+		break;
+	}
+}
+
+static void hclge_reset_subtask(struct hclge_dev *hdev)
+{
+	bool do_reset;
+
+	do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+
+	/* Reset is detected by interrupt */
+	if (hdev->reset_type == HNAE3_NONE_RESET)
+		hdev->reset_type = hclge_detected_reset_event(hdev);
+
+	if (hdev->reset_type == HNAE3_NONE_RESET)
+		return;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+	case HNAE3_CORE_RESET:
+	case HNAE3_GLOBAL_RESET:
+	case HNAE3_IMP_RESET:
+		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+		if (do_reset)
+			hclge_do_reset(hdev, hdev->reset_type);
+		else
+			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+
+		if (!hclge_reset_wait(hdev)) {
+			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+			hclge_reset_ae_dev(hdev->ae_dev);
+			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+		}
+		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+		break;
+	default:
+		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
+			hdev->reset_type);
+		break;
+	}
+	hdev->reset_type = HNAE3_NONE_RESET;
+}
+
 static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
 {
+	hclge_reset_subtask(hdev);
 	hclge_enable_vector(&hdev->misc_vector, true);
 }
 
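Note (editorial, not part of the patch): the recovery sequence above depends on the NIC client registered on each vport implementing the reset_notify callback that hclge_notify_client() calls with the DOWN/UNINIT/INIT/UP notifications. A minimal sketch of such a callback is shown below; the example_client_* helpers are hypothetical names used only for illustration.

/* Sketch only: one way a client could service the four notifications
 * issued from hclge_reset_subtask(). Helper names are hypothetical.
 */
static int example_client_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	switch (type) {
	case HNAE3_DOWN_CLIENT:		/* quiesce TX/RX before the reset */
		return example_client_stop(handle);
	case HNAE3_UNINIT_CLIENT:	/* release HW resources before reinit */
		return example_client_uninit(handle);
	case HNAE3_INIT_CLIENT:		/* reacquire HW resources after reinit */
		return example_client_init(handle);
	case HNAE3_UP_CLIENT:		/* resume TX/RX after the reset */
		return example_client_start(handle);
	default:
		return -EINVAL;
	}
}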
@@ -4498,6 +4703,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->flag |= HCLGE_FLAG_USE_MSIX;
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
+	hdev->reset_type = HNAE3_NONE_RESET;
 	ae_dev->priv = hdev;
 
 	ret = hclge_pci_init(hdev);
@@ -4630,6 +4836,84 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	return ret;
 }
 
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+	struct pci_dev *pdev = ae_dev->pdev;
+	int ret;
+
+	set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+	ret = hclge_cmd_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cmd queue init failed\n");
+		return ret;
+	}
+
+	ret = hclge_get_cap(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	ret = hclge_configure(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = hclge_map_tqp(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = hclge_mac_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_buffer_alloc(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
+	if (ret) {
+		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_init_vlan_config(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_tm_schd_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	/* Enable MISC vector(vector0) */
+	hclge_enable_vector(&hdev->misc_vector, true);
+
+	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
+		 HCLGE_DRIVER_NAME);
+
+	return 0;
+}
+
 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct hclge_dev *hdev = ae_dev->priv;
@@ -4699,6 +4983,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.get_mdix_mode = hclge_get_mdix_mode,
 	.set_vlan_filter = hclge_set_port_vlan_filter,
 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+	.reset_event = hclge_reset_event,
 };
 
 static struct hnae3_ae_algo ae_algo = {
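Note (editorial, not part of the patch): with .reset_event exported through hnae3_ae_ops, a client that detects a stalled queue can ask the PF driver for a function-level reset. A minimal, hypothetical sketch under the handle-based signature added above; example_get_handle() is a placeholder for however the client reaches its struct hnae3_handle.

/* Sketch only: request a PF (function) reset through the new op. */
static void example_tx_timeout(struct net_device *ndev)
{
	struct hnae3_handle *h = example_get_handle(ndev);	/* hypothetical */

	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h, HNAE3_FUNC_RESET);
}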