@@ -447,6 +447,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
 
 	ice_vsi_free_stats(vsi);
 	ice_vsi_free_arrays(vsi);
+	mutex_destroy(&vsi->xdp_state_lock);
 	mutex_unlock(&pf->sw_mutex);
 	devm_kfree(dev, vsi);
 }
@@ -626,6 +627,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
 	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
 					 pf->next_vsi);
 
+	mutex_init(&vsi->xdp_state_lock);
+
 unlock_pf:
 	mutex_unlock(&pf->sw_mutex);
 	return vsi;
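
The two hunks above pair a mutex_init() in the VSI allocation path with a mutex_destroy() in the free path, so the new xdp_state_lock lives exactly as long as the VSI whose XDP state it protects. The sketch below is a minimal userspace analogue of that lifecycle using POSIX threads rather than the kernel mutex API; struct vsi_like and its fields are invented for illustration and are not driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a VSI-like object whose XDP state is
 * guarded by a mutex with the same lifetime as the object itself.
 */
struct vsi_like {
	pthread_mutex_t xdp_state_lock;
	int xdp_enabled;		/* protected by xdp_state_lock */
};

static struct vsi_like *vsi_alloc(void)
{
	struct vsi_like *v = calloc(1, sizeof(*v));

	if (!v)
		return NULL;
	/* Initialize the lock before the object becomes reachable. */
	pthread_mutex_init(&v->xdp_state_lock, NULL);
	return v;
}

static void vsi_free(struct vsi_like *v)
{
	/* Destroy only after the last user has unlocked. */
	pthread_mutex_destroy(&v->xdp_state_lock);
	free(v);
}

int main(void)
{
	struct vsi_like *v = vsi_alloc();

	if (!v)
		return 1;
	pthread_mutex_lock(&v->xdp_state_lock);
	v->xdp_enabled = 1;
	pthread_mutex_unlock(&v->xdp_state_lock);
	printf("xdp_enabled=%d\n", v->xdp_enabled);
	vsi_free(v);
	return 0;
}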
@@ -2286,9 +2289,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 
 		ice_vsi_map_rings_to_vectors(vsi);
 
-		/* Associate q_vector rings to napi */
-		ice_vsi_set_napi_queues(vsi);
-
 		vsi->stat_offsets_loaded = false;
 
 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -2426,7 +2426,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
 		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
 			vsi->vsi_num, err);
 
-	if (ice_is_xdp_ena_vsi(vsi))
+	if (vsi->xdp_rings)
 		/* return value check can be skipped here, it always returns
 		 * 0 if reset is in progress
 		 */
@@ -2528,7 +2528,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
 		for (q = 0; q < q_vector->num_ring_tx; q++) {
 			ice_write_itr(&q_vector->tx, 0);
 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
-			if (ice_is_xdp_ena_vsi(vsi)) {
+			if (vsi->xdp_rings) {
 				u32 xdp_txq = txq + vsi->num_xdp_txq;
 
 				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
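
Both of the preceding hunks replace ice_is_xdp_ena_vsi(vsi), which tests a feature flag, with a direct check of vsi->xdp_rings, the pointer to the resource itself. During resets the flag and the rings can briefly disagree, so cleanup has to key off whether the rings actually exist. Below is a minimal userspace sketch of that distinction; struct dev_state and its fields are invented for illustration and are not driver code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: the user-visible feature flag can lag the actual
 * resource during setup/teardown, so cleanup keys off the pointer.
 */
struct dev_state {
	bool xdp_enabled;	/* feature flag, may already be cleared */
	int *xdp_rings;		/* the resource that must be freed */
};

static void teardown(struct dev_state *d)
{
	/* Guarding on d->xdp_enabled here could leak the rings when the
	 * flag is cleared first; the pointer check cannot go stale.
	 */
	if (d->xdp_rings) {
		free(d->xdp_rings);
		d->xdp_rings = NULL;
	}
}

int main(void)
{
	struct dev_state d = {
		.xdp_enabled = false,			/* flag already dropped */
		.xdp_rings = calloc(4, sizeof(int)),	/* rings still live */
	};

	teardown(&d);
	printf("rings freed: %s\n", d.xdp_rings ? "no" : "yes");
	return 0;
}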
@@ -2628,6 +2628,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
 		ice_down(vsi);
 
+	ice_vsi_clear_napi_queues(vsi);
 	ice_vsi_free_irq(vsi);
 	ice_vsi_free_tx_rings(vsi);
 	ice_vsi_free_rx_rings(vsi);
@@ -2671,143 +2672,78 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
  */
 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 {
-	if (test_bit(ICE_VSI_DOWN, vsi->state))
-		return;
+	bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
 
 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
 
 	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
 		if (netif_running(vsi->netdev)) {
 			if (!locked)
 				rtnl_lock();
-
-			ice_vsi_close(vsi);
+			already_down = test_bit(ICE_VSI_DOWN, vsi->state);
+			if (!already_down)
+				ice_vsi_close(vsi);
 
 			if (!locked)
 				rtnl_unlock();
-		} else {
+		} else if (!already_down) {
 			ice_vsi_close(vsi);
 		}
-	} else if (vsi->type == ICE_VSI_CTRL) {
+	} else if (vsi->type == ICE_VSI_CTRL && !already_down) {
 		ice_vsi_close(vsi);
 	}
 }
 
 /**
- * __ice_queue_set_napi - Set the napi instance for the queue
- * @dev: device to which NAPI and queue belong
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
- * @locked: is the rtnl_lock already held
- *
- * Set the napi instance for the queue. Caller indicates the lock status.
- */
-static void
-__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
-		     enum netdev_queue_type type, struct napi_struct *napi,
-		     bool locked)
-{
-	if (!locked)
-		rtnl_lock();
-	netif_queue_set_napi(dev, queue_index, type, napi);
-	if (!locked)
-		rtnl_unlock();
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
- * @vsi: VSI being configured
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
+ * ice_vsi_set_napi_queues - associate netdev queues with napi
+ * @vsi: VSI pointer
  *
- * Set the napi instance for the queue. The rtnl lock state is derived from the
- * execution path.
+ * Associate queue[s] with napi for all vectors.
+ * The caller must hold rtnl_lock.
  */
-void
-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
-		   enum netdev_queue_type type, struct napi_struct *napi)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
 {
-	struct ice_pf *pf = vsi->back;
+	struct net_device *netdev = vsi->netdev;
+	int q_idx, v_idx;
 
-	if (!vsi->netdev)
+	if (!netdev)
 		return;
 
-	if (current_work() == &pf->serv_task ||
-	    test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
-	    test_bit(ICE_DOWN, pf->state) ||
-	    test_bit(ICE_SUSPENDED, pf->state))
-		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
-				     false);
-	else
-		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
-				     true);
-}
+	ice_for_each_rxq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
+				     &vsi->rx_rings[q_idx]->q_vector->napi);
 
-/**
- * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- * @locked: is the rtnl_lock already held
- *
- * Associate the q_vector napi with all the queue[s] on the vector.
- * Caller indicates the lock status.
- */
-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
-{
-	struct ice_rx_ring *rx_ring;
-	struct ice_tx_ring *tx_ring;
-
-	ice_for_each_rx_ring(rx_ring, q_vector->rx)
-		__ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
-				     NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
-				     locked);
-
-	ice_for_each_tx_ring(tx_ring, q_vector->tx)
-		__ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
-				     NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
-				     locked);
+	ice_for_each_txq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
+				     &vsi->tx_rings[q_idx]->q_vector->napi);
 	/* Also set the interrupt number for the NAPI */
-	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
-}
-
-/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- *
- * Associate the q_vector napi with all the queue[s] on the vector
- */
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
-{
-	struct ice_rx_ring *rx_ring;
-	struct ice_tx_ring *tx_ring;
-
-	ice_for_each_rx_ring(rx_ring, q_vector->rx)
-		ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
-				   NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
 
-	ice_for_each_tx_ring(tx_ring, q_vector->tx)
-		ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
-				   NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
-	/* Also set the interrupt number for the NAPI */
-	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+		netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+	}
 }
 
 /**
- * ice_vsi_set_napi_queues
+ * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
  * @vsi: VSI pointer
  *
- * Associate queue[s] with napi for all vectors
+ * Clear the association between all VSI queues queue[s] and napi.
+ * The caller must hold rtnl_lock.
  */
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
 {
-	int i;
+	struct net_device *netdev = vsi->netdev;
+	int q_idx;
 
-	if (!vsi->netdev)
+	if (!netdev)
 		return;
 
-	ice_for_each_q_vector(vsi, i)
-		ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
+	ice_for_each_txq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
+
+	ice_for_each_rxq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
 }
 
 /**
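
One detail of the reworked ice_dis_vsi() above is worth calling out: the ICE_VSI_DOWN bit is sampled before rtnl_lock is taken and then re-read once the lock is held, because another path may close the VSI in the window between the two reads. Below is a minimal userspace sketch of that re-check-under-lock pattern, with a pthread mutex standing in for rtnl_lock; all names are invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* stands in for rtnl_lock */
static bool vsi_down;

/* Mirrors the shape of the ice_dis_vsi() change: sample the DOWN state
 * early (useful on paths that never take the lock), then re-sample it
 * under the lock before acting, since it may have changed in between.
 */
static void dis_vsi(void)
{
	bool already_down = vsi_down;	/* unlocked, advisory read */

	pthread_mutex_lock(&rtnl);
	already_down = vsi_down;	/* authoritative re-read under the lock */
	if (!already_down) {
		vsi_down = true;	/* stands in for ice_vsi_close() */
		printf("closed the VSI\n");
	} else {
		printf("already down, nothing to do\n");
	}
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	dis_vsi();	/* closes the VSI */
	dis_vsi();	/* sees it already down */
	return 0;
}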
@@ -3039,42 +2975,47 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
 		return -EINVAL;
 
+	mutex_lock(&vsi->xdp_state_lock);
+
 	ret = ice_vsi_realloc_stat_arrays(vsi);
 	if (ret)
-		goto err_vsi_cfg;
+		goto unlock;
 
 	ice_vsi_decfg(vsi);
 	ret = ice_vsi_cfg_def(vsi);
 	if (ret)
-		goto err_vsi_cfg;
+		goto unlock;
 
 	coalesce = kcalloc(vsi->num_q_vectors,
 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
-	if (!coalesce)
-		return -ENOMEM;
+	if (!coalesce) {
+		ret = -ENOMEM;
+		goto decfg;
+	}
 
 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
 	ret = ice_vsi_cfg_tc_lan(pf, vsi);
 	if (ret) {
 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
 			ret = -EIO;
-			goto err_vsi_cfg_tc_lan;
+			goto free_coalesce;
 		}
 
-		kfree(coalesce);
-		return ice_schedule_reset(pf, ICE_RESET_PFR);
+		ret = ice_schedule_reset(pf, ICE_RESET_PFR);
+		goto free_coalesce;
 	}
 
 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
-	kfree(coalesce);
-
-	return 0;
+	clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
 
-err_vsi_cfg_tc_lan:
-	ice_vsi_decfg(vsi);
+free_coalesce:
 	kfree(coalesce);
-err_vsi_cfg:
+decfg:
+	if (ret)
+		ice_vsi_decfg(vsi);
+unlock:
+	mutex_unlock(&vsi->xdp_state_lock);
 
 	return ret;
 }