@@ -2498,14 +2498,33 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
 
 static void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 {
+	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
+	struct net_device *netdev = priv->netdev;
+
+	mlx5e_netdev_set_tcs(netdev);
+	if (netdev->real_num_tx_queues != num_txqs)
+		netif_set_real_num_tx_queues(netdev, num_txqs);
+	if (netdev->real_num_rx_queues != priv->channels.num)
+		netif_set_real_num_rx_queues(netdev, priv->channels.num);
+
 	mlx5e_build_channels_tx_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
 	netif_tx_start_all_queues(priv->netdev);
+
+	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+		mlx5e_add_sqs_fwd_rules(priv);
+
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
 }
 
 static void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
+	mlx5e_redirect_rqts_to_drop(priv);
+
+	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+		mlx5e_remove_sqs_fwd_rules(priv);
+
 	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
 	 * polling for inactive tx queues.
 	 */
@@ -2517,40 +2536,24 @@ static void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 int mlx5e_open_locked(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int num_txqs;
 	int err;
 
 	set_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	mlx5e_netdev_set_tcs(netdev);
-
-	num_txqs = priv->channels.params.num_channels * priv->channels.params.num_tc;
-	netif_set_real_num_tx_queues(netdev, num_txqs);
-	netif_set_real_num_rx_queues(netdev, priv->channels.params.num_channels);
-
 	err = mlx5e_open_channels(priv, &priv->channels);
 	if (err)
 		goto err_clear_state_opened_flag;
 
 	mlx5e_refresh_tirs(priv, false);
 	mlx5e_activate_priv_channels(priv);
-	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
 	mlx5e_update_carrier(priv);
 	mlx5e_timestamp_init(priv);
 
 	if (priv->profile->update_stats)
 		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		err = mlx5e_add_sqs_fwd_rules(priv);
-		if (err)
-			goto err_close_channels;
-	}
 	return 0;
 
-err_close_channels:
-	mlx5e_close_channels(&priv->channels);
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	return err;
@@ -2571,7 +2574,6 @@ int mlx5e_open(struct net_device *netdev)
 int mlx5e_close_locked(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
 
 	/* May already be CLOSED in case a previous configuration operation
 	 * (e.g RX/TX queue size change) that involves close&open failed.
@@ -2581,12 +2583,8 @@ int mlx5e_close_locked(struct net_device *netdev)
 
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		mlx5e_remove_sqs_fwd_rules(priv);
-
 	mlx5e_timestamp_cleanup(priv);
 	netif_carrier_off(priv->netdev);
-	mlx5e_redirect_rqts_to_drop(priv);
 	mlx5e_deactivate_priv_channels(priv);
 	mlx5e_close_channels(&priv->channels);
 
0 commit comments