@@ -121,6 +121,7 @@ static void mlx5_ldev_free(struct kref *ref)
 	mlx5_lag_mp_cleanup(ldev);
 	cancel_delayed_work_sync(&ldev->bond_work);
 	destroy_workqueue(ldev->wq);
+	mutex_destroy(&ldev->lock);
 	kfree(ldev);
 }
 
@@ -150,6 +151,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
 	}
 
 	kref_init(&ldev->ref);
+	mutex_init(&ldev->lock);
 	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
 
 	ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -643,31 +645,11 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
 	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
 }
 
-static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
-				    struct mlx5_core_dev *dev1)
-{
-	if (dev0)
-		mlx5_esw_lock(dev0->priv.eswitch);
-	if (dev1)
-		mlx5_esw_lock(dev1->priv.eswitch);
-}
-
-static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
-				      struct mlx5_core_dev *dev1)
-{
-	if (dev1)
-		mlx5_esw_unlock(dev1->priv.eswitch);
-	if (dev0)
-		mlx5_esw_unlock(dev0->priv.eswitch);
-}
-
 static void mlx5_do_bond_work(struct work_struct *work)
 {
 	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
 					     bond_work);
-	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	int status;
 
 	status = mlx5_dev_list_trylock();
@@ -676,15 +658,16 @@ static void mlx5_do_bond_work(struct work_struct *work)
 		return;
 	}
 
+	mutex_lock(&ldev->lock);
 	if (ldev->mode_changes_in_progress) {
+		mutex_unlock(&ldev->lock);
 		mlx5_dev_list_unlock();
 		mlx5_queue_bond_work(ldev, HZ);
 		return;
 	}
 
-	mlx5_lag_lock_eswitches(dev0, dev1);
 	mlx5_do_bond(ldev);
-	mlx5_lag_unlock_eswitches(dev0, dev1);
+	mutex_unlock(&ldev->lock);
 	mlx5_dev_list_unlock();
 }
 
@@ -908,7 +891,6 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
 	dev->priv.lag = ldev;
 }
 
-/* Must be called with intf_mutex held */
 static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
 				  struct mlx5_core_dev *dev)
 {
@@ -946,13 +928,18 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
 			mlx5_core_err(dev, "Failed to alloc lag dev\n");
 			return 0;
 		}
-	} else {
-		if (ldev->mode_changes_in_progress)
-			return -EAGAIN;
-		mlx5_ldev_get(ldev);
+		mlx5_ldev_add_mdev(ldev, dev);
+		return 0;
 	}
 
+	mutex_lock(&ldev->lock);
+	if (ldev->mode_changes_in_progress) {
+		mutex_unlock(&ldev->lock);
+		return -EAGAIN;
+	}
+	mlx5_ldev_get(ldev);
 	mlx5_ldev_add_mdev(ldev, dev);
+	mutex_unlock(&ldev->lock);
 
 	return 0;
 }
@@ -966,14 +953,14 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
 		return;
 
 recheck:
-	mlx5_dev_list_lock();
+	mutex_lock(&ldev->lock);
 	if (ldev->mode_changes_in_progress) {
-		mlx5_dev_list_unlock();
+		mutex_unlock(&ldev->lock);
 		msleep(100);
 		goto recheck;
 	}
 	mlx5_ldev_remove_mdev(ldev, dev);
-	mlx5_dev_list_unlock();
+	mutex_unlock(&ldev->lock);
 	mlx5_ldev_put(ldev);
 }
 
@@ -984,32 +971,35 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
 recheck:
 	mlx5_dev_list_lock();
 	err = __mlx5_lag_dev_add_mdev(dev);
+	mlx5_dev_list_unlock();
+
 	if (err) {
-		mlx5_dev_list_unlock();
 		msleep(100);
 		goto recheck;
 	}
-	mlx5_dev_list_unlock();
 }
 
-/* Must be called with intf_mutex held */
 void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
 			    struct net_device *netdev)
 {
 	struct mlx5_lag *ldev;
+	bool lag_is_active;
 
 	ldev = mlx5_lag_dev(dev);
 	if (!ldev)
 		return;
 
+	mutex_lock(&ldev->lock);
 	mlx5_ldev_remove_netdev(ldev, netdev);
 	ldev->flags &= ~MLX5_LAG_FLAG_READY;
 
-	if (__mlx5_lag_is_active(ldev))
+	lag_is_active = __mlx5_lag_is_active(ldev);
+	mutex_unlock(&ldev->lock);
+
+	if (lag_is_active)
 		mlx5_queue_bond_work(ldev, 0);
 }
 
-/* Must be called with intf_mutex held */
 void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 			 struct net_device *netdev)
 {
@@ -1020,6 +1010,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 	if (!ldev)
 		return;
 
+	mutex_lock(&ldev->lock);
 	mlx5_ldev_add_netdev(ldev, dev, netdev);
 
 	for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -1028,6 +1019,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 
 	if (i >= MLX5_MAX_PORTS)
 		ldev->flags |= MLX5_LAG_FLAG_READY;
+	mutex_unlock(&ldev->lock);
 	mlx5_queue_bond_work(ldev, 0);
 }
 
@@ -1104,25 +1096,20 @@ EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
 
 void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
 {
-	struct mlx5_core_dev *dev0;
-	struct mlx5_core_dev *dev1;
 	struct mlx5_lag *ldev;
 
 	ldev = mlx5_lag_dev(dev);
 	if (!ldev)
 		return;
 
 	mlx5_dev_list_lock();
-
-	dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	dev1 = ldev->pf[MLX5_LAG_P2].dev;
+	mutex_lock(&ldev->lock);
 
 	ldev->mode_changes_in_progress++;
-	if (__mlx5_lag_is_active(ldev)) {
-		mlx5_lag_lock_eswitches(dev0, dev1);
+	if (__mlx5_lag_is_active(ldev))
 		mlx5_disable_lag(ldev);
-		mlx5_lag_unlock_eswitches(dev0, dev1);
-	}
+
+	mutex_unlock(&ldev->lock);
 	mlx5_dev_list_unlock();
 }
 
@@ -1134,9 +1121,9 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
 	if (!ldev)
 		return;
 
-	mlx5_dev_list_lock();
+	mutex_lock(&ldev->lock);
 	ldev->mode_changes_in_progress--;
-	mlx5_dev_list_unlock();
+	mutex_unlock(&ldev->lock);
 	mlx5_queue_bond_work(ldev, 0);
 }
 
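For readers following the locking change: the hunks above replace the eswitch locks and parts of the global device-list lock with a per-LAG `ldev->lock` mutex, so `mode_changes_in_progress` is only read or written under that mutex, and the bond work backs off and requeues instead of blocking while a mode change is in flight. The standalone userspace sketch below is not part of the commit; `struct lag_stub`, `do_bond_work()`, `disable_change()`, `enable_change()`, and the pthread types are all illustrative stand-ins for the mlx5 structures and helpers.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for struct mlx5_lag: the per-LAG mutex protects
 * mode_changes_in_progress and the bond state. */
struct lag_stub {
	pthread_mutex_t lock;         /* plays the role of ldev->lock */
	int mode_changes_in_progress; /* bumped by disable_change()   */
	bool active;
};

/* Analogue of mlx5_do_bond_work(): take the per-LAG lock and, if a mode
 * change is in flight, drop it and come back later instead of blocking. */
static void do_bond_work(struct lag_stub *ldev)
{
	pthread_mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		pthread_mutex_unlock(&ldev->lock);
		usleep(1000); /* the real code requeues the delayed work */
		return;
	}
	ldev->active = true; /* stands in for mlx5_do_bond() */
	pthread_mutex_unlock(&ldev->lock);
}

/* Analogues of mlx5_lag_disable_change()/mlx5_lag_enable_change(): the
 * counter is only ever touched under the same per-LAG mutex. */
static void disable_change(struct lag_stub *ldev)
{
	pthread_mutex_lock(&ldev->lock);
	ldev->mode_changes_in_progress++;
	if (ldev->active)
		ldev->active = false; /* stands in for mlx5_disable_lag() */
	pthread_mutex_unlock(&ldev->lock);
}

static void enable_change(struct lag_stub *ldev)
{
	pthread_mutex_lock(&ldev->lock);
	ldev->mode_changes_in_progress--;
	pthread_mutex_unlock(&ldev->lock);
}

int main(void)
{
	struct lag_stub ldev = { .lock = PTHREAD_MUTEX_INITIALIZER };

	disable_change(&ldev);
	do_bond_work(&ldev); /* bails out: a mode change is in progress */
	enable_change(&ldev);
	do_bond_work(&ldev); /* proceeds and activates the LAG */

	printf("active=%d\n", ldev.active);
	return 0;
}
```

Built with `cc -pthread`, the second `do_bond_work()` call proceeds once `enable_change()` has dropped the counter back to zero, mirroring how the patched driver serializes bond activation against mode changes on `ldev->lock` alone.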