Skip to content

Commit ec2fa47

Browse files
mark-bloch authored and Saeed Mahameed committed
net/mlx5: Lag, use lag lock
Use a lag-specific lock instead of depending on external locks to synchronise the lag creation/destruction. With this, taking the E-Switch mode lock is no longer needed for syncing lag logic.

Clean up any dead code that is left over and don't export functions that aren't used outside the E-Switch core code.

Signed-off-by: Mark Bloch <[email protected]>
Reviewed-by: Maor Gottlieb <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
1 parent 4202ea9 commit ec2fa47

File tree

4 files changed

+35
-65
lines changed

4 files changed

+35
-65
lines changed

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1569,9 +1569,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
15691569
ida_init(&esw->offloads.vport_metadata_ida);
15701570
xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
15711571
mutex_init(&esw->state_lock);
1572-
lockdep_register_key(&esw->mode_lock_key);
15731572
init_rwsem(&esw->mode_lock);
1574-
lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
15751573
refcount_set(&esw->qos.refcnt, 0);
15761574

15771575
esw->enabled_vports = 0;
@@ -1615,7 +1613,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
16151613
esw->dev->priv.eswitch = NULL;
16161614
destroy_workqueue(esw->work_queue);
16171615
WARN_ON(refcount_read(&esw->qos.refcnt));
1618-
lockdep_unregister_key(&esw->mode_lock_key);
16191616
mutex_destroy(&esw->state_lock);
16201617
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
16211618
xa_destroy(&esw->offloads.vhca_map);
@@ -2003,17 +2000,6 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw)
20032000
up_write(&esw->mode_lock);
20042001
}
20052002

2006-
/**
2007-
* mlx5_esw_lock() - Take write lock on esw mode lock
2008-
* @esw: eswitch device.
2009-
*/
2010-
void mlx5_esw_lock(struct mlx5_eswitch *esw)
2011-
{
2012-
if (!mlx5_esw_allowed(esw))
2013-
return;
2014-
down_write(&esw->mode_lock);
2015-
}
2016-
20172003
/**
20182004
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
20192005
*

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -331,7 +331,6 @@ struct mlx5_eswitch {
331331
u32 large_group_num;
332332
} params;
333333
struct blocking_notifier_head n_head;
334-
struct lock_class_key mode_lock_key;
335334
};
336335

337336
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -704,7 +703,6 @@ void mlx5_esw_get(struct mlx5_core_dev *dev);
704703
void mlx5_esw_put(struct mlx5_core_dev *dev);
705704
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
706705
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
707-
void mlx5_esw_lock(struct mlx5_eswitch *esw);
708706

709707
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
710708

@@ -730,9 +728,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
730728
return ERR_PTR(-EOPNOTSUPP);
731729
}
732730

733-
static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
734-
static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }
735-
736731
static inline struct mlx5_flow_handle *
737732
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
738733
{

drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c

Lines changed: 33 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,7 @@ static void mlx5_ldev_free(struct kref *ref)
121121
mlx5_lag_mp_cleanup(ldev);
122122
cancel_delayed_work_sync(&ldev->bond_work);
123123
destroy_workqueue(ldev->wq);
124+
mutex_destroy(&ldev->lock);
124125
kfree(ldev);
125126
}
126127

@@ -150,6 +151,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
150151
}
151152

152153
kref_init(&ldev->ref);
154+
mutex_init(&ldev->lock);
153155
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
154156

155157
ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -643,31 +645,11 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
643645
queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
644646
}
645647

646-
static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
647-
struct mlx5_core_dev *dev1)
648-
{
649-
if (dev0)
650-
mlx5_esw_lock(dev0->priv.eswitch);
651-
if (dev1)
652-
mlx5_esw_lock(dev1->priv.eswitch);
653-
}
654-
655-
static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
656-
struct mlx5_core_dev *dev1)
657-
{
658-
if (dev1)
659-
mlx5_esw_unlock(dev1->priv.eswitch);
660-
if (dev0)
661-
mlx5_esw_unlock(dev0->priv.eswitch);
662-
}
663-
664648
static void mlx5_do_bond_work(struct work_struct *work)
665649
{
666650
struct delayed_work *delayed_work = to_delayed_work(work);
667651
struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
668652
bond_work);
669-
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
670-
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
671653
int status;
672654

673655
status = mlx5_dev_list_trylock();
@@ -676,15 +658,16 @@ static void mlx5_do_bond_work(struct work_struct *work)
676658
return;
677659
}
678660

661+
mutex_lock(&ldev->lock);
679662
if (ldev->mode_changes_in_progress) {
663+
mutex_unlock(&ldev->lock);
680664
mlx5_dev_list_unlock();
681665
mlx5_queue_bond_work(ldev, HZ);
682666
return;
683667
}
684668

685-
mlx5_lag_lock_eswitches(dev0, dev1);
686669
mlx5_do_bond(ldev);
687-
mlx5_lag_unlock_eswitches(dev0, dev1);
670+
mutex_unlock(&ldev->lock);
688671
mlx5_dev_list_unlock();
689672
}
690673

@@ -908,7 +891,6 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
908891
dev->priv.lag = ldev;
909892
}
910893

911-
/* Must be called with intf_mutex held */
912894
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
913895
struct mlx5_core_dev *dev)
914896
{
@@ -946,13 +928,18 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
946928
mlx5_core_err(dev, "Failed to alloc lag dev\n");
947929
return 0;
948930
}
949-
} else {
950-
if (ldev->mode_changes_in_progress)
951-
return -EAGAIN;
952-
mlx5_ldev_get(ldev);
931+
mlx5_ldev_add_mdev(ldev, dev);
932+
return 0;
953933
}
954934

935+
mutex_lock(&ldev->lock);
936+
if (ldev->mode_changes_in_progress) {
937+
mutex_unlock(&ldev->lock);
938+
return -EAGAIN;
939+
}
940+
mlx5_ldev_get(ldev);
955941
mlx5_ldev_add_mdev(ldev, dev);
942+
mutex_unlock(&ldev->lock);
956943

957944
return 0;
958945
}
@@ -966,14 +953,14 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
966953
return;
967954

968955
recheck:
969-
mlx5_dev_list_lock();
956+
mutex_lock(&ldev->lock);
970957
if (ldev->mode_changes_in_progress) {
971-
mlx5_dev_list_unlock();
958+
mutex_unlock(&ldev->lock);
972959
msleep(100);
973960
goto recheck;
974961
}
975962
mlx5_ldev_remove_mdev(ldev, dev);
976-
mlx5_dev_list_unlock();
963+
mutex_unlock(&ldev->lock);
977964
mlx5_ldev_put(ldev);
978965
}
979966

@@ -984,32 +971,35 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
984971
recheck:
985972
mlx5_dev_list_lock();
986973
err = __mlx5_lag_dev_add_mdev(dev);
974+
mlx5_dev_list_unlock();
975+
987976
if (err) {
988-
mlx5_dev_list_unlock();
989977
msleep(100);
990978
goto recheck;
991979
}
992-
mlx5_dev_list_unlock();
993980
}
994981

995-
/* Must be called with intf_mutex held */
996982
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
997983
struct net_device *netdev)
998984
{
999985
struct mlx5_lag *ldev;
986+
bool lag_is_active;
1000987

1001988
ldev = mlx5_lag_dev(dev);
1002989
if (!ldev)
1003990
return;
1004991

992+
mutex_lock(&ldev->lock);
1005993
mlx5_ldev_remove_netdev(ldev, netdev);
1006994
ldev->flags &= ~MLX5_LAG_FLAG_READY;
1007995

1008-
if (__mlx5_lag_is_active(ldev))
996+
lag_is_active = __mlx5_lag_is_active(ldev);
997+
mutex_unlock(&ldev->lock);
998+
999+
if (lag_is_active)
10091000
mlx5_queue_bond_work(ldev, 0);
10101001
}
10111002

1012-
/* Must be called with intf_mutex held */
10131003
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
10141004
struct net_device *netdev)
10151005
{
@@ -1020,6 +1010,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
10201010
if (!ldev)
10211011
return;
10221012

1013+
mutex_lock(&ldev->lock);
10231014
mlx5_ldev_add_netdev(ldev, dev, netdev);
10241015

10251016
for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -1028,6 +1019,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
10281019

10291020
if (i >= MLX5_MAX_PORTS)
10301021
ldev->flags |= MLX5_LAG_FLAG_READY;
1022+
mutex_unlock(&ldev->lock);
10311023
mlx5_queue_bond_work(ldev, 0);
10321024
}
10331025

@@ -1104,25 +1096,20 @@ EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
11041096

11051097
void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
11061098
{
1107-
struct mlx5_core_dev *dev0;
1108-
struct mlx5_core_dev *dev1;
11091099
struct mlx5_lag *ldev;
11101100

11111101
ldev = mlx5_lag_dev(dev);
11121102
if (!ldev)
11131103
return;
11141104

11151105
mlx5_dev_list_lock();
1116-
1117-
dev0 = ldev->pf[MLX5_LAG_P1].dev;
1118-
dev1 = ldev->pf[MLX5_LAG_P2].dev;
1106+
mutex_lock(&ldev->lock);
11191107

11201108
ldev->mode_changes_in_progress++;
1121-
if (__mlx5_lag_is_active(ldev)) {
1122-
mlx5_lag_lock_eswitches(dev0, dev1);
1109+
if (__mlx5_lag_is_active(ldev))
11231110
mlx5_disable_lag(ldev);
1124-
mlx5_lag_unlock_eswitches(dev0, dev1);
1125-
}
1111+
1112+
mutex_unlock(&ldev->lock);
11261113
mlx5_dev_list_unlock();
11271114
}
11281115

@@ -1134,9 +1121,9 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
11341121
if (!ldev)
11351122
return;
11361123

1137-
mlx5_dev_list_lock();
1124+
mutex_lock(&ldev->lock);
11381125
ldev->mode_changes_in_progress--;
1139-
mlx5_dev_list_unlock();
1126+
mutex_unlock(&ldev->lock);
11401127
mlx5_queue_bond_work(ldev, 0);
11411128
}
11421129

drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,8 @@ struct mlx5_lag {
5656
struct notifier_block nb;
5757
struct lag_mp lag_mp;
5858
struct mlx5_lag_port_sel port_sel;
59+
/* Protect lag fields/state changes */
60+
struct mutex lock;
5961
};
6062

6163
static inline struct mlx5_lag *

0 commit comments

Comments (0)