@@ -161,7 +161,7 @@ static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
 		return -1;
 }
 
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
 {
 	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
 }
@@ -278,7 +278,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 
 	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
 
-	if (do_bond && !mlx5_lag_is_bonded(ldev)) {
+	if (do_bond && !__mlx5_lag_is_active(ldev)) {
 		if (!sriov_enabled)
 			for (i = 0; i < MLX5_MAX_PORTS; i++)
 				mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
@@ -290,9 +290,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 			mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 			mlx5_nic_vport_enable_roce(dev1);
 		}
-	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
+	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
 		mlx5_modify_lag(ldev, &tracker);
-	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
+	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
 		if (!sriov_enabled) {
 			mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 			mlx5_nic_vport_disable_roce(dev1);
@@ -555,7 +555,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
 	if (!ldev)
 		return;
 
-	if (mlx5_lag_is_bonded(ldev))
+	if (__mlx5_lag_is_active(ldev))
 		mlx5_deactivate_lag(ldev);
 
 	mlx5_lag_dev_remove_pf(ldev, dev);
@@ -579,7 +579,7 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 
 	mutex_lock(&lag_mutex);
 	ldev = mlx5_lag_dev_get(dev);
-	res  = ldev && mlx5_lag_is_bonded(ldev);
+	res  = ldev && __mlx5_lag_is_active(ldev);
 	mutex_unlock(&lag_mutex);
 
 	return res;
@@ -609,7 +609,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 	mutex_lock(&lag_mutex);
 	ldev = mlx5_lag_dev_get(dev);
 
-	if (!(ldev && mlx5_lag_is_bonded(ldev)))
+	if (!(ldev && __mlx5_lag_is_active(ldev)))
 		goto unlock;
 
 	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
@@ -638,7 +638,7 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		return true;
 
 	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+	if (!ldev || !__mlx5_lag_is_active(ldev) || ldev->pf[0].dev == dev)
 		return true;
 
 	/* If bonded, we do not add an IB device for PF1. */
@@ -665,7 +665,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 
 	mutex_lock(&lag_mutex);
 	ldev = mlx5_lag_dev_get(dev);
-	if (ldev && mlx5_lag_is_bonded(ldev)) {
+	if (ldev && __mlx5_lag_is_active(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
 		mdev[0] = ldev->pf[0].dev;
 		mdev[1] = ldev->pf[1].dev;
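
The @@ -579 hunk above shows the layering this rename makes explicit: the double-underscore helper is the raw, lockless check of the bonded flag, while the exported mlx5_lag_is_active() takes lag_mutex and delegates to it. Below is a minimal userspace sketch of that locked/lockless pairing; it uses pthreads and a stripped-down struct as stand-ins for the kernel mutex and the real struct mlx5_lag, so everything other than the two function shapes is illustrative only.

#include <stdbool.h>
#include <pthread.h>

#define MLX5_LAG_FLAG_BONDED 0x1

/* Stand-in for struct mlx5_lag; the real driver carries much more state. */
struct mlx5_lag {
	unsigned int flags;
};

/* Stand-in for the driver's lag_mutex (a kernel mutex in the original). */
static pthread_mutex_t lag_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Lockless check: callers are expected to already hold lag_mutex
 * (or otherwise know the LAG state cannot change under them). */
static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}

/* Locked, public-facing query that wraps the lockless helper,
 * mirroring the shape of mlx5_lag_is_active() in the @@ -579 hunk. */
static bool mlx5_lag_is_active(struct mlx5_lag *ldev)
{
	bool res;

	pthread_mutex_lock(&lag_mutex);
	res = ldev && __mlx5_lag_is_active(ldev);
	pthread_mutex_unlock(&lag_mutex);

	return res;
}

int main(void)
{
	struct mlx5_lag ldev = { .flags = MLX5_LAG_FLAG_BONDED };

	return mlx5_lag_is_active(&ldev) ? 0 : 1;
}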