@@ -2818,11 +2818,45 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
 	return NOTIFY_DONE;
 }
 
+/* To be eligible as a DSA master, a LAG must have all lower interfaces be
+ * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
+ * switches in the same switch tree.
+ */
+static int dsa_lag_master_validate(struct net_device *lag_dev,
+				   struct netlink_ext_ack *extack)
+{
+	struct net_device *lower1, *lower2;
+	struct list_head *iter1, *iter2;
+
+	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
+		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
+			if (!netdev_uses_dsa(lower1) ||
+			    !netdev_uses_dsa(lower2)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "All LAG ports must be eligible as DSA masters");
+				return notifier_from_errno(-EINVAL);
+			}
+
+			if (lower1 == lower2)
+				continue;
+
+			if (!dsa_port_tree_same(lower1->dsa_ptr,
+						lower2->dsa_ptr)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "LAG contains DSA masters of disjoint switch trees");
+				return notifier_from_errno(-EINVAL);
+			}
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
 static int
 dsa_master_prechangeupper_sanity_check(struct net_device *master,
 				       struct netdev_notifier_changeupper_info *info)
 {
-	struct netlink_ext_ack *extack;
+	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
 
 	if (!netdev_uses_dsa(master))
 		return NOTIFY_DONE;
@@ -2840,13 +2874,51 @@ dsa_master_prechangeupper_sanity_check(struct net_device *master,
 	if (netif_is_bridge_master(info->upper_dev))
 		return NOTIFY_DONE;
 
-	extack = netdev_notifier_info_to_extack(&info->info);
+	/* Allow LAG uppers, subject to further restrictions in
+	 * dsa_lag_master_prechangelower_sanity_check()
+	 */
+	if (netif_is_lag_master(info->upper_dev))
+		return dsa_lag_master_validate(info->upper_dev, extack);
 
 	NL_SET_ERR_MSG_MOD(extack,
 			   "DSA master cannot join unknown upper interfaces");
 	return notifier_from_errno(-EBUSY);
 }
 
+static int
+dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
+					   struct netdev_notifier_changeupper_info *info)
+{
+	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
+	struct net_device *lag_dev = info->upper_dev;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
+		return NOTIFY_DONE;
+
+	if (!info->linking)
+		return NOTIFY_DONE;
+
+	if (!netdev_uses_dsa(dev)) {
+		NL_SET_ERR_MSG(extack,
+			       "Only DSA masters can join a LAG DSA master");
+		return notifier_from_errno(-EINVAL);
+	}
+
+	netdev_for_each_lower_dev(lag_dev, lower, iter) {
+		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
+			NL_SET_ERR_MSG(extack,
+				       "Interface is DSA master for a different switch tree than this LAG");
+			return notifier_from_errno(-EINVAL);
+		}
+
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 /* Don't allow bridging of DSA masters, since the bridge layer rx_handler
  * prevents the DSA fake ethertype handler to be invoked, so we don't get the
  * chance to strip off and parse the DSA switch tag protocol header (the bridge
@@ -2887,6 +2959,136 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
 	return NOTIFY_DONE;
 }
 
+static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
+						   struct net_device *lag_dev)
+{
+	struct net_device *new_master = dsa_tree_find_first_master(dst);
+	struct dsa_port *dp;
+	int err;
+
+	dsa_tree_for_each_user_port(dp, dst) {
+		if (dsa_port_to_master(dp) != lag_dev)
+			continue;
+
+		err = dsa_slave_change_master(dp->slave, new_master, NULL);
+		if (err) {
+			netdev_err(dp->slave,
+				   "failed to restore master to %s: %pe\n",
+				   new_master->name, ERR_PTR(err));
+		}
+	}
+}
+
+static int dsa_master_lag_join(struct net_device *master,
+			       struct net_device *lag_dev,
+			       struct netdev_lag_upper_info *uinfo,
+			       struct netlink_ext_ack *extack)
+{
+	struct dsa_port *cpu_dp = master->dsa_ptr;
+	struct dsa_switch_tree *dst = cpu_dp->dst;
+	struct dsa_port *dp;
+	int err;
+
+	err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
+	if (err)
+		return err;
+
+	dsa_tree_for_each_user_port(dp, dst) {
+		if (dsa_port_to_master(dp) != master)
+			continue;
+
+		err = dsa_slave_change_master(dp->slave, lag_dev, extack);
+		if (err)
+			goto restore;
+	}
+
+	return 0;
+
+restore:
+	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
+		if (dsa_port_to_master(dp) != lag_dev)
+			continue;
+
+		err = dsa_slave_change_master(dp->slave, master, NULL);
+		if (err) {
+			netdev_err(dp->slave,
+				   "failed to restore master to %s: %pe\n",
+				   master->name, ERR_PTR(err));
+		}
+	}
+
+	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+
+	return err;
+}
+
+static void dsa_master_lag_leave(struct net_device *master,
+				 struct net_device *lag_dev)
+{
+	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
+	struct dsa_switch_tree *dst = cpu_dp->dst;
+	struct dsa_port *new_cpu_dp = NULL;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(lag_dev, lower, iter) {
+		if (netdev_uses_dsa(lower)) {
+			new_cpu_dp = lower->dsa_ptr;
+			break;
+		}
+	}
+
+	if (new_cpu_dp) {
+		/* Update the CPU port of the user ports still under the LAG
+		 * so that dsa_port_to_master() continues to work properly
+		 */
+		dsa_tree_for_each_user_port(dp, dst)
+			if (dsa_port_to_master(dp) == lag_dev)
+				dp->cpu_dp = new_cpu_dp;
+
+		/* Update the index of the virtual CPU port to match the lowest
+		 * physical CPU port
+		 */
+		lag_dev->dsa_ptr = new_cpu_dp;
+		wmb();
+	} else {
+		/* If the LAG DSA master has no ports left, migrate back all
+		 * user ports to the first physical CPU port
+		 */
+		dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
+	}
+
+	/* This DSA master has left its LAG in any case, so let
+	 * the CPU port leave the hardware LAG as well
+	 */
+	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+}
+
+static int dsa_master_changeupper(struct net_device *dev,
+				  struct netdev_notifier_changeupper_info *info)
+{
+	struct netlink_ext_ack *extack;
+	int err = NOTIFY_DONE;
+
+	if (!netdev_uses_dsa(dev))
+		return err;
+
+	extack = netdev_notifier_info_to_extack(&info->info);
+
+	if (netif_is_lag_master(info->upper_dev)) {
+		if (info->linking) {
+			err = dsa_master_lag_join(dev, info->upper_dev,
+						  info->upper_info, extack);
+			err = notifier_from_errno(err);
+		} else {
+			dsa_master_lag_leave(dev, info->upper_dev);
+			err = NOTIFY_OK;
+		}
+	}
+
+	return err;
+}
+
 static int dsa_slave_netdevice_event(struct notifier_block *nb,
 				     unsigned long event, void *ptr)
 {
@@ -2905,6 +3107,10 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 		if (notifier_to_errno(err))
 			return err;
 
+		err = dsa_lag_master_prechangelower_sanity_check(dev, info);
+		if (notifier_to_errno(err))
+			return err;
+
 		err = dsa_bridge_prechangelower_sanity_check(dev, info);
 		if (notifier_to_errno(err))
 			return err;
@@ -2930,19 +3136,32 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 		if (notifier_to_errno(err))
 			return err;
 
+		err = dsa_master_changeupper(dev, ptr);
+		if (notifier_to_errno(err))
+			return err;
+
 		break;
 	}
 	case NETDEV_CHANGELOWERSTATE: {
 		struct netdev_notifier_changelowerstate_info *info = ptr;
 		struct dsa_port *dp;
 		int err;
 
-		if (!dsa_slave_dev_check(dev))
-			break;
+		if (dsa_slave_dev_check(dev)) {
+			dp = dsa_slave_to_port(dev);
+
+			err = dsa_port_lag_change(dp, info->lower_state_info);
+		}
 
-		dp = dsa_slave_to_port(dev);
+		/* Mirror LAG port events on DSA masters that are in
+		 * a LAG towards their respective switch CPU ports
+		 */
+		if (netdev_uses_dsa(dev)) {
+			dp = dev->dsa_ptr;
+
+			err = dsa_port_lag_change(dp, info->lower_state_info);
+		}
 
-		err = dsa_port_lag_change(dp, info->lower_state_info);
 		return notifier_from_errno(err);
 	}
 	case NETDEV_CHANGE: