@@ -1070,57 +1070,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
 	vport->egress.acl = NULL;
 }
 
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-				 struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+					   struct mlx5_vport *vport)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *acl;
 	struct mlx5_flow_group *g;
 	void *match_criteria;
 	u32 *flow_group_in;
-	/* The ingress acl table contains 4 groups
-	 * (2 active rules at the same time -
-	 * 1 allow rule from one of the first 3 groups.
-	 * 1 drop rule from the last group):
-	 * 1)Allow untagged traffic with smac=original mac.
-	 * 2)Allow untagged traffic.
-	 * 3)Allow traffic with smac=original mac.
-	 * 4)Drop all other traffic.
-	 */
-	int table_size = 4;
-	int err = 0;
-
-	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
-		return -EOPNOTSUPP;
-
-	if (!IS_ERR_OR_NULL(vport->ingress.acl))
-		return 0;
-
-	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
-		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
-	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
-		return -EOPNOTSUPP;
-	}
+	int err;
 
 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
 	if (!flow_group_in)
 		return -ENOMEM;
 
-	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR(acl)) {
-		err = PTR_ERR(acl);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.acl = acl;
-
 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1130,29 +1094,29 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
-	g = mlx5_create_flow_group(acl, flow_group_in);
+	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
 	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
 			 vport->vport, err);
-		goto out;
+		goto spoof_err;
 	}
-	vport->ingress.allow_untagged_spoofchk_grp = g;
+	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
 
 	memset(flow_group_in, 0, inlen);
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
-	g = mlx5_create_flow_group(acl, flow_group_in);
+	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
 	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
 			 vport->vport, err);
-		goto out;
+		goto untagged_err;
 	}
-	vport->ingress.allow_untagged_only_grp = g;
+	vport->ingress.legacy.allow_untagged_only_grp = g;
 
 	memset(flow_group_in, 0, inlen);
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1161,80 +1125,134 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
-	g = mlx5_create_flow_group(acl, flow_group_in);
+	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
 	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
 			 vport->vport, err);
-		goto out;
+		goto allow_spoof_err;
 	}
-	vport->ingress.allow_spoofchk_only_grp = g;
+	vport->ingress.legacy.allow_spoofchk_only_grp = g;
 
 	memset(flow_group_in, 0, inlen);
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
-	g = mlx5_create_flow_group(acl, flow_group_in);
+	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
 	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
 			 vport->vport, err);
-		goto out;
+		goto drop_err;
 	}
-	vport->ingress.drop_grp = g;
+	vport->ingress.legacy.drop_grp = g;
+	kvfree(flow_group_in);
+	return 0;
 
-out:
-	if (err) {
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_spoofchk_only_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_untagged_only_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
-			mlx5_destroy_flow_group(
-				vport->ingress.allow_untagged_spoofchk_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.acl))
-			mlx5_destroy_flow_table(vport->ingress.acl);
+drop_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
 	}
-
+allow_spoof_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+		vport->ingress.legacy.allow_untagged_only_grp = NULL;
+	}
+untagged_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+	}
+spoof_err:
 	kvfree(flow_group_in);
 	return err;
 }
 
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+				       struct mlx5_vport *vport, int table_size)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *acl;
+	int vport_index;
+	int err;
+
+	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+		return -EOPNOTSUPP;
+
+	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+	vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+						    vport_index);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+			 vport->vport);
+		return -EOPNOTSUPP;
+	}
+
+	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+	if (IS_ERR(acl)) {
+		err = PTR_ERR(acl);
+		esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
+			 vport->vport, err);
+		return err;
+	}
+	vport->ingress.acl = acl;
+	return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+	if (!vport->ingress.acl)
+		return;
+
+	mlx5_destroy_flow_table(vport->ingress.acl);
+	vport->ingress.acl = NULL;
+}
+
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
 				     struct mlx5_vport *vport)
 {
-	if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_rule)) {
+	if (vport->ingress.legacy.drop_rule) {
 		mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
 		vport->ingress.legacy.drop_rule = NULL;
 	}
 
-	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+	if (vport->ingress.allow_rule) {
 		mlx5_del_flow_rules(vport->ingress.allow_rule);
 		vport->ingress.allow_rule = NULL;
 	}
 }
 
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-				   struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
+						 struct mlx5_vport *vport)
 {
-	if (IS_ERR_OR_NULL(vport->ingress.acl))
+	if (!vport->ingress.acl)
 		return;
 
 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
 
 	esw_vport_cleanup_ingress_rules(esw, vport);
-	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
-	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
-	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
-	mlx5_destroy_flow_group(vport->ingress.drop_grp);
-	mlx5_destroy_flow_table(vport->ingress.acl);
-	vport->ingress.acl = NULL;
-	vport->ingress.drop_grp = NULL;
-	vport->ingress.allow_spoofchk_only_grp = NULL;
-	vport->ingress.allow_untagged_only_grp = NULL;
-	vport->ingress.allow_untagged_spoofchk_grp = NULL;
+	if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+	}
+	if (vport->ingress.legacy.allow_untagged_only_grp) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+		vport->ingress.legacy.allow_untagged_only_grp = NULL;
+	}
+	if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+	}
+	if (vport->ingress.legacy.drop_grp) {
+		mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+		vport->ingress.legacy.drop_grp = NULL;
+	}
+	esw_vport_destroy_ingress_acl_table(vport);
 }
 
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
@@ -1249,19 +1267,36 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 	int err = 0;
 	u8 *smac_v;
 
+	/* The ingress acl table contains 4 groups
+	 * (2 active rules at the same time -
+	 * 1 allow rule from one of the first 3 groups.
+	 * 1 drop rule from the last group):
+	 * 1)Allow untagged traffic with smac=original mac.
+	 * 2)Allow untagged traffic.
+	 * 3)Allow traffic with smac=original mac.
+	 * 4)Drop all other traffic.
+	 */
+	int table_size = 4;
+
 	esw_vport_cleanup_ingress_rules(esw, vport);
 
 	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
-		esw_vport_disable_ingress_acl(esw, vport);
+		esw_vport_disable_legacy_ingress_acl(esw, vport);
 		return 0;
 	}
 
-	err = esw_vport_enable_ingress_acl(esw, vport);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "failed to enable ingress acl (%d) on vport[%d]\n",
-			       err, vport->vport);
-		return err;
+	if (!vport->ingress.acl) {
+		err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+		if (err) {
+			esw_warn(esw->dev,
+				 "vport[%d] enable ingress acl err (%d)\n",
+				 err, vport->vport);
+			return err;
+		}
+
+		err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+		if (err)
+			goto out;
 	}
 
 	esw_debug(esw->dev,
@@ -1322,10 +1357,11 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 		vport->ingress.legacy.drop_rule = NULL;
 		goto out;
 	}
+	kvfree(spec);
+	return 0;
 
 out:
-	if (err)
-		esw_vport_cleanup_ingress_rules(esw, vport);
+	esw_vport_disable_legacy_ingress_acl(esw, vport);
 	kvfree(spec);
 	return err;
 }
@@ -1705,7 +1741,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
 	return 0;
 
 egress_err:
-	esw_vport_disable_ingress_acl(esw, vport);
+	esw_vport_disable_legacy_ingress_acl(esw, vport);
 	mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
 	vport->egress.legacy.drop_counter = NULL;
 
@@ -1735,7 +1771,7 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
 	mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
 	vport->egress.legacy.drop_counter = NULL;
 
-	esw_vport_disable_ingress_acl(esw, vport);
+	esw_vport_disable_legacy_ingress_acl(esw, vport);
 	mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
 	vport->ingress.legacy.drop_counter = NULL;
 }
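
For reference, a minimal sketch (not part of the patch) of how the split helpers compose in the legacy ingress path, following the esw_vport_ingress_config() hunk above. The caller name example_legacy_ingress_setup() is hypothetical; the helpers and the table size of 4 (one flow index per legacy group) come from the diff itself.

/* Hypothetical caller mirroring esw_vport_ingress_config(): create the
 * table once, then the four legacy groups, and unwind both through
 * esw_vport_disable_legacy_ingress_acl() if group creation fails.
 */
static int example_legacy_ingress_setup(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int table_size = 4;	/* one flow index per legacy group */
	int err;

	if (vport->ingress.acl)
		return 0;	/* table and groups already in place */

	err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
	if (err)
		return err;

	err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
	if (err)
		esw_vport_disable_legacy_ingress_acl(esw, vport);
	return err;
}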