@@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv {
69
69
struct rhashtable ct_tuples_nat_ht ;
70
70
struct mlx5_flow_table * ct ;
71
71
struct mlx5_flow_table * ct_nat ;
72
+ struct mlx5_flow_group * ct_nat_miss_group ;
73
+ struct mlx5_flow_handle * ct_nat_miss_rule ;
72
74
struct mlx5e_post_act * post_act ;
73
75
struct mutex control_lock ; /* guards parallel adds/dels */
74
76
struct mapping_ctx * zone_mapping ;
@@ -141,6 +143,8 @@ struct mlx5_ct_counter {
141
143
142
144
enum {
143
145
MLX5_CT_ENTRY_FLAG_VALID ,
146
+ MLX5_CT_ENTRY_IN_CT_TABLE ,
147
+ MLX5_CT_ENTRY_IN_CT_NAT_TABLE ,
144
148
};
145
149
146
150
struct mlx5_ct_entry {
@@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = {
198
202
};
199
203
200
204
/* True if this entry's rule was installed in the plain (non-NAT) CT table;
 * the MLX5_CT_ENTRY_IN_CT_TABLE flag is set at tuple-insertion time in
 * mlx5_tc_ct_block_flow_offload_add().
 */
static bool
mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
{
	return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
}
209
+
210
/* True if this entry's rule was installed in the CT-NAT table; the
 * MLX5_CT_ENTRY_IN_CT_NAT_TABLE flag is set at tuple-insertion time in
 * mlx5_tc_ct_block_flow_offload_add() when the NAT tuple differs from
 * the original tuple.
 */
static bool
mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
{
	return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
}
205
215
206
216
static int
@@ -526,8 +536,10 @@ static void
526
536
mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
			   struct mlx5_ct_entry *entry)
{
	/* An entry is inserted into the CT table or the CT-NAT table, not
	 * both (the flags are set mutually exclusively at offload time), so
	 * only delete the rule(s) that were actually installed.
	 */
	if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
		mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
	if (mlx5_tc_ct_entry_in_ct_table(entry))
		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);

	/* Keep the debugfs offloaded-entries counter in sync. */
	atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
@@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
814
826
& zone_rule -> mh ,
815
827
zone_restore_id ,
816
828
nat ,
817
- mlx5_tc_ct_entry_has_nat (entry ));
829
+ mlx5_tc_ct_entry_in_ct_nat_table (entry ));
818
830
if (err ) {
819
831
ct_dbg ("Failed to create ct entry mod hdr" );
820
832
goto err_mod_hdr ;
@@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
888
900
* old_attr = * attr ;
889
901
890
902
err = mlx5_tc_ct_entry_create_mod_hdr (ct_priv , attr , flow_rule , & mh , zone_restore_id ,
891
- nat , mlx5_tc_ct_entry_has_nat (entry ));
903
+ nat , mlx5_tc_ct_entry_in_ct_nat_table (entry ));
892
904
if (err ) {
893
905
ct_dbg ("Failed to create ct entry mod hdr" );
894
906
goto err_mod_hdr ;
static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
{
	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;

	/* Remove the entry only from the tuple hashtable(s) it was actually
	 * inserted into; insertion and the matching IN_CT*_TABLE flag are
	 * done together in mlx5_tc_ct_block_flow_offload_add(), so removing
	 * unconditionally here could touch an unlinked rhashtable node.
	 */
	if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
				       &entry->tuple_nat_node,
				       tuples_nat_ht_params);
	if (mlx5_tc_ct_entry_in_ct_table(entry))
		rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
				       tuples_ht_params);
}
966
980
967
981
static void mlx5_tc_ct_entry_del (struct mlx5_ct_entry * entry )
@@ -1100,21 +1114,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
1100
1114
return err ;
1101
1115
}
1102
1116
1103
- err = mlx5_tc_ct_entry_add_rule (ct_priv , flow_rule , entry , false,
1104
- zone_restore_id );
1105
- if (err )
1106
- goto err_orig ;
1117
+ if (mlx5_tc_ct_entry_in_ct_table (entry )) {
1118
+ err = mlx5_tc_ct_entry_add_rule (ct_priv , flow_rule , entry , false,
1119
+ zone_restore_id );
1120
+ if (err )
1121
+ goto err_orig ;
1122
+ }
1107
1123
1108
- err = mlx5_tc_ct_entry_add_rule (ct_priv , flow_rule , entry , true,
1109
- zone_restore_id );
1110
- if (err )
1111
- goto err_nat ;
1124
+ if (mlx5_tc_ct_entry_in_ct_nat_table (entry )) {
1125
+ err = mlx5_tc_ct_entry_add_rule (ct_priv , flow_rule , entry , true,
1126
+ zone_restore_id );
1127
+ if (err )
1128
+ goto err_nat ;
1129
+ }
1112
1130
1113
1131
atomic_inc (& ct_priv -> debugfs .stats .offloaded );
1114
1132
return 0 ;
1115
1133
1116
1134
err_nat :
1117
- mlx5_tc_ct_entry_del_rule (ct_priv , entry , false);
1135
+ if (mlx5_tc_ct_entry_in_ct_table (entry ))
1136
+ mlx5_tc_ct_entry_del_rule (ct_priv , entry , false);
1118
1137
err_orig :
1119
1138
mlx5_tc_ct_counter_put (ct_priv , entry );
1120
1139
return err ;
@@ -1128,15 +1147,19 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
1128
1147
{
1129
1148
int err ;
1130
1149
1131
- err = mlx5_tc_ct_entry_replace_rule (ct_priv , flow_rule , entry , false,
1132
- zone_restore_id );
1133
- if (err )
1134
- return err ;
1150
+ if (mlx5_tc_ct_entry_in_ct_table (entry )) {
1151
+ err = mlx5_tc_ct_entry_replace_rule (ct_priv , flow_rule , entry , false,
1152
+ zone_restore_id );
1153
+ if (err )
1154
+ return err ;
1155
+ }
1135
1156
1136
- err = mlx5_tc_ct_entry_replace_rule (ct_priv , flow_rule , entry , true,
1137
- zone_restore_id );
1138
- if (err )
1139
- mlx5_tc_ct_entry_del_rule (ct_priv , entry , false);
1157
+ if (mlx5_tc_ct_entry_in_ct_nat_table (entry )) {
1158
+ err = mlx5_tc_ct_entry_replace_rule (ct_priv , flow_rule , entry , true,
1159
+ zone_restore_id );
1160
+ if (err && mlx5_tc_ct_entry_in_ct_table (entry ))
1161
+ mlx5_tc_ct_entry_del_rule (ct_priv , entry , false);
1162
+ }
1140
1163
return err ;
1141
1164
}
1142
1165
@@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
1224
1247
if (err )
1225
1248
goto err_entries ;
1226
1249
1227
- err = rhashtable_lookup_insert_fast (& ct_priv -> ct_tuples_ht ,
1228
- & entry -> tuple_node ,
1229
- tuples_ht_params );
1230
- if (err )
1231
- goto err_tuple ;
1232
-
1233
1250
if (memcmp (& entry -> tuple , & entry -> tuple_nat , sizeof (entry -> tuple ))) {
1234
1251
err = rhashtable_lookup_insert_fast (& ct_priv -> ct_tuples_nat_ht ,
1235
1252
& entry -> tuple_nat_node ,
1236
1253
tuples_nat_ht_params );
1237
1254
if (err )
1238
1255
goto err_tuple_nat ;
1256
+
1257
+ set_bit (MLX5_CT_ENTRY_IN_CT_NAT_TABLE , & entry -> flags );
1258
+ }
1259
+
1260
+ if (!mlx5_tc_ct_entry_in_ct_nat_table (entry )) {
1261
+ err = rhashtable_lookup_insert_fast (& ct_priv -> ct_tuples_ht ,
1262
+ & entry -> tuple_node ,
1263
+ tuples_ht_params );
1264
+ if (err )
1265
+ goto err_tuple ;
1266
+
1267
+ set_bit (MLX5_CT_ENTRY_IN_CT_TABLE , & entry -> flags );
1239
1268
}
1240
1269
spin_unlock_bh (& ct_priv -> ht_lock );
1241
1270
@@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
1251
1280
1252
1281
err_rules :
1253
1282
spin_lock_bh (& ct_priv -> ht_lock );
1254
- if (mlx5_tc_ct_entry_has_nat (entry ))
1255
- rhashtable_remove_fast (& ct_priv -> ct_tuples_nat_ht ,
1256
- & entry -> tuple_nat_node , tuples_nat_ht_params );
1257
- err_tuple_nat :
1258
- rhashtable_remove_fast (& ct_priv -> ct_tuples_ht ,
1259
- & entry -> tuple_node ,
1260
- tuples_ht_params );
1261
1283
err_tuple :
1262
- rhashtable_remove_fast ( & ft -> ct_entries_ht ,
1263
- & entry -> node ,
1264
- cts_ht_params );
1284
+ mlx5_tc_ct_entry_remove_from_tuples ( entry );
1285
+ err_tuple_nat :
1286
+ rhashtable_remove_fast ( & ft -> ct_entries_ht , & entry -> node , cts_ht_params );
1265
1287
err_entries :
1266
1288
spin_unlock_bh (& ct_priv -> ht_lock );
1267
1289
err_set :
@@ -2149,6 +2171,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
2149
2171
debugfs_remove_recursive (ct_priv -> debugfs .root );
2150
2172
}
2151
2173
2174
/* Install a catch-all rule (NULL spec) in @ft that forwards anything not
 * matched by the table's other groups to @next_ft.
 *
 * FLOW_ACT_IGNORE_FLOW_LEVEL lets the destination table sit at an
 * arbitrary steering level relative to @ft; FLOW_ACT_NO_APPEND forces a
 * distinct FTE rather than appending a destination to an existing one.
 * NOTE(review): semantics of these flags taken from mlx5 fs core — confirm.
 *
 * Returns the rule handle on success or an ERR_PTR() on failure.
 */
static struct mlx5_flow_handle *
tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
		    struct mlx5_flow_table *next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act act = {};

	act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = next_ft;

	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
}
2188
+
2189
/* Carve the last two FTEs of @from into a dedicated miss group and add a
 * catch-all rule there forwarding missed traffic to @to.
 *
 * On success returns 0 and passes ownership of the created objects out via
 * @miss_group / @miss_rule (freed later with tc_ct_del_ct_table_miss_rule()).
 * On failure returns a negative errno and leaves *miss_group / *miss_rule
 * untouched.
 */
static int
tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
			     struct mlx5_flow_table *to,
			     struct mlx5_flow_group **miss_group,
			     struct mlx5_flow_handle **miss_rule)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
	unsigned int max_fte = from->max_fte;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* Create the miss group over the table's last two flow entries; no
	 * match criteria are set in flow_group_in, so the group matches all
	 * packets. NOTE(review): assumes max_fte >= 2 — presumably true for
	 * the CT tables this is called on, but worth confirming.
	 */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 max_fte - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 max_fte - 1);
	group = mlx5_create_flow_group(from, flow_group_in);
	if (IS_ERR(group)) {
		err = PTR_ERR(group);
		goto err_miss_grp;
	}

	/* Add the miss rule forwarding to the next table. */
	rule = tc_ct_add_miss_rule(from, to);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_miss_rule;
	}

	*miss_group = group;
	*miss_rule = rule;
	kvfree(flow_group_in);
	return 0;

err_miss_rule:
	mlx5_destroy_flow_group(group);
err_miss_grp:
	kvfree(flow_group_in);
	return err;
}
2235
+
2236
/* Tear down a miss group/rule pair created by tc_ct_add_ct_table_miss_rule().
 * The rule is deleted first, then the group that contained it (mirrors the
 * creation order in reverse).
 */
static void
tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
			     struct mlx5_flow_handle *miss_rule)
{
	mlx5_del_flow_rules(miss_rule);
	mlx5_destroy_flow_group(miss_group);
}
2243
+
2152
2244
#define INIT_ERR_PREFIX "tc ct offload init failed"
2153
2245
2154
2246
struct mlx5_tc_ct_priv *
@@ -2212,6 +2304,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
2212
2304
goto err_ct_nat_tbl ;
2213
2305
}
2214
2306
2307
+ err = tc_ct_add_ct_table_miss_rule (ct_priv -> ct_nat , ct_priv -> ct ,
2308
+ & ct_priv -> ct_nat_miss_group ,
2309
+ & ct_priv -> ct_nat_miss_rule );
2310
+ if (err )
2311
+ goto err_ct_zone_ht ;
2312
+
2215
2313
ct_priv -> post_act = post_act ;
2216
2314
mutex_init (& ct_priv -> control_lock );
2217
2315
if (rhashtable_init (& ct_priv -> zone_ht , & zone_params ))
@@ -2273,6 +2371,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
2273
2371
ct_priv -> fs_ops -> destroy (ct_priv -> fs );
2274
2372
kfree (ct_priv -> fs );
2275
2373
2374
+ tc_ct_del_ct_table_miss_rule (ct_priv -> ct_nat_miss_group , ct_priv -> ct_nat_miss_rule );
2276
2375
mlx5_chains_destroy_global_table (chains , ct_priv -> ct_nat );
2277
2376
mlx5_chains_destroy_global_table (chains , ct_priv -> ct );
2278
2377
mapping_destroy (ct_priv -> zone_mapping );
0 commit comments