 #define MLX5_ESW_MISS_FLOWS (2)
 #define UPLINK_REP_INDEX 0
 
+/* Per vport tables */
+
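+/* Max number of flow table entries in each per vport FDB table */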
+#define MLX5_ESW_VPORT_TABLE_SIZE 128
+
+/* This struct is used as a key to the hash table and we need it to be packed
+ * so the hash result is consistent
+ */
+struct mlx5_vport_key {
+	u32 chain;
+	u16 prio;
+	u16 vport;
+	u16 vhca_id;
+} __packed;
+
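+/* One per vport FDB table instance: kept in the offloads vports hash
+ * table and refcounted via num_rules.
+ */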
+struct mlx5_vport_table {
+	struct hlist_node hlist;
+	struct mlx5_flow_table *fdb;
+	u32 num_rules;
+	struct mlx5_vport_key key;
+};
+
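+/* Create an autogrouped FDB table for a vport, sized to
+ * MLX5_ESW_VPORT_TABLE_SIZE entries, at the FDB_PER_VPORT priority.
+ */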
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_table *fdb;
+
+	ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+	ft_attr.prio = FDB_PER_VPORT;
+	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	if (IS_ERR(fdb)) {
+		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+			 PTR_ERR(fdb));
+	}
+
+	return fdb;
+}
+
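+/* Build the hash table key from the rule attributes and the device
+ * vhca_id, and return its jhash value.
+ */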
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+				  struct mlx5_esw_flow_attr *attr,
+				  struct mlx5_vport_key *key)
+{
+	key->vport = attr->in_rep->vport;
+	key->chain = attr->chain;
+	key->prio = attr->prio;
+	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+	return jhash(key, sizeof(*key), 0);
+}
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+	struct mlx5_vport_table *e;
+
+	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+		if (!memcmp(&e->key, skey, sizeof(*skey)))
+			return e;
+
+	return NULL;
+}
+
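+/* Drop a reference on the vport table matching @attr; the table is
+ * destroyed once its last rule is removed.
+ */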
+static void
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_vport_table *e;
+	struct mlx5_vport_key key;
+	u32 hkey;
+
+	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	hkey = flow_attr_to_vport_key(esw, attr, &key);
+	e = esw_vport_tbl_lookup(esw, &key, hkey);
+	if (!e || --e->num_rules)
+		goto out;
+
+	hash_del(&e->hlist);
+	mlx5_destroy_flow_table(e->fdb);
+	kfree(e);
+out:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
+
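+/* Get the FDB table for @attr, taking a reference on it; the table is
+ * created on first use.
+ */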
+static struct mlx5_flow_table *
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_table *fdb;
+	struct mlx5_vport_table *e;
+	struct mlx5_vport_key skey;
+	u32 hkey;
+
+	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	hkey = flow_attr_to_vport_key(esw, attr, &skey);
+	e = esw_vport_tbl_lookup(esw, &skey, hkey);
+	if (e) {
+		e->num_rules++;
+		goto out;
+	}
+
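+	/* No table for this key yet, allocate a new entry and FDB table */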
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e) {
+		fdb = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!ns) {
+		esw_warn(dev, "Failed to get FDB namespace\n");
+		fdb = ERR_PTR(-ENOENT);
+		goto err_ns;
+	}
+
+	fdb = esw_vport_tbl_create(esw, ns);
+	if (IS_ERR(fdb))
+		goto err_ns;
+
+	e->fdb = fdb;
+	e->num_rules = 1;
+	e->key = skey;
+	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+	return e->fdb;
+
+err_ns:
+	kfree(e);
+err_alloc:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+	return fdb;
+}
+
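+/* Take a reference on the prio-1 per vport table for every vport */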
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_flow_attr attr = {};
+	struct mlx5_eswitch_rep rep = {};
+	struct mlx5_flow_table *fdb;
+	struct mlx5_vport *vport;
+	int i;
+
+	attr.prio = 1;
+	attr.in_rep = &rep;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		attr.in_rep->vport = vport->vport;
+		fdb = esw_vport_tbl_get(esw, &attr);
+		if (IS_ERR(fdb))
+			goto out;
+	}
+	return 0;
+
+out:
+	mlx5_esw_vport_tbl_put(esw);
+	return PTR_ERR(fdb);
+}
+
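+/* Release the references taken by mlx5_esw_vport_tbl_get() */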
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_flow_attr attr = {};
+	struct mlx5_eswitch_rep rep = {};
+	struct mlx5_vport *vport;
+	int i;
+
+	attr.prio = 1;
+	attr.in_rep = &rep;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		attr.in_rep->vport = vport->vport;
+		esw_vport_tbl_put(esw, &attr);
+	}
+}
+
+/* End: Per vport tables */
+
 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
 						     u16 vport_num)
 {
@@ -191,8 +364,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 		i++;
 	}
 
-	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
-
 	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 	if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,8 +372,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_hdr = attr->modify_hdr;
 
-	fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
-					!!split);
+	if (split) {
+		fdb = esw_vport_tbl_get(esw, attr);
+	} else {
+		fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+						0);
+		mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+	}
 	if (IS_ERR(fdb)) {
 		rule = ERR_CAST(fdb);
 		goto err_esw_get;
@@ -221,7 +397,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 
 err_add_rule:
-	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
+	if (split)
+		esw_vport_tbl_put(esw, attr);
+	else
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 err_esw_get:
 	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
 		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +426,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 		goto err_get_fast;
 	}
 
-	fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
+	fwd_fdb = esw_vport_tbl_get(esw, attr);
 	if (IS_ERR(fwd_fdb)) {
 		rule = ERR_CAST(fwd_fdb);
 		goto err_get_fwd;
@@ -285,7 +464,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 
 	return rule;
 add_err:
-	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+	esw_vport_tbl_put(esw, attr);
 err_get_fwd:
 	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 err_get_fast:
@@ -312,11 +491,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 	atomic64_dec(&esw->offloads.num_flows);
 
 	if (fwd_rule) {
-		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+		esw_vport_tbl_put(esw, attr);
 		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 	} else {
-		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
-					  !!split);
+		if (split)
+			esw_vport_tbl_put(esw, attr);
+		else
+			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+						  0);
 		if (attr->dest_chain)
 			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
 	}
@@ -1923,6 +2105,9 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	if (err)
 		goto create_fg_err;
 
+	mutex_init(&esw->fdb_table.offloads.vports.lock);
+	hash_init(esw->fdb_table.offloads.vports.table);
+
 	return 0;
 
 create_fg_err:
@@ -1939,6 +2124,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 
 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
+	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);