Skip to content

Commit bbd00f7

Browse files
hadarhenzion authored and davem330 committed
net/mlx5e: Add TC tunnel release action for SRIOV offloads
Enhance the parsing of offloaded TC rules to set HW matching on outer (encapsulation) headers. Parse the TC tunnel release action and set it as the mlx5 decap action when the required capabilities are supported.

Signed-off-by: Hadar Hen Zion <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 66958ed commit bbd00f7

File tree

2 files changed

+163
-2
lines changed

2 files changed

+163
-2
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

Lines changed: 155 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,9 +40,11 @@
4040
#include <net/switchdev.h>
4141
#include <net/tc_act/tc_mirred.h>
4242
#include <net/tc_act/tc_vlan.h>
43+
#include <net/tc_act/tc_tunnel_key.h>
4344
#include "en.h"
4445
#include "en_tc.h"
4546
#include "eswitch.h"
47+
#include "vxlan.h"
4648

4749
struct mlx5e_tc_flow {
4850
struct rhash_head node;
@@ -155,6 +157,121 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
155157
}
156158
}
157159

160+
/* Populate the flow spec with VXLAN-specific outer-header matches:
 * forces the outer IP protocol to UDP and, when the classifier carries
 * an encapsulation key ID, matches the VXLAN VNI in misc parameters.
 */
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	/* Criteria (mask) and value pointers for the outer L2-L4 headers. */
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	/* VXLAN VNI lives in the misc parameters section of the match param. */
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	/* VXLAN is UDP-encapsulated: match the outer IP protocol exactly. */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		/* keyid is big-endian on the wire; the device expects
		 * host-order values in the match parameter fields.
		 */
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
190+
191+
/* Translate the tunnel (encapsulation) portion of a flower classifier into
 * outer-header HW matches on @spec.
 *
 * A fully-masked UDP destination port is mandatory; the port must belong to
 * a known VXLAN socket and the eswitch must support vxlan_encap_decap,
 * otherwise the rule cannot be offloaded. Outer IPv4 source/destination
 * addresses are matched when present.
 *
 * Returns 0 on success or -EOPNOTSUPP when the tunnel match cannot be
 * expressed in hardware.
 */
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			return -EOPNOTSUPP;

		/* udp src port isn't supported */
		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
			return -EOPNOTSUPP;

		/* Only offload when the dst port is a registered VXLAN port
		 * and the device can decap/encap VXLAN; other tunnel types
		 * are not supported here.
		 */
		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

	} else { /* udp dst port must be given */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		/* Outer source IPv4 address (mask then value). */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		/* Outer destination IPv4 address (mask then value). */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

	/* Only IPv4 outer headers are handled above: pin the ethertype. */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
274+
158275
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
159276
struct tc_cls_flower_offload *f)
160277
{
@@ -172,12 +289,44 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
172289
BIT(FLOW_DISSECTOR_KEY_VLAN) |
173290
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
174291
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
175-
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
292+
BIT(FLOW_DISSECTOR_KEY_PORTS) |
293+
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
294+
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
295+
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
296+
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
297+
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
176298
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
177299
f->dissector->used_keys);
178300
return -EOPNOTSUPP;
179301
}
180302

303+
if ((dissector_uses_key(f->dissector,
304+
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
305+
dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
306+
dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
307+
dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
308+
struct flow_dissector_key_control *key =
309+
skb_flow_dissector_target(f->dissector,
310+
FLOW_DISSECTOR_KEY_ENC_CONTROL,
311+
f->key);
312+
switch (key->addr_type) {
313+
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
314+
if (parse_tunnel_attr(priv, spec, f))
315+
return -EOPNOTSUPP;
316+
break;
317+
default:
318+
return -EOPNOTSUPP;
319+
}
320+
321+
/* In decap flow, header pointers should point to the inner
322+
* headers, outer header were already set by parse_tunnel_attr
323+
*/
324+
headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
325+
inner_headers);
326+
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
327+
inner_headers);
328+
}
329+
181330
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
182331
struct flow_dissector_key_control *key =
183332
skb_flow_dissector_target(f->dissector,
@@ -442,6 +591,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
442591
continue;
443592
}
444593

594+
if (is_tcf_tunnel_release(a)) {
595+
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
596+
continue;
597+
}
598+
445599
return -EINVAL;
446600
}
447601
return 0;

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
8282

8383
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
8484
MLX5_MATCH_MISC_PARAMETERS;
85+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
86+
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
8587

8688
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
8789
spec, &flow_act, dest, i);
@@ -409,6 +411,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
409411
u32 *flow_group_in;
410412
void *match_criteria;
411413
int table_size, ix, err = 0;
414+
u32 flags = 0;
412415

413416
flow_group_in = mlx5_vzalloc(inlen);
414417
if (!flow_group_in)
@@ -423,10 +426,14 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
423426
esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
424427
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
425428

429+
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
430+
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
431+
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
432+
426433
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
427434
ESW_OFFLOADS_NUM_ENTRIES,
428435
ESW_OFFLOADS_NUM_GROUPS, 0,
429-
0);
436+
flags);
430437
if (IS_ERR(fdb)) {
431438
err = PTR_ERR(fdb);
432439
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);

0 commit comments

Comments (0)