
Commit e4ad91f

mishuang2017 authored and Saeed Mahameed committed
net/mlx5e: Split offloaded eswitch TC rules for port mirroring
If a TC rule needs to be split for mirroring, create two HW rules, one in the first-level and one in the second-level flow table. The first-level rule forwards the packet to the mirror port and also forwards it to the second-level flow table for further processing, e.g. encap, vlan push or header re-write. Currently the matching is repeated in both stages.

While here, also simplify the setup of the vhca id valid indicator in the existing code.

Signed-off-by: Chris Mi <[email protected]>
Reviewed-by: Paul Blakey <[email protected]>
Reviewed-by: Or Gerlitz <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
1 parent 592d365 commit e4ad91f
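
In flow-table terms (my reading of the diff below, not wording taken from the commit itself), fast_fdb acts as the first-level table and fwd_fdb as the second-level table, and a mirrored flow ends up holding one rule handle per level:

/*
 * Sketch of the resulting FDB layout for a mirrored flow, assembled from
 * the hunks below; the names are the driver's own.
 *
 * First level (esw->fdb_table.offloads.fast_fdb) - flow->rule[1],
 * installed by mlx5_eswitch_add_fwd_rule():
 *	match:   the flow's spec plus source_port (and
 *	         source_eswitch_owner_vhca_id on merged-eswitch devices)
 *	actions: FWD_DEST to the mirror vport(s) and to the fwd_fdb table
 *
 * Second level (esw->fdb_table.offloads.fwd_fdb) - flow->rule[0],
 * installed by mlx5_eswitch_add_offloaded_rule(), which now picks
 * fwd_fdb instead of fast_fdb whenever attr->mirror_count is set:
 *	match:   the same spec again (the matching is repeated in both stages)
 *	actions: encap / vlan push / header re-write, then forward to the
 *	         remaining out_rep vports; flow statistics are read from
 *	         this rule (see mlx5e_stats_flower below)
 */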

File tree

3 files changed, +108 -21 lines changed


drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

Lines changed: 43 additions & 14 deletions
@@ -75,12 +75,14 @@ enum {
 	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
 };
 
+#define MLX5E_TC_MAX_SPLITS 1
+
 struct mlx5e_tc_flow {
 	struct rhash_head node;
 	struct mlx5e_priv *priv;
 	u64 cookie;
 	u8 flags;
-	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
 	struct list_head encap; /* flows sharing the same encap ID */
 	struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
 	struct list_head hairpin; /* flows sharing the same hairpin */
@@ -794,8 +796,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	struct mlx5_fc *counter = NULL;
 
-	counter = mlx5_flow_rule_counter(flow->rule);
-	mlx5_del_flow_rules(flow->rule);
+	counter = mlx5_flow_rule_counter(flow->rule[0]);
+	mlx5_del_flow_rules(flow->rule[0]);
 	mlx5_fc_destroy(priv->mdev, counter);
 
 	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
@@ -870,9 +872,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
 		if (IS_ERR(rule))
 			goto err_add_rule;
+
+		if (attr->mirror_count) {
+			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
+			if (IS_ERR(flow->rule[1]))
+				goto err_fwd_rule;
+		}
 	}
 	return rule;
 
+err_fwd_rule:
+	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+	rule = flow->rule[1];
 err_add_rule:
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
@@ -893,7 +904,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 
 	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
+		if (attr->mirror_count)
+			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 	}
 
 	mlx5_eswitch_del_vlan_action(esw, attr);
@@ -929,13 +942,25 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	list_for_each_entry(flow, &e->flows, encap) {
 		esw_attr = flow->esw_attr;
 		esw_attr->encap_id = e->encap_id;
-		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
-		if (IS_ERR(flow->rule)) {
-			err = PTR_ERR(flow->rule);
+		flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+		if (IS_ERR(flow->rule[0])) {
+			err = PTR_ERR(flow->rule[0]);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
 				       err);
 			continue;
 		}
+
+		if (esw_attr->mirror_count) {
+			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+			if (IS_ERR(flow->rule[1])) {
+				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
+				err = PTR_ERR(flow->rule[1]);
+				mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
+					       err);
+				continue;
+			}
+		}
+
 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
 	}
 }
@@ -948,8 +973,12 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 
 	list_for_each_entry(flow, &e->flows, encap) {
 		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+			struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+
 			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+			if (attr->mirror_count)
+				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 		}
 	}
 
@@ -984,7 +1013,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 			continue;
 		list_for_each_entry(flow, &e->flows, encap) {
 			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-				counter = mlx5_flow_rule_counter(flow->rule);
+				counter = mlx5_flow_rule_counter(flow->rule[0]);
 				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
 					neigh_used = true;
@@ -2714,16 +2743,16 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
 		if (err < 0)
 			goto err_free;
-		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+		flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
 	} else {
 		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
 		if (err < 0)
 			goto err_free;
-		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+		flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
 	}
 
-	if (IS_ERR(flow->rule)) {
-		err = PTR_ERR(flow->rule);
+	if (IS_ERR(flow->rule[0])) {
+		err = PTR_ERR(flow->rule[0]);
 		if (err != -EAGAIN)
 			goto err_free;
 	}
@@ -2796,7 +2825,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
 		return 0;
 
-	counter = mlx5_flow_rule_counter(flow->rule);
+	counter = mlx5_flow_rule_counter(flow->rule[0]);
 	if (!counter)
 		return 0;
 
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

Lines changed: 4 additions & 0 deletions
@@ -219,6 +219,10 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+			  struct mlx5_flow_spec *spec,
+			  struct mlx5_esw_flow_attr *attr);
 void
 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_handle *rule,

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

Lines changed: 61 additions & 7 deletions
@@ -50,6 +50,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 {
 	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
 	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_table *ft = NULL;
 	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_handle *rule;
 	int j, i = 0;
@@ -58,6 +59,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (attr->mirror_count)
+		ft = esw->fdb_table.offloads.fwd_fdb;
+	else
+		ft = esw->fdb_table.offloads.fast_fdb;
+
 	flow_act.action = attr->action;
 	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
@@ -73,11 +79,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 		for (j = attr->mirror_count; j < attr->out_count; j++) {
 			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 			dest[i].vport.num = attr->out_rep[j]->vport;
-			if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
-				dest[i].vport.vhca_id =
-					MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
-				dest[i].vport.vhca_id_valid = 1;
-			}
+			dest[i].vport.vhca_id =
+				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
 			i++;
 		}
 	}
@@ -121,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
 		flow_act.encap_id = attr->encap_id;
 
-	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.offloads.fast_fdb,
-				   spec, &flow_act, dest, i);
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
 		goto err_add_rule;
 	else
@@ -136,6 +139,57 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 }
 
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+			  struct mlx5_flow_spec *spec,
+			  struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_handle *rule;
+	void *misc;
+	int i;
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	for (i = 0; i < attr->mirror_count; i++) {
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest[i].vport.num = attr->out_rep[i]->vport;
+		dest[i].vport.vhca_id =
+			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
+		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+	}
+	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
+	i++;
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		MLX5_SET(fte_match_set_misc, misc,
+			 source_eswitch_owner_vhca_id,
+			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+				 source_eswitch_owner_vhca_id);
+
+	if (attr->match_level == MLX5_MATCH_NONE)
+		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	else
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+					      MLX5_MATCH_MISC_PARAMETERS;
+
+	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
+
+	if (!IS_ERR(rule))
+		esw->offloads.num_flows++;
+
+	return rule;
+}
+
 void
 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_handle *rule,
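
Pulling the pieces together, the caller side then looks roughly like this. This is a condensed sketch assembled from the en_tc.c hunks above, with error handling and the surrounding functions trimmed; it is not a literal excerpt of the driver:

	/* add: install the second-level action rule, then, if the flow has
	 * mirror destinations, the first-level mirror+goto rule
	 */
	flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (attr->mirror_count)
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);

	/* remove: tear down the first-level rule before the second-level one,
	 * as mlx5e_tc_del_fdb_flow() and mlx5e_tc_encap_flows_del() do
	 */
	if (attr->mirror_count)
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);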
