Skip to content

Commit 1848673

Browse files
Eli Britstein authored and Saeed Mahameed committed
net/mlx5e: ACLs for priority tag mode
Current ConnectX HW is unable to perform VLAN pop in TX path and VLAN push on RX path. As a workaround, untagged packets are tagged with VID 0x000 allowing pop/push actions to be exchanged with VLAN rewrite actions. Use the ingress ACL table, preceding the FDB, to push VLAN 0x000 ID tag for untagged packets and the egress ACL table, succeeding the FDB, to pop VLAN 0x000 ID tag. Signed-off-by: Eli Britstein <[email protected]> Reviewed-by: Oz Shlomo <[email protected]> Signed-off-by: Saeed Mahameed <[email protected]>
1 parent 69dad68 commit 1848673

File tree

3 files changed

+193
-12
lines changed

3 files changed

+193
-12
lines changed

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -922,8 +922,8 @@ static void esw_vport_change_handler(struct work_struct *work)
922922
mutex_unlock(&esw->state_lock);
923923
}
924924

925-
static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
926-
struct mlx5_vport *vport)
925+
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
926+
struct mlx5_vport *vport)
927927
{
928928
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
929929
struct mlx5_flow_group *vlan_grp = NULL;
@@ -1006,8 +1006,8 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
10061006
return err;
10071007
}
10081008

1009-
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1010-
struct mlx5_vport *vport)
1009+
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1010+
struct mlx5_vport *vport)
10111011
{
10121012
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
10131013
mlx5_del_flow_rules(vport->egress.allowed_vlan);
@@ -1019,8 +1019,8 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
10191019
vport->egress.drop_rule = NULL;
10201020
}
10211021

1022-
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1023-
struct mlx5_vport *vport)
1022+
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1023+
struct mlx5_vport *vport)
10241024
{
10251025
if (IS_ERR_OR_NULL(vport->egress.acl))
10261026
return;
@@ -1036,8 +1036,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
10361036
vport->egress.acl = NULL;
10371037
}
10381038

1039-
static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1040-
struct mlx5_vport *vport)
1039+
int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1040+
struct mlx5_vport *vport)
10411041
{
10421042
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
10431043
struct mlx5_core_dev *dev = esw->dev;
@@ -1168,8 +1168,8 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
11681168
return err;
11691169
}
11701170

1171-
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1172-
struct mlx5_vport *vport)
1171+
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1172+
struct mlx5_vport *vport)
11731173
{
11741174
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
11751175
mlx5_del_flow_rules(vport->ingress.drop_rule);
@@ -1181,8 +1181,8 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
11811181
vport->ingress.allow_rule = NULL;
11821182
}
11831183

1184-
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1185-
struct mlx5_vport *vport)
1184+
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1185+
struct mlx5_vport *vport)
11861186
{
11871187
if (IS_ERR_OR_NULL(vport->ingress.acl))
11881188
return;

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,18 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
227227
int total_nvports);
228228
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
229229
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
230+
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
231+
struct mlx5_vport *vport);
232+
int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
233+
struct mlx5_vport *vport);
234+
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
235+
struct mlx5_vport *vport);
236+
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
237+
struct mlx5_vport *vport);
238+
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
239+
struct mlx5_vport *vport);
240+
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
241+
struct mlx5_vport *vport);
230242

231243
/* E-Switch API */
232244
int mlx5_eswitch_init(struct mlx5_core_dev *dev);

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1601,13 +1601,180 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
16011601
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
16021602
}
16031603

1604+
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1605+
struct mlx5_vport *vport)
1606+
{
1607+
struct mlx5_core_dev *dev = esw->dev;
1608+
struct mlx5_flow_act flow_act = {0};
1609+
struct mlx5_flow_spec *spec;
1610+
int err = 0;
1611+
1612+
/* For prio tag mode, there is only 1 FTEs:
1613+
* 1) Untagged packets - push prio tag VLAN, allow
1614+
* Unmatched traffic is allowed by default
1615+
*/
1616+
1617+
if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1618+
return -EOPNOTSUPP;
1619+
1620+
esw_vport_cleanup_ingress_rules(esw, vport);
1621+
1622+
err = esw_vport_enable_ingress_acl(esw, vport);
1623+
if (err) {
1624+
mlx5_core_warn(esw->dev,
1625+
"failed to enable prio tag ingress acl (%d) on vport[%d]\n",
1626+
err, vport->vport);
1627+
return err;
1628+
}
1629+
1630+
esw_debug(esw->dev,
1631+
"vport[%d] configure ingress rules\n", vport->vport);
1632+
1633+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1634+
if (!spec) {
1635+
err = -ENOMEM;
1636+
goto out_no_mem;
1637+
}
1638+
1639+
/* Untagged packets - push prio tag VLAN, allow */
1640+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1641+
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1642+
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1643+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1644+
MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1645+
flow_act.vlan[0].ethtype = ETH_P_8021Q;
1646+
flow_act.vlan[0].vid = 0;
1647+
flow_act.vlan[0].prio = 0;
1648+
vport->ingress.allow_rule =
1649+
mlx5_add_flow_rules(vport->ingress.acl, spec,
1650+
&flow_act, NULL, 0);
1651+
if (IS_ERR(vport->ingress.allow_rule)) {
1652+
err = PTR_ERR(vport->ingress.allow_rule);
1653+
esw_warn(esw->dev,
1654+
"vport[%d] configure ingress untagged allow rule, err(%d)\n",
1655+
vport->vport, err);
1656+
vport->ingress.allow_rule = NULL;
1657+
goto out;
1658+
}
1659+
1660+
out:
1661+
kvfree(spec);
1662+
out_no_mem:
1663+
if (err)
1664+
esw_vport_cleanup_ingress_rules(esw, vport);
1665+
return err;
1666+
}
1667+
1668+
static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1669+
struct mlx5_vport *vport)
1670+
{
1671+
struct mlx5_flow_act flow_act = {0};
1672+
struct mlx5_flow_spec *spec;
1673+
int err = 0;
1674+
1675+
/* For prio tag mode, there is only 1 FTEs:
1676+
* 1) prio tag packets - pop the prio tag VLAN, allow
1677+
* Unmatched traffic is allowed by default
1678+
*/
1679+
1680+
esw_vport_cleanup_egress_rules(esw, vport);
1681+
1682+
err = esw_vport_enable_egress_acl(esw, vport);
1683+
if (err) {
1684+
mlx5_core_warn(esw->dev,
1685+
"failed to enable egress acl (%d) on vport[%d]\n",
1686+
err, vport->vport);
1687+
return err;
1688+
}
1689+
1690+
esw_debug(esw->dev,
1691+
"vport[%d] configure prio tag egress rules\n", vport->vport);
1692+
1693+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1694+
if (!spec) {
1695+
err = -ENOMEM;
1696+
goto out_no_mem;
1697+
}
1698+
1699+
/* prio tag vlan rule - pop it so VF receives untagged packets */
1700+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1701+
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1702+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1703+
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1704+
1705+
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1706+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1707+
MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1708+
vport->egress.allowed_vlan =
1709+
mlx5_add_flow_rules(vport->egress.acl, spec,
1710+
&flow_act, NULL, 0);
1711+
if (IS_ERR(vport->egress.allowed_vlan)) {
1712+
err = PTR_ERR(vport->egress.allowed_vlan);
1713+
esw_warn(esw->dev,
1714+
"vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1715+
vport->vport, err);
1716+
vport->egress.allowed_vlan = NULL;
1717+
goto out;
1718+
}
1719+
1720+
out:
1721+
kvfree(spec);
1722+
out_no_mem:
1723+
if (err)
1724+
esw_vport_cleanup_egress_rules(esw, vport);
1725+
return err;
1726+
}
1727+
1728+
static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
1729+
{
1730+
int i, j;
1731+
int err;
1732+
1733+
mlx5_esw_for_each_vf_vport(esw, i, nvports) {
1734+
err = esw_vport_ingress_prio_tag_config(esw, &esw->vports[i]);
1735+
if (err)
1736+
goto err_ingress;
1737+
err = esw_vport_egress_prio_tag_config(esw, &esw->vports[i]);
1738+
if (err)
1739+
goto err_egress;
1740+
}
1741+
1742+
return 0;
1743+
1744+
err_egress:
1745+
esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
1746+
err_ingress:
1747+
mlx5_esw_for_each_vf_vport_reverse(esw, j, i - 1) {
1748+
esw_vport_disable_egress_acl(esw, &esw->vports[j]);
1749+
esw_vport_disable_ingress_acl(esw, &esw->vports[j]);
1750+
}
1751+
1752+
return err;
1753+
}
1754+
1755+
static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
1756+
{
1757+
int i;
1758+
1759+
mlx5_esw_for_each_vf_vport(esw, i, esw->nvports) {
1760+
esw_vport_disable_egress_acl(esw, &esw->vports[i]);
1761+
esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
1762+
}
1763+
}
1764+
16041765
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
16051766
{
16061767
int err;
16071768

16081769
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
16091770
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
16101771

1772+
if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
1773+
err = esw_prio_tag_acls_config(esw, nvports);
1774+
if (err)
1775+
return err;
1776+
}
1777+
16111778
err = esw_create_offloads_fdb_tables(esw, nvports);
16121779
if (err)
16131780
return err;
@@ -1636,6 +1803,8 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
16361803
esw_destroy_vport_rx_group(esw);
16371804
esw_destroy_offloads_table(esw);
16381805
esw_destroy_offloads_fdb_tables(esw);
1806+
if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
1807+
esw_prio_tag_acls_cleanup(esw);
16391808
}
16401809

16411810
static void esw_host_params_event_handler(struct work_struct *work)

0 commit comments

Comments
 (0)