@@ -88,6 +88,14 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
 	return 1;
 }
 
+static bool
+esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+				   const struct mlx5_vport *vport)
+{
+	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+		mlx5_eswitch_is_vf_vport(esw, vport->vport));
+}
+
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 				  struct mlx5_flow_spec *spec,
@@ -1760,12 +1768,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 	 * required, allow
 	 * Unmatched traffic is allowed by default
 	 */
-
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		err = -ENOMEM;
-		goto out_no_mem;
-	}
+	if (!spec)
+		return -ENOMEM;
 
 	/* Untagged packets - push prio tag VLAN, allow */
 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
@@ -1791,14 +1796,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
 			 vport->vport, err);
 		vport->ingress.allow_rule = NULL;
-		goto out;
 	}
 
-out:
 	kvfree(spec);
-out_no_mem:
-	if (err)
-		esw_vport_cleanup_ingress_rules(esw, vport);
 	return err;
 }
 
@@ -1836,13 +1836,9 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 		esw_warn(esw->dev,
 			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
 			 vport->vport, err);
+		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 		vport->ingress.offloads.modify_metadata_rule = NULL;
-		goto out;
 	}
-
-out:
-	if (err)
-		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 	return err;
 }
 
@@ -1862,50 +1858,103 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_group *g;
+	void *match_criteria;
 	u32 *flow_group_in;
+	u32 flow_index = 0;
 	int ret = 0;
 
 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
 	if (!flow_group_in)
 		return -ENOMEM;
 
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
+		/* This group is to hold FTE to match untagged packets when prio_tag
+		 * is enabled.
+		 */
+		memset(flow_group_in, 0, inlen);
 
-	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
-	if (IS_ERR(g)) {
-		ret = PTR_ERR(g);
-		esw_warn(esw->dev,
-			 "Failed to create vport[%d] ingress metadata group, err(%d)\n",
-			 vport->vport, ret);
-		goto grp_err;
+		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+					      flow_group_in, match_criteria);
+		MLX5_SET(create_flow_group_in, flow_group_in,
+			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto prio_tag_err;
+		}
+		vport->ingress.offloads.metadata_prio_tag_grp = g;
+		flow_index++;
+	}
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+		/* This group holds an FTE with no matches for add metadata for
+		 * tagged packets, if prio-tag is enabled (as a fallthrough),
+		 * or all traffic in case prio-tag is disabled.
+		 */
+		memset(flow_group_in, 0, inlen);
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto metadata_err;
+		}
+		vport->ingress.offloads.metadata_allmatch_grp = g;
+	}
+
+	kvfree(flow_group_in);
+	return 0;
+
+metadata_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
 	}
-	vport->ingress.offloads.metadata_grp = g;
-grp_err:
+prio_tag_err:
 	kvfree(flow_group_in);
 	return ret;
 }
 
 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
 {
-	if (vport->ingress.offloads.metadata_grp) {
-		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
-		vport->ingress.offloads.metadata_grp = NULL;
+	if (vport->ingress.offloads.metadata_allmatch_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
+		vport->ingress.offloads.metadata_allmatch_grp = NULL;
+	}
+
+	if (vport->ingress.offloads.metadata_prio_tag_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
 	}
 }
 
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	int num_ftes = 0;
 	int err;
 
 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+	    !esw_check_ingress_prio_tag_enabled(esw, vport))
 		return 0;
 
 	esw_vport_cleanup_ingress_rules(esw, vport);
-	err = esw_vport_create_ingress_acl_table(esw, vport, 1);
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+		num_ftes++;
+	if (esw_check_ingress_prio_tag_enabled(esw, vport))
+		num_ftes++;
+
+	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
 	if (err) {
 		esw_warn(esw->dev,
 			 "failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1926,8 +1975,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 			goto metadata_err;
 	}
 
-	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
-	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
 		err = esw_vport_ingress_prio_tag_config(esw, vport);
 		if (err)
 			goto prio_tag_err;
@@ -1937,7 +1985,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 prio_tag_err:
 	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 metadata_err:
-	esw_vport_cleanup_ingress_rules(esw, vport);
 	esw_vport_destroy_ingress_acl_group(vport);
 group_err:
 	esw_vport_destroy_ingress_acl_table(vport);
@@ -2008,8 +2055,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
 	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
 		err = esw_vport_egress_config(esw, vport);
 		if (err) {
-			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 			esw_vport_cleanup_ingress_rules(esw, vport);
+			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+			esw_vport_destroy_ingress_acl_group(vport);
 			esw_vport_destroy_ingress_acl_table(vport);
 		}
 	}
@@ -2021,8 +2069,8 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
 				       struct mlx5_vport *vport)
 {
 	esw_vport_disable_egress_acl(esw, vport);
-	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 	esw_vport_cleanup_ingress_rules(esw, vport);
+	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 	esw_vport_destroy_ingress_acl_group(vport);
 	esw_vport_destroy_ingress_acl_table(vport);
 }