@@ -1322,7 +1322,7 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
 
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
 
-static bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
+bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
 {
 	struct mlx5_core_dev *out_mdev, *route_mdev;
 	struct mlx5e_priv *out_priv, *route_priv;
@@ -1339,8 +1339,7 @@ static bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
 	return same_hw_devs(out_priv, route_priv);
 }
 
-static int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
-				      u16 *vport)
+int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 {
 	struct mlx5e_priv *out_priv, *route_priv;
 	struct mlx5_core_dev *route_mdev;
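
Note: the two hunks above only drop the static qualifier; the function bodies are unchanged. Making mlx5e_tc_is_vf_tunnel() and mlx5e_tc_query_route_vport() externally visible implies matching declarations elsewhere in the driver (not shown in this excerpt; presumably en_tc.h or the tc tunnel header). A sketch of the expected prototypes, mirroring the new signatures:

	bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
	int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
				       u16 *vport);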
@@ -1504,6 +1503,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 				kfree(attr->parse_attr->tun_info[out_index]);
 			}
 	kvfree(attr->parse_attr);
+	kvfree(attr->esw_attr->rx_tun_attr);
 
 	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
 
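
Note: mlx5e_tc_del_fdb_flow() now also releases the rx_tun_attr that mlx5e_tc_set_attr_rx_tun() (added below) attaches to esw_attr. kvfree(NULL) is a no-op, so the unconditional call is safe for flows that never allocated the attribute. The struct definition is outside this excerpt; a sketch of the layout implied by the accesses in this patch, where any field beyond the src_ip/dst_ip unions is an assumption:

	struct mlx5_rx_tun_attr {
		u16 decap_vport;	/* assumed: filled in by the later route lookup */
		union {
			__be32 v4;
			struct in6_addr v6;
		} src_ip;		/* outer tunnel source, copied from the match */
		union {
			__be32 v4;
			struct in6_addr v6;
		} dst_ip;		/* outer tunnel destination, copied from the match */
	};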
@@ -2134,6 +2134,67 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
 	}
 }
 
+static u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
+{
+	void *headers_v;
+	u16 ethertype;
+	u8 ip_version;
+
+	if (outer)
+		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+	else
+		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
+
+	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
+	/* Return ip_version converted from ethertype anyway */
+	if (!ip_version) {
+		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
+			ip_version = 4;
+		else if (ethertype == ETH_P_IPV6)
+			ip_version = 6;
+	}
+	return ip_version;
+}
+
+static int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+				    struct mlx5_flow_spec *spec)
+{
+	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+	struct mlx5_rx_tun_attr *tun_attr;
+	void *daddr, *saddr;
+	u8 ip_version;
+
+	tun_attr = kvzalloc(sizeof(*tun_attr), GFP_KERNEL);
+	if (!tun_attr)
+		return -ENOMEM;
+
+	esw_attr->rx_tun_attr = tun_attr;
+	ip_version = mlx5e_tc_get_ip_version(spec, true);
+
+	if (ip_version == 4) {
+		daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				     outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+		saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				     outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+		tun_attr->dst_ip.v4 = *(__be32 *)daddr;
+		tun_attr->src_ip.v4 = *(__be32 *)saddr;
+	}
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+	else if (ip_version == 6) {
+		int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+
+		daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				     outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+		saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				     outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
+		memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
+		memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
+	}
+#endif
+	return 0;
+}
+
 static int parse_tunnel_attr(struct mlx5e_priv *priv,
 			     struct mlx5e_tc_flow *flow,
 			     struct mlx5_flow_spec *spec,
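
Note: mlx5e_tc_get_ip_version() prefers the ip_version field of the match value and falls back to the ethertype only when that field was not set by the classifier; ARP is grouped with IPv4 because ARP flows belong to the IPv4 tunnel path. mlx5e_tc_set_attr_rx_tun() allocates the rx_tun_attr released in mlx5e_tc_del_fdb_flow() above and copies the outer source/destination addresses straight out of the match, so they stay in network byte order. An illustration only, not part of the patch, showing how the captured IPv4 pair could be inspected for debugging:

	/* addresses are __be32, so %pI4 takes their addresses directly */
	pr_debug("tunnel rx attrs: src %pI4 dst %pI4\n",
		 &esw_attr->rx_tun_attr->src_ip.v4,
		 &esw_attr->rx_tun_attr->dst_ip.v4);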
@@ -2142,6 +2203,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 			     u8 *match_level,
 			     bool *match_inner)
 {
+	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct netlink_ext_ack *extack = f->common.extack;
 	bool needs_mapping, sets_mapping;
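
Note: the tunnel descriptor is resolved once at function entry and may be NULL when filter_dev is not a recognized tunnel device, which is why the branch added further down checks "tunnel &&" before dereferencing it. The helper is declared elsewhere in the driver; an assumed prototype, consistent with the call site:

	/* assumed declaration (tc tunnel header); returns NULL for non-tunnel devices */
	struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);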
@@ -2179,6 +2241,31 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 	 */
 		if (!netif_is_bareudp(filter_dev))
 			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
+		if (err)
+			return err;
+	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+		struct mlx5_flow_spec *tmp_spec;
+
+		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
+		if (!tmp_spec) {
+			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
+			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+			return -ENOMEM;
+		}
+		memcpy(tmp_spec, spec, sizeof(*tmp_spec));
+
+		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
+		if (err) {
+			kvfree(tmp_spec);
+			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
+			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
+			return err;
+		}
+		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+		kvfree(tmp_spec);
+		if (err)
+			return err;
 	}
 
 	if (!needs_mapping && !sets_mapping)
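
Note: the hunk above fills rx_tun_attr on two paths. When the flow's match still carries the tunnel headers, the outer IPs are extracted directly from spec. For VXLAN flows on the newly added else branch (the opening condition of the first branch lies outside this hunk), the tunnel match has already been consumed, so the parse runs against a scratch copy and the flow's real match is left untouched. A condensed restatement of that scratch-spec pattern, with the key property that tmp_spec is freed on every exit path:

	tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
	if (!tmp_spec)
		return -ENOMEM;
	memcpy(tmp_spec, spec, sizeof(*tmp_spec));
	err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
	if (!err)
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
	kvfree(tmp_spec);	/* never outlives parse_tunnel_attr() */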
@@ -4473,6 +4560,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 		}
 	}
 
+	if (decap && esw_attr->rx_tun_attr) {
+		err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+		if (err)
+			return err;
+	}
+
+	/* always set IP version for indirect table handling */
+	attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+
 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
 	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
 		/* For prio tag mode, replace vlan pop with rewrite vlan prio
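
Note: for decap flows that now carry rx_tun_attr, parse_tc_fdb_actions() resolves the route for the matched outer addresses so the decap rule can be attached to the correct source vport, and it records the IP version unconditionally for indirect table handling (as the added comment states). Only the call site of the route lookup appears in this hunk; an assumed prototype matching it:

	/* assumed declaration (tc tunnel header), consistent with the call above */
	int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
				      struct mlx5_flow_spec *spec,
				      struct mlx5_flow_attr *attr);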