Skip to content

Commit 4ee70ef

Browse files
idosch authored and davem330 committed
mlxsw: spectrum_nve: Add support for VXLAN on Spectrum-2
Spectrum-1 and Spectrum-2 are largely backward compatible with regards to VXLAN. One difference - as explained in previous patch - is that an underlay RIF needs to be specified instead of an underlay VR during NVE initialization. This is accomplished by calling the relevant function that returns the index of such a RIF based on the table ID (RT_TABLE_MAIN) where underlay look up occurs. The second difference is that VXLAN learning (snooping) is controlled via a different register (TNPC). Signed-off-by: Ido Schimmel <[email protected]> Reviewed-by: Petr Machata <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 3179a56 commit 4ee70ef

File tree

2 files changed

+110
-1
lines changed

2 files changed

+110
-1
lines changed

drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ struct mlxsw_sp_nve {
2828
unsigned int num_nve_tunnels; /* Protected by RTNL */
2929
unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
3030
u32 tunnel_index;
31+
u16 ul_rif_index; /* Reserved for Spectrum */
3132
};
3233

3334
struct mlxsw_sp_nve_ops {

drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c

Lines changed: 109 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
#include <net/vxlan.h>
88

99
#include "reg.h"
10+
#include "spectrum.h"
1011
#include "spectrum_nve.h"
1112

1213
/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
@@ -254,14 +255,121 @@ static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
254255
return false;
255256
}
256257

258+
/* Enable or disable VXLAN learning (snooping) on Spectrum-2.
 *
 * Unlike Spectrum-1, learning is controlled via the TNPC (tunnel port
 * configuration) register, keyed by the NVE tunnel port.
 *
 * Returns 0 on success or a negative errno from the register write.
 *
 * Note: the return type must be 'int', not 'bool' — mlxsw_reg_write()
 * returns a negative errno, and the caller stores the result in an
 * 'int err' and propagates it. A bool return would truncate the error
 * code to 0/1.
 */
static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
					    bool learning_en)
{
	char tnpc_pl[MLXSW_REG_TNPC_LEN];

	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
			    learning_en);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
}
267+
268+
/* Apply the NVE tunnel configuration to Spectrum-2 hardware.
 *
 * Per the commit description, Spectrum-2 differs from Spectrum-1 in that
 * an underlay RIF (rather than an underlay VR) must be specified, looked
 * up by the underlay table ID (config->ul_tb_id).
 *
 * Acquires: a reference on the underlay RIF (released on error, or later
 * by mlxsw_sp2_nve_vxlan_config_clear()).
 * Returns 0 on success or a negative errno; on error all intermediate
 * state is unwound via the goto cleanup chain below.
 */
static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	u16 ul_rif_index;
	int err;

	/* Resolve the underlay RIF for the underlay table ID and take a
	 * reference on it.
	 */
	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
					 &ul_rif_index);
	if (err)
		return err;
	/* Stash the index so config_clear()/init() can use it later. */
	mlxsw_sp->nve->ul_rif_index = ul_rif_index;

	/* Learning is controlled via TNPC on Spectrum-2. */
	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
	if (err)
		goto err_vxlan_learning_set;

	/* Common TNGCR packing shared with Spectrum-1, plus the
	 * Spectrum-2-only underlay RIF field.
	 */
	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	if (err)
		goto err_tngcr_write;

	return 0;

err_tngcr_write:
	/* Undo in reverse order of acquisition. */
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return err;
}
301+
302+
/* Tear down the NVE tunnel configuration applied by
 * mlxsw_sp2_nve_vxlan_config_set(), in reverse order: disable the tunnel
 * via TNGCR, disable learning via TNPC, then drop the underlay RIF
 * reference taken during configuration.
 */
static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];

	/* valid=false disables the VXLAN tunnel; return values are
	 * intentionally ignored on the teardown path.
	 */
	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
}
311+
312+
/* Program the RTDP (routing tunnel decap properties) register for the
 * NVE tunnel: bind the decap entry identified by @tunnel_index to the
 * underlay RIF @ul_rif_index as its egress router interface.
 *
 * Returns 0 on success or a negative errno from the register write.
 */
static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index,
					u16 ul_rif_index)
{
	char rtdp_pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
	mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
323+
257324
/* Initialize VXLAN offload on Spectrum-2 (previously returned
 * -EOPNOTSUPP). Sequence:
 *   1. Deepen packet parsing so encapsulated headers are parsed
 *      (see the header-size comment at the top of this file).
 *   2. Apply tunnel configuration (TNGCR/TNPC + underlay RIF).
 *   3. Point the RTDP decap entry at the underlay RIF.
 *   4. Promote the decap route so matching underlay traffic is decapped.
 *
 * Returns 0 on success or a negative errno; each failure unwinds the
 * steps already performed, in reverse order.
 */
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
				       MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
				       config->udp_dport);
	if (err)
		return err;

	/* Stores the underlay RIF index in nve->ul_rif_index, used below. */
	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
					   nve->ul_rif_index);
	if (err)
		goto err_rtdp_set;

	/* Install the decap route for the underlay source IP. */
	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	/* RTDP needs no explicit undo; clearing the tunnel config suffices. */
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
				 config->udp_dport);
	return err;
}
262362

263363
/* Tear down VXLAN offload on Spectrum-2, mirroring
 * mlxsw_sp2_nve_vxlan_init() in reverse: demote the decap route, clear
 * the tunnel configuration, and restore the default parsing depth.
 */
static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
				 config->udp_dport);
}
266374

267375
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {

0 commit comments

Comments
 (0)