@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
 	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
 		return tun_type == NFP_FL_TUNNEL_VXLAN;
 
+	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+		return tun_type == NFP_FL_TUNNEL_GENEVE;
+
 	return false;
 }
 
@@ -136,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
 	return 0;
 }
 
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+				const struct tc_action *action)
 {
 	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
-	return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+	struct nfp_flower_priv *priv = app->priv;
+
+	switch (tun->key.tp_dst) {
+	case htons(NFP_FL_VXLAN_PORT):
+		return NFP_FL_TUNNEL_VXLAN;
+	case htons(NFP_FL_GENEVE_PORT):
+		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+			return NFP_FL_TUNNEL_GENEVE;
+		/* FALLTHROUGH */
+	default:
+		return NFP_FL_TUNNEL_NONE;
+	}
 }
 
 static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
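For reference, a minimal standalone sketch (not driver code) of the port-to-tunnel-type selection that nfp_fl_get_tun_from_act_l4_port() performs above, assuming the conventional IANA UDP ports (4789 for VXLAN, 6081 for Geneve) behind NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT. The enum and feature-bit names are illustrative placeholders, and the switch here is on the host-order port rather than on htons() case labels:

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

enum tun_type { TUN_NONE, TUN_VXLAN, TUN_GENEVE };

#define FEAT_GENEVE (1u << 0)	/* placeholder for NFP_FL_FEATS_GENEVE */

/* tp_dst_be is the tunnel action's destination port in network byte order,
 * feats is the firmware feature bitmap.
 */
static enum tun_type tun_from_l4_port(uint16_t tp_dst_be, uint32_t feats)
{
	switch (ntohs(tp_dst_be)) {
	case 4789:			/* VXLAN */
		return TUN_VXLAN;
	case 6081:			/* Geneve */
		if (feats & FEAT_GENEVE)
			return TUN_GENEVE;
		/* fall through: firmware lacks Geneve support */
	default:
		return TUN_NONE;
	}
}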
@@ -165,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
 }
 
 static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
-		 const struct tc_action *action,
-		 struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+			const struct tc_action *action,
+			struct nfp_fl_pre_tunnel *pre_tun,
+			enum nfp_flower_tun_type tun_type)
 {
-	struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
-	size_t act_size = sizeof(struct nfp_fl_set_vxlan);
-	u32 tmp_set_vxlan_type_index = 0;
+	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+	u32 tmp_set_ip_tun_type_index = 0;
 	/* Currently support one pre-tunnel so index is always 0. */
 	int pretun_idx = 0;
 
-	if (vxlan->options_len) {
-		/* Do not support options e.g. vxlan gpe. */
+	if (ip_tun->options_len)
 		return -EOPNOTSUPP;
-	}
 
-	set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
-	set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
 	/* Set tunnel type and pre-tunnel index. */
-	tmp_set_vxlan_type_index |=
-		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+	tmp_set_ip_tun_type_index |=
+		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
 		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
 
-	set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
-	set_vxlan->tun_id = vxlan->key.tun_id;
-	set_vxlan->tun_flags = vxlan->key.tun_flags;
-	set_vxlan->ipv4_ttl = vxlan->key.ttl;
-	set_vxlan->ipv4_tos = vxlan->key.tos;
+	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+	set_tun->tun_id = ip_tun->key.tun_id;
 
 	/* Complete pre_tunnel action. */
-	pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
 
 	return 0;
 }
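As a side note, a sketch of how the 32-bit tun_type_index word above is assembled. FIELD_PREP(mask, val) from <linux/bitfield.h> shifts val into the bit positions selected by mask; the mask values and the FIELD_PREP stand-in below are placeholders, not the driver's actual NFP_FL_IPV4_TUNNEL_TYPE / NFP_FL_IPV4_PRE_TUN_INDEX definitions:

#include <stdint.h>
#include <arpa/inet.h>

#define TUN_TYPE_MASK    0x000000f0u	/* example mask: bits 7:4 */
#define PRE_TUN_IDX_MASK 0x0000000fu	/* example mask: bits 3:0 */

/* Minimal stand-in for FIELD_PREP(): shift the value up to the mask's
 * lowest set bit and truncate it to the mask's width.
 */
#define FIELD_PREP_EX(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

static uint32_t pack_tun_type_index(uint32_t tun_type, uint32_t pretun_idx)
{
	uint32_t w = 0;

	w |= FIELD_PREP_EX(TUN_TYPE_MASK, tun_type) |
	     FIELD_PREP_EX(PRE_TUN_IDX_MASK, pretun_idx);

	/* Firmware expects big-endian, hence the cpu_to_be32() above. */
	return htonl(w);
}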
@@ -433,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
 		       struct net_device *netdev,
 		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
 {
+	struct nfp_fl_set_ipv4_udp_tun *set_tun;
 	struct nfp_fl_pre_tunnel *pre_tun;
-	struct nfp_fl_set_vxlan *s_vxl;
 	struct nfp_fl_push_vlan *psh_v;
 	struct nfp_fl_pop_vlan *pop_v;
 	struct nfp_fl_output *output;
@@ -482,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
 
 		nfp_fl_push_vlan(psh_v, a);
 		*a_len += sizeof(struct nfp_fl_push_vlan);
-	} else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+	} else if (is_tcf_tunnel_set(a)) {
+		struct nfp_repr *repr = netdev_priv(netdev);
+		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+		if (*tun_type == NFP_FL_TUNNEL_NONE)
+			return -EOPNOTSUPP;
+
 		/* Pre-tunnel action is required for tunnel encap.
 		 * This checks for next hop entries on NFP.
 		 * If none, the packet falls back before applying other actions.
 		 */
 		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-		    sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
 			return -EOPNOTSUPP;
 
-		*tun_type = NFP_FL_TUNNEL_VXLAN;
 		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
 		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
 		*a_len += sizeof(struct nfp_fl_pre_tunnel);
 
-		s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
-		err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+		set_tun = (void *)&nfp_fl->action_data[*a_len];
+		err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
 		if (err)
 			return err;
-
-		*a_len += sizeof(struct nfp_fl_set_vxlan);
+		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
 	} else if (is_tcf_tunnel_release(a)) {
 		/* Tunnel decap is handled by default so accept action. */
 		return 0;
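Finally, a rough sketch of the action-buffer bookkeeping in the encap branch above: the pre-tunnel action and the IPv4/UDP tunnel-set action are appended back to back, and the flow is rejected up front if the pair would overflow the fixed action area. The buffer size, struct, and function names below are placeholders, not the driver's NFP_FL_MAX_A_SIZ layout:

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define MAX_ACT_SIZE 1216		/* placeholder action-area limit */

struct act_buf {
	char data[MAX_ACT_SIZE];
	size_t len;			/* mirrors *a_len in the driver */
};

/* Append the pre-tunnel blob followed by the tunnel-set blob,
 * checking their combined size first, as the branch above does.
 */
static int act_append_tunnel(struct act_buf *buf,
			     const void *pre_tun, size_t pre_sz,
			     const void *set_tun, size_t set_sz)
{
	if (buf->len + pre_sz + set_sz > sizeof(buf->data))
		return -EOPNOTSUPP;

	memcpy(&buf->data[buf->len], pre_tun, pre_sz);
	buf->len += pre_sz;
	memcpy(&buf->data[buf->len], set_tun, set_sz);
	buf->len += set_sz;
	return 0;
}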