 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <net/geneve.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -53,7 +55,11 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
 
 static const struct nla_policy
 enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
+	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC]	= {
+		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
 	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
+	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
+	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy
@@ -64,6 +70,19 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
 						       .len = 128 },
 };
 
+static const struct nla_policy
+vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
+	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]	   = { .type = NLA_U32 },
+};
+
+static const struct nla_policy
+erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
+	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]	   = { .type = NLA_U8 },
+	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]	   = { .type = NLA_U32 },
+	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR]	   = { .type = NLA_U8 },
+	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]	   = { .type = NLA_U8 },
+};
+
 static int
 tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
 			   struct netlink_ext_ack *extack)
@@ -116,10 +135,89 @@ tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
 	return opt_len;
 }
 
+static int
+tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
+			  struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
+			       vxlan_opt_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
+		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
+		return -EINVAL;
+	}
+
+	if (dst) {
+		struct vxlan_metadata *md = dst;
+
+		md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
+	}
+
+	return sizeof(struct vxlan_metadata);
+}
+
+static int
+tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
+			   struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
+	int err;
+	u8 ver;
+
+	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
+			       erspan_opt_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
+		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
+		return -EINVAL;
+	}
+
+	ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
+	if (ver == 1) {
+		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
+			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
+			return -EINVAL;
+		}
+	} else if (ver == 2) {
+		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
+		    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
+			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
+			return -EINVAL;
+		}
+	} else {
+		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
+		return -EINVAL;
+	}
+
+	if (dst) {
+		struct erspan_metadata *md = dst;
+
+		md->version = ver;
+		if (ver == 1) {
+			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
+			md->u.index = nla_get_be32(nla);
+		} else {
+			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
+			md->u.md2.dir = nla_get_u8(nla);
+			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
+			set_hwid(&md->u.md2, nla_get_u8(nla));
+		}
+	}
+
+	return sizeof(struct erspan_metadata);
+}
+
 static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
 				int dst_len, struct netlink_ext_ack *extack)
 {
-	int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
+	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
 	const struct nlattr *attr, *head = nla_data(nla);
 
 	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
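For context, set_hwid() and get_hwid() (from net/erspan.h), which the copy and dump helpers in this patch call, pack and unpack the 6-bit ERSPAN v2 hardware ID; because that ID straddles a byte boundary in the md2 header, it is stored as a 4-bit field plus a 2-bit "upper" field. The standalone sketch below only illustrates that split; the toy struct and names are hypothetical and do not reproduce the kernel's bitfield-packed struct erspan_md2.

/* Illustrative sketch only, not part of the patch: a toy stand-in for the
 * 4+2-bit hardware-ID split performed by set_hwid()/get_hwid().
 */
#include <assert.h>
#include <stdint.h>

struct toy_md2 {
	uint8_t hwid;		/* low 4 bits of the hardware ID */
	uint8_t hwid_upper;	/* high 2 bits of the hardware ID */
};

static void toy_set_hwid(struct toy_md2 *md2, uint8_t hwid)
{
	md2->hwid = hwid & 0xf;			/* low nibble */
	md2->hwid_upper = (hwid >> 4) & 0x3;	/* remaining two bits */
}

static uint8_t toy_get_hwid(const struct toy_md2 *md2)
{
	return (uint8_t)((md2->hwid_upper << 4) | md2->hwid);
}

int main(void)
{
	struct toy_md2 md2;

	toy_set_hwid(&md2, 0x2a);		/* 6-bit value 0b101010 */
	assert(toy_get_hwid(&md2) == 0x2a);	/* round-trips intact */
	return 0;
}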
@@ -130,6 +228,10 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
 	nla_for_each_attr(attr, head, len, rem) {
 		switch (nla_type(attr)) {
 		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
+			if (type && type != TUNNEL_GENEVE_OPT) {
+				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
+				return -EINVAL;
+			}
 			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
 							     dst_len, extack);
 			if (opt_len < 0)
@@ -139,6 +241,31 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
 				dst_len -= opt_len;
 				dst += opt_len;
 			}
+			type = TUNNEL_GENEVE_OPT;
+			break;
+		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
+			if (type) {
+				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
+				return -EINVAL;
+			}
+			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
+							    dst_len, extack);
+			if (opt_len < 0)
+				return opt_len;
+			opts_len += opt_len;
+			type = TUNNEL_VXLAN_OPT;
+			break;
+		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
+			if (type) {
+				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
+				return -EINVAL;
+			}
+			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
+							     dst_len, extack);
+			if (opt_len < 0)
+				return opt_len;
+			opts_len += opt_len;
+			type = TUNNEL_ERSPAN_OPT;
 			break;
 		}
 	}
@@ -174,6 +301,22 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
 						    opts_len, extack);
 #else
 		return -EAFNOSUPPORT;
+#endif
+	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
+#if IS_ENABLED(CONFIG_INET)
+		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
+					    opts_len, extack);
+#else
+		return -EAFNOSUPPORT;
+#endif
+	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
+#if IS_ENABLED(CONFIG_INET)
+		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
+					    opts_len, extack);
+#else
+		return -EAFNOSUPPORT;
 #endif
 	default:
 		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
@@ -451,6 +594,56 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
 	return 0;
 }
 
+static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
+				      const struct ip_tunnel_info *info)
+{
+	struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
+	struct nlattr *start;
+
+	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
+	if (!start)
+		return -EMSGSIZE;
+
+	if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
+		nla_nest_cancel(skb, start);
+		return -EMSGSIZE;
+	}
+
+	nla_nest_end(skb, start);
+	return 0;
+}
+
+static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
+				       const struct ip_tunnel_info *info)
+{
+	struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
+	struct nlattr *start;
+
+	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
+	if (!start)
+		return -EMSGSIZE;
+
+	if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
+		goto err;
+
+	if (md->version == 1 &&
+	    nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
+		goto err;
+
+	if (md->version == 2 &&
+	    (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
+			md->u.md2.dir) ||
+	     nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
+			get_hwid(&md->u.md2))))
+		goto err;
+
+	nla_nest_end(skb, start);
+	return 0;
+err:
+	nla_nest_cancel(skb, start);
+	return -EMSGSIZE;
+}
+
 static int tunnel_key_opts_dump(struct sk_buff *skb,
 				const struct ip_tunnel_info *info)
 {
@@ -468,6 +661,14 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
 		err = tunnel_key_geneve_opts_dump(skb, info);
 		if (err)
 			goto err_out;
+	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+		err = tunnel_key_vxlan_opts_dump(skb, info);
+		if (err)
+			goto err_out;
+	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
+		err = tunnel_key_erspan_opts_dump(skb, info);
+		if (err)
+			goto err_out;
 	} else {
 err_out:
 		nla_nest_cancel(skb, start);
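The `if (dst)` guards in tunnel_key_copy_vxlan_opt() and tunnel_key_copy_erspan_opt() suggest the helpers serve double duty: called with a NULL destination they only report the metadata size, and called again with a buffer they fill it in. A minimal standalone sketch of that size-then-copy pattern follows; the names are hypothetical and it sits outside any kernel API.

/* Illustrative sketch only: the two-pass "size, then copy" calling pattern
 * implied by the `if (dst)` guards in the new copy helpers.
 */
#include <stdint.h>
#include <stdlib.h>

struct toy_vxlan_md {
	uint32_t gbp;
};

/* Copy option metadata into dst if given; always return the size needed. */
static int toy_copy_vxlan_opt(uint32_t gbp, void *dst, int dst_len)
{
	if (dst) {
		struct toy_vxlan_md *md = dst;

		if (dst_len < (int)sizeof(*md))
			return -1;
		md->gbp = gbp;
	}
	return sizeof(struct toy_vxlan_md);
}

int main(void)
{
	void *buf;
	int len;

	len = toy_copy_vxlan_opt(0x100, NULL, 0);	/* pass 1: size only */
	buf = malloc(len);
	if (!buf)
		return 1;
	toy_copy_vxlan_opt(0x100, buf, len);		/* pass 2: fill buffer */
	free(buf);
	return 0;
}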