@@ -315,11 +315,26 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 				return PACKET_REJECT;

 			memcpy(md, pkt_md, sizeof(*md));
+			md->version = ver;
+
 			info = &tun_dst->u.tun_info;
 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 			info->options_len = sizeof(*md);
 		} else {
-			tunnel->index = ntohl(pkt_md->u.index);
+			tunnel->erspan_ver = ver;
+			if (ver == 1) {
+				tunnel->index = ntohl(pkt_md->u.index);
+			} else {
+				u16 md2_flags;
+				u16 dir, hwid;
+
+				md2_flags = ntohs(pkt_md->u.md2.flags);
+				dir = (md2_flags & DIR_MASK) >> DIR_OFFSET;
+				hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;
+				tunnel->dir = dir;
+				tunnel->hwid = hwid;
+			}
+
 		}

 		skb_reset_mac_header(skb);
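The non-collect_md branch above unpacks the ERSPAN v2 direction and hardware ID from the 16-bit md2 flags word via DIR_MASK/DIR_OFFSET and HWID_MASK/HWID_OFFSET. A minimal standalone sketch of that unpacking, assuming dir occupies bit 3 and a 6-bit hwid occupies bits 4..9; the mask values below are illustrative assumptions, not copied from include/net/erspan.h:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohs()/htons() */

/* Assumed layout of the md2 flags word, mirroring the masks used in the
 * diff: dir is a single bit at position 3, hwid a 6-bit field at bits 4..9.
 */
#define DIR_MASK	0x0008
#define DIR_OFFSET	3
#define HWID_MASK	0x03f0
#define HWID_OFFSET	4

int main(void)
{
	uint16_t wire_flags = htons(0x0158);	/* example on-wire value */
	uint16_t md2_flags = ntohs(wire_flags);	/* same conversion as the rx path */
	uint16_t dir = (md2_flags & DIR_MASK) >> DIR_OFFSET;
	uint16_t hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;

	printf("dir=%u hwid=%u\n", (unsigned)dir, (unsigned)hwid);	/* dir=1 hwid=21 */
	return 0;
}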
@@ -413,7 +428,8 @@ static int gre_rcv(struct sk_buff *skb)
 	if (hdr_len < 0)
 		goto drop;

-	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN))) {
+	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 			return 0;
 	}
@@ -568,6 +584,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 	bool truncate = false;
 	struct flowi4 fl;
 	int tunnel_hlen;
+	int version;
 	__be16 df;

 	tun_info = skb_tunnel_info(skb);
@@ -576,9 +593,13 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto err_free_skb;

 	key = &tun_info->key;
+	md = ip_tunnel_info_opts(tun_info);
+	if (!md)
+		goto err_free_rt;

 	/* ERSPAN has fixed 8 byte GRE header */
-	tunnel_hlen = 8 + sizeof(struct erspan_base_hdr) + ERSPAN_V1_MDSIZE;
+	version = md->version;
+	tunnel_hlen = 8 + erspan_hdr_len(version);

 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
 	if (!rt)
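erspan_fb_xmit now sizes the tunnel header with erspan_hdr_len(version) instead of hard-coding the v1 metadata size. A rough sketch of what such a helper is presumed to compute, under the assumption that it adds a version-specific metadata size to the fixed base header; the struct layout and size constants here are assumptions inferred from the identifiers the diff replaces:

#include <stdint.h>

/* Assumed sizes: ERSPAN v1 carries a 4-byte index, v2 an 8-byte md2 block,
 * and the base ERSPAN header is 4 bytes. Field names are illustrative only.
 */
struct erspan_base_hdr {
	uint16_t ver_vlan;	/* 4-bit version + 12-bit VLAN */
	uint16_t session_id;	/* COS/EN/T bits + 10-bit session ID */
};
#define ERSPAN_V1_MDSIZE	4
#define ERSPAN_V2_MDSIZE	8

/* Presumed shape of erspan_hdr_len(): base header plus per-version metadata,
 * so the caller's tunnel_hlen becomes 8 (GRE) + erspan_hdr_len(version).
 */
static inline int erspan_hdr_len(int version)
{
	return sizeof(struct erspan_base_hdr) +
	       (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
}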
@@ -592,12 +613,23 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		truncate = true;
 	}

-	md = ip_tunnel_info_opts(tun_info);
-	if (!md)
-		goto err_free_rt;
+	if (version == 1) {
+		erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
+				    ntohl(md->u.index), truncate, true);
+	} else if (version == 2) {
+		u16 md2_flags;
+		u8 direction;
+		u16 hwid;

-	erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
-			    ntohl(md->u.index), truncate, true);
+		md2_flags = ntohs(md->u.md2.flags);
+		direction = (md2_flags & DIR_MASK) >> DIR_OFFSET;
+		hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;
+
+		erspan_build_header_v2(skb, tunnel_id_to_key32(key->tun_id),
+				       direction, hwid, truncate, true);
+	} else {
+		goto err_free_rt;
+	}

 	gre_build_header(skb, 8, TUNNEL_SEQ,
 			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
@@ -699,8 +731,14 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 	}

 	/* Push ERSPAN header */
-	erspan_build_header(skb, tunnel->parms.o_key, tunnel->index,
-			    truncate, true);
+	if (tunnel->erspan_ver == 1)
+		erspan_build_header(skb, tunnel->parms.o_key, tunnel->index,
+				    truncate, true);
+	else
+		erspan_build_header_v2(skb, tunnel->parms.o_key,
+				       tunnel->dir, tunnel->hwid,
+				       truncate, true);
+
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
 	return NETDEV_TX_OK;
@@ -1172,13 +1210,32 @@ static int ipgre_netlink_parms(struct net_device *dev,
 	if (data[IFLA_GRE_FWMARK])
 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

-	if (data[IFLA_GRE_ERSPAN_INDEX]) {
-		t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	if (data[IFLA_GRE_ERSPAN_VER]) {
+		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

-		if (t->index & ~INDEX_MASK)
+		if (t->erspan_ver != 1 && t->erspan_ver != 2)
 			return -EINVAL;
 	}

+	if (t->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX]) {
+			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+			if (t->index & ~INDEX_MASK)
+				return -EINVAL;
+		}
+	} else if (t->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR]) {
+			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
+				return -EINVAL;
+		}
+		if (data[IFLA_GRE_ERSPAN_HWID]) {
+			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
+				return -EINVAL;
+		}
+	}
+
 	return 0;
 }

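The netlink parsing rejects out-of-range values with t->dir & ~(DIR_MASK >> DIR_OFFSET) and t->hwid & ~(HWID_MASK >> HWID_OFFSET). With the bit layout assumed in the earlier sketch, dir must fit in 1 bit and hwid in 6 bits; a small worked check, reusing the same illustrative mask values:

#include <stdint.h>
#include <stdio.h>

/* Same assumed masks as before; actual values live in include/net/erspan.h. */
#define DIR_MASK	0x0008
#define DIR_OFFSET	3
#define HWID_MASK	0x03f0
#define HWID_OFFSET	4

int main(void)
{
	/* DIR_MASK >> DIR_OFFSET == 0x1 and HWID_MASK >> HWID_OFFSET == 0x3f,
	 * so any dir > 1 or hwid > 63 leaves bits set outside the mask and
	 * the parse path would return -EINVAL.
	 */
	uint8_t dir = 2;	/* invalid: needs more than 1 bit */
	uint16_t hwid = 63;	/* valid: fits in 6 bits */

	printf("dir ok: %d\n", !(dir & ~(DIR_MASK >> DIR_OFFSET)));	/* prints 0 */
	printf("hwid ok: %d\n", !(hwid & ~(HWID_MASK >> HWID_OFFSET)));	/* prints 1 */
	return 0;
}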
@@ -1245,7 +1302,7 @@ static int erspan_tunnel_init(struct net_device *dev)
 	tunnel->tun_hlen = 8;
 	tunnel->parms.iph.protocol = IPPROTO_GRE;
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
-		       sizeof(struct erspan_base_hdr) + ERSPAN_V1_MDSIZE;
+		       erspan_hdr_len(tunnel->erspan_ver);
 	t_hlen = tunnel->hlen + sizeof(struct iphdr);

 	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
@@ -1375,6 +1432,12 @@ static size_t ipgre_get_size(const struct net_device *dev)
 		nla_total_size(4) +
 		/* IFLA_GRE_ERSPAN_INDEX */
 		nla_total_size(4) +
+		/* IFLA_GRE_ERSPAN_VER */
+		nla_total_size(1) +
+		/* IFLA_GRE_ERSPAN_DIR */
+		nla_total_size(1) +
+		/* IFLA_GRE_ERSPAN_HWID */
+		nla_total_size(2) +
 		0;
 }

@@ -1417,9 +1480,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			goto nla_put_failure;
 	}

-	if (t->index)
+	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+		goto nla_put_failure;
+
+	if (t->erspan_ver == 1) {
 		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
 			goto nla_put_failure;
+	} else if (t->erspan_ver == 2) {
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+			goto nla_put_failure;
+		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+			goto nla_put_failure;
+	}

 	return 0;

@@ -1455,6 +1527,9 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
 };

 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {