 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/ip_tunnels.h>
+#include <net/ip6_checksum.h>
 #include <net/addrconf.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
@@ -385,8 +386,13 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
 		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 		if (!dest)
 			goto err_put;
-		if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+		if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
 			mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
+			if ((dest->tun_flags &
+			     IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+			    skb->ip_summed == CHECKSUM_PARTIAL)
+				mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
+		}
 		if (mtu < 68) {
 			IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
 			goto err_put;
@@ -540,8 +546,13 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
 		mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
 		if (!dest)
 			goto err_put;
-		if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
+		if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
 			mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
+			if ((dest->tun_flags &
+			     IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+			    skb->ip_summed == CHECKSUM_PARTIAL)
+				mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
+		}
 		if (mtu < IPV6_MIN_MTU) {
 			IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
 				     IPV6_MIN_MTU);
@@ -1006,17 +1017,56 @@ ipvs_gue_encap(struct net *net, struct sk_buff *skb,
 	__be16 sport = udp_flow_src_port(net, skb, 0, 0, false);
 	struct udphdr *udph;	/* Our new UDP header */
 	struct guehdr *gueh;	/* Our new GUE header */
+	size_t hdrlen, optlen = 0;
+	void *data;
+	bool need_priv = false;
+
+	if ((cp->dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+	    skb->ip_summed == CHECKSUM_PARTIAL) {
+		optlen += GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
+		need_priv = true;
+	}
 
-	skb_push(skb, sizeof(struct guehdr));
+	hdrlen = sizeof(struct guehdr) + optlen;
+
+	skb_push(skb, hdrlen);
 
 	gueh = (struct guehdr *)skb->data;
 
 	gueh->control = 0;
 	gueh->version = 0;
-	gueh->hlen = 0;
+	gueh->hlen = optlen >> 2;
 	gueh->flags = 0;
 	gueh->proto_ctype = *next_protocol;
 
+	data = &gueh[1];
+
+	if (need_priv) {
+		__be32 *flags = data;
+		u16 csum_start = skb_checksum_start_offset(skb);
+		__be16 *pd;
+
+		gueh->flags |= GUE_FLAG_PRIV;
+		*flags = 0;
+		data += GUE_LEN_PRIV;
+
+		if (csum_start < hdrlen)
+			return -EINVAL;
+
+		csum_start -= hdrlen;
+		pd = data;
+		pd[0] = htons(csum_start);
+		pd[1] = htons(csum_start + skb->csum_offset);
+
+		if (!skb_is_gso(skb)) {
+			skb->ip_summed = CHECKSUM_NONE;
+			skb->encapsulation = 0;
+		}
+
+		*flags |= GUE_PFLAG_REMCSUM;
+		data += GUE_PLEN_REMCSUM;
+	}
+
 	skb_push(skb, sizeof(struct udphdr));
 	skb_reset_transport_header(skb);
 
@@ -1070,6 +1120,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	unsigned int max_headroom;	/* The extra header space needed */
 	int ret, local;
 	int tun_type, gso_type;
+	int tun_flags;
 
 	EnterFunction(10);
 
@@ -1092,9 +1143,19 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
 
 	tun_type = cp->dest->tun_type;
+	tun_flags = cp->dest->tun_flags;
 
-	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
-		max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
+	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+		size_t gue_hdrlen, gue_optlen = 0;
+
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+		    skb->ip_summed == CHECKSUM_PARTIAL) {
+			gue_optlen += GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
+		}
+		gue_hdrlen = sizeof(struct guehdr) + gue_optlen;
+
+		max_headroom += sizeof(struct udphdr) + gue_hdrlen;
+	}
 
 	/* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
 	dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
@@ -1105,8 +1166,17 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error;
 
 	gso_type = __tun_gso_type_mask(AF_INET, cp->af);
-	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
-		gso_type |= SKB_GSO_UDP_TUNNEL;
+	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
+		    (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
+			gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+		else
+			gso_type |= SKB_GSO_UDP_TUNNEL;
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+		    skb->ip_summed == CHECKSUM_PARTIAL) {
+			gso_type |= SKB_GSO_TUNNEL_REMCSUM;
+		}
+	}
 
 	if (iptunnel_handle_offloads(skb, gso_type))
 		goto tx_error;
@@ -1115,8 +1185,19 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	skb_set_inner_ipproto(skb, next_protocol);
 
-	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
-		ipvs_gue_encap(net, skb, cp, &next_protocol);
+	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+		bool check = false;
+
+		if (ipvs_gue_encap(net, skb, cp, &next_protocol))
+			goto tx_error;
+
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
+		    (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
+			check = true;
+
+		udp_set_csum(!check, skb, saddr, cp->daddr.ip, skb->len);
+	}
+
 	skb_push(skb, sizeof(struct iphdr));
 	skb_reset_network_header(skb);
@@ -1174,6 +1255,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	unsigned int max_headroom;	/* The extra header space needed */
 	int ret, local;
 	int tun_type, gso_type;
+	int tun_flags;
 
 	EnterFunction(10);
 
@@ -1197,9 +1279,19 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
 
 	tun_type = cp->dest->tun_type;
+	tun_flags = cp->dest->tun_flags;
 
-	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
-		max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
+	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+		size_t gue_hdrlen, gue_optlen = 0;
+
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+		    skb->ip_summed == CHECKSUM_PARTIAL) {
+			gue_optlen += GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
+		}
+		gue_hdrlen = sizeof(struct guehdr) + gue_optlen;
+
+		max_headroom += sizeof(struct udphdr) + gue_hdrlen;
+	}
 
 	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
 					 &next_protocol, &payload_len,
@@ -1208,8 +1300,17 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error;
 
 	gso_type = __tun_gso_type_mask(AF_INET6, cp->af);
-	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
-		gso_type |= SKB_GSO_UDP_TUNNEL;
+	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
+		    (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
+			gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+		else
+			gso_type |= SKB_GSO_UDP_TUNNEL;
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM) &&
+		    skb->ip_summed == CHECKSUM_PARTIAL) {
+			gso_type |= SKB_GSO_TUNNEL_REMCSUM;
+		}
+	}
 
 	if (iptunnel_handle_offloads(skb, gso_type))
 		goto tx_error;
@@ -1218,8 +1319,18 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	skb_set_inner_ipproto(skb, next_protocol);
 
-	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
-		ipvs_gue_encap(net, skb, cp, &next_protocol);
+	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+		bool check = false;
+
+		if (ipvs_gue_encap(net, skb, cp, &next_protocol))
+			goto tx_error;
+
+		if ((tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM) ||
+		    (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM))
+			check = true;
+
+		udp6_set_csum(!check, skb, &saddr, &cp->daddr.in6, skb->len);
+	}
 
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
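
For reference only, not part of the commit: a minimal userspace sketch of the header arithmetic ipvs_gue_encap() performs when remote checksum offload is requested. The struct layout is simplified (with version and control both zero, the first GUE octet is just hlen), the GUE_* constant values are copied from include/net/gue.h, and the example csum_start/csum_offset values are made up to stand in for skb_checksum_start_offset() and skb->csum_offset.

/* gue_remcsum_sketch.c - illustrative only, assumes an inner IPv4+TCP packet */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define GUE_LEN_PRIV      4                   /* 32-bit private flags word   */
#define GUE_PLEN_REMCSUM  4                   /* two 16-bit offsets          */
#define GUE_FLAG_PRIV     htons(1U << 0)      /* value from include/net/gue.h */
#define GUE_PFLAG_REMCSUM htonl(1U << 31)     /* value from include/net/gue.h */

struct guehdr_sketch {          /* simplified wire view of struct guehdr */
	uint8_t  hlen_version;  /* ver=0, control=0, so the byte is just hlen */
	uint8_t  proto_ctype;
	uint16_t flags;
	uint8_t  opts[GUE_LEN_PRIV + GUE_PLEN_REMCSUM];
};

int main(void)
{
	/* Hypothetical inner offsets: checksum coverage starts 20 bytes into
	 * the inner packet (after a bare IPv4 header), TCP check field is
	 * another 16 bytes in. */
	uint16_t csum_start = 20, csum_offset = 16;
	size_t optlen = GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
	struct guehdr_sketch gueh;

	memset(&gueh, 0, sizeof(gueh));
	gueh.hlen_version = optlen >> 2;        /* hlen counts 32-bit words   */
	gueh.proto_ctype  = 4;                  /* IPPROTO_IPIP: inner IPv4   */
	gueh.flags        = GUE_FLAG_PRIV;      /* private block follows      */

	/* Private block: 32-bit flags word, then the remcsum start/offset
	 * pair, both relative to the end of the GUE header. */
	uint32_t pflags = GUE_PFLAG_REMCSUM;
	memcpy(gueh.opts, &pflags, sizeof(pflags));
	uint16_t pd[2] = { htons(csum_start), htons(csum_start + csum_offset) };
	memcpy(gueh.opts + GUE_LEN_PRIV, pd, sizeof(pd));

	printf("GUE hlen=%u 32-bit words, option bytes=%zu\n",
	       (unsigned)(gueh.hlen_version & 0x1f), optlen);
	return 0;
}

It prints hlen = 2, i.e. the 8 option bytes (4-byte private flags word plus 4 bytes of remcsum offsets) that the patch also subtracts from the tunnel MTU and adds to max_headroom when IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM is set and the skb is CHECKSUM_PARTIAL.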