@@ -678,72 +678,49 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 	return ret;
 }
 /**
- * ip6ip6_tnl_xmit - encapsulate packet and send
+ * ip6_tnl_xmit2 - encapsulate packet and send
  *   @skb: the outgoing socket buffer
  *   @dev: the outgoing tunnel device
+ *   @dsfield: dscp code for outer header
+ *   @fl: flow of tunneled packet
+ *   @encap_limit: encapsulation limit
+ *   @pmtu: the path MTU is stored here if the packet is too big
  *
  * Description:
  *   Build new header and do some sanity checks on the packet before sending
  *   it.
  *
  * Return:
  *   0
+ *   -1 on failure
+ *   %-EMSGSIZE if the message is too big; the path MTU is returned via @pmtu
 **/
 
-static int
-ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ip6_tnl_xmit2(struct sk_buff *skb,
+			 struct net_device *dev,
+			 __u8 dsfield,
+			 struct flowi *fl,
+			 int encap_limit,
+			 __u32 *pmtu)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->stat;
 	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
-	int encap_limit = -1;
 	struct ipv6_tel_txoption opt;
-	__u16 offset;
-	struct flowi fl;
 	struct dst_entry *dst;
 	struct net_device *tdev;
 	int mtu;
 	int max_headroom = sizeof(struct ipv6hdr);
 	u8 proto;
-	int err;
+	int err = -1;
 	int pkt_len;
-	int dsfield;
-
-	if (t->recursion++) {
-		stats->collisions++;
-		goto tx_err;
-	}
-	if (skb->protocol != htons(ETH_P_IPV6) ||
-	    !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
-		goto tx_err;
-
-	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
-		struct ipv6_tlv_tnl_enc_lim *tel;
-		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
-		if (tel->encap_limit == 0) {
-			icmpv6_send(skb, ICMPV6_PARAMPROB,
-				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
-			goto tx_err;
-		}
-		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl, &t->fl, sizeof(fl));
-	proto = fl.proto;
-
-	dsfield = ipv6_get_dsfield(ipv6h);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
-		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
 	if ((dst = ip6_tnl_dst_check(t)) != NULL)
 		dst_hold(dst);
 	else {
-		dst = ip6_route_output(NULL, &fl);
+		dst = ip6_route_output(NULL, fl);
 
-		if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
+		if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
 			goto tx_err_link_failure;
 	}
 
@@ -767,7 +744,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (skb->dst)
 		skb->dst->ops->update_pmtu(skb->dst, mtu);
 	if (skb->len > mtu) {
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+		*pmtu = mtu;
+		err = -EMSGSIZE;
 		goto tx_err_dst_release;
 	}
 
@@ -793,20 +771,21 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	skb->h.raw = skb->nh.raw;
 
+	proto = fl->proto;
 	if (encap_limit >= 0) {
 		init_tel_txopt(&opt, encap_limit);
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
 	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
 	ipv6h = skb->nh.ipv6h;
-	*(__be32 *)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
+	*(__be32 *)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
 	dsfield = INET_ECN_encapsulate(0, dsfield);
 	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
 	ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = proto;
-	ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
-	ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
+	ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
+	ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
 	nf_reset(skb);
 	pkt_len = skb->len;
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
@@ -820,13 +799,87 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		stats->tx_aborted_errors++;
 	}
 	ip6_tnl_dst_store(t, dst);
-	t->recursion--;
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
+	return err;
+}
+
+static inline int
+ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
+	int encap_limit = -1;
+	__u16 offset;
+	struct flowi fl;
+	__u8 dsfield;
+	__u32 mtu;
+	int err;
+
+	if (!ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
+		return -1;
+
+	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+			return -1;
+		}
+		encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		encap_limit = t->parms.encap_limit;
+
+	memcpy(&fl, &t->fl, sizeof(fl));
+	fl.proto = IPPROTO_IPV6;
+
+	dsfield = ipv6_get_dsfield(ipv6h);
+	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+
+	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+	if (err != 0) {
+		if (err == -EMSGSIZE)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net_device_stats *stats = &t->stat;
+	int ret;
+
+	if (t->recursion++) {
+		t->stat.collisions++;
+		goto tx_err;
+	}
+
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IPV6):
+		ret = ip6ip6_tnl_xmit(skb, dev);
+		break;
+	default:
+		goto tx_err;
+	}
+
+	if (ret < 0)
+		goto tx_err;
+
+	t->recursion--;
+	return 0;
+
 tx_err:
 	stats->tx_errors++;
 	stats->tx_dropped++;
@@ -1088,7 +1141,7 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
 	SET_MODULE_OWNER(dev);
 	dev->uninit = ip6ip6_tnl_dev_uninit;
 	dev->destructor = free_netdev;
-	dev->hard_start_xmit = ip6ip6_tnl_xmit;
+	dev->hard_start_xmit = ip6_tnl_xmit;
 	dev->get_stats = ip6ip6_tnl_get_stats;
 	dev->do_ioctl = ip6ip6_tnl_ioctl;
 	dev->change_mtu = ip6ip6_tnl_change_mtu;
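The patch splits the old monolithic transmit routine into a generic encapsulation helper, ip6_tnl_xmit2(), which reports failure purely through return codes, and an IPv6-specific wrapper, ip6ip6_tnl_xmit(), which translates -EMSGSIZE into an ICMPv6 Packet Too Big message. Below is a minimal userspace sketch of that error contract, not kernel code: fake_xmit2, fake_ip6ip6_xmit, notify_pkt_toobig, and LINK_MTU are hypothetical stand-ins for ip6_tnl_xmit2, ip6ip6_tnl_xmit, icmpv6_send, and the real path-MTU lookup.

/*
 * Userspace sketch of the -EMSGSIZE contract introduced by the patch.
 * All names here are illustrative stand-ins, not kernel APIs.
 */
#include <errno.h>
#include <stdio.h>

#define LINK_MTU 1280	/* assumed tunnel path MTU for this sketch */

/* Stand-in for ip6_tnl_xmit2(): returns 0 on success, or -EMSGSIZE
 * after storing the path MTU through *pmtu, mirroring the new helper. */
static int fake_xmit2(unsigned int pkt_len, unsigned int *pmtu)
{
	if (pkt_len > LINK_MTU) {
		*pmtu = LINK_MTU;
		return -EMSGSIZE;
	}
	return 0;
}

/* Stand-in for icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev). */
static void notify_pkt_toobig(unsigned int mtu)
{
	printf("ICMPV6_PKT_TOOBIG, mtu=%u\n", mtu);
}

/* Mirrors ip6ip6_tnl_xmit(): the protocol-specific wrapper turns
 * -EMSGSIZE from the generic helper into a Packet Too Big signal. */
static int fake_ip6ip6_xmit(unsigned int pkt_len)
{
	unsigned int mtu;
	int err = fake_xmit2(pkt_len, &mtu);

	if (err != 0) {
		if (err == -EMSGSIZE)
			notify_pkt_toobig(mtu);
		return -1;
	}
	return 0;
}

int main(void)
{
	fake_ip6ip6_xmit(1400);		/* too big: prints the MTU */
	return fake_ip6ip6_xmit(1000);	/* fits: returns 0 */
}

Keeping the ICMP signalling in the per-protocol wrapper leaves the generic helper reusable, and the switch on skb->protocol in the new ip6_tnl_xmit() entry point presumably leaves room for dispatching other inner protocols later.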