Skip to content

Commit 61ec2ae

Browse files
yasuyuki5 authored and David S. Miller committed
[IPV6] IP6TUNNEL: Split out generic routine in ip6ip6_xmit().
This enables adding IPv4/IPv6-specific handling later.

Signed-off-by: Yasuyuki Kozakai <[email protected]>
Signed-off-by: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 8359925 commit 61ec2ae

File tree

1 file changed

+98
-45
lines changed

1 file changed

+98
-45
lines changed

net/ipv6/ip6_tunnel.c

Lines changed: 98 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -678,72 +678,49 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
678678
return ret;
679679
}
680680
/**
681-
* ip6ip6_tnl_xmit - encapsulate packet and send
681+
* ip6_tnl_xmit2 - encapsulate packet and send
682682
* @skb: the outgoing socket buffer
683683
* @dev: the outgoing tunnel device
684+
* @dsfield: dscp code for outer header
685+
* @fl: flow of tunneled packet
686+
* @encap_limit: encapsulation limit
687+
* @pmtu: Path MTU is stored if packet is too big
684688
*
685689
* Description:
686690
* Build new header and do some sanity checks on the packet before sending
687691
* it.
688692
*
689693
* Return:
690694
* 0
695+
* -1 fail
696+
* %-EMSGSIZE message too big. return mtu in this case.
691697
**/
692698

693-
static int
694-
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
699+
static int ip6_tnl_xmit2(struct sk_buff *skb,
700+
struct net_device *dev,
701+
__u8 dsfield,
702+
struct flowi *fl,
703+
int encap_limit,
704+
__u32 *pmtu)
695705
{
696706
struct ip6_tnl *t = netdev_priv(dev);
697707
struct net_device_stats *stats = &t->stat;
698708
struct ipv6hdr *ipv6h = skb->nh.ipv6h;
699-
int encap_limit = -1;
700709
struct ipv6_tel_txoption opt;
701-
__u16 offset;
702-
struct flowi fl;
703710
struct dst_entry *dst;
704711
struct net_device *tdev;
705712
int mtu;
706713
int max_headroom = sizeof(struct ipv6hdr);
707714
u8 proto;
708-
int err;
715+
int err = -1;
709716
int pkt_len;
710-
int dsfield;
711-
712-
if (t->recursion++) {
713-
stats->collisions++;
714-
goto tx_err;
715-
}
716-
if (skb->protocol != htons(ETH_P_IPV6) ||
717-
!ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
718-
goto tx_err;
719-
720-
if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
721-
struct ipv6_tlv_tnl_enc_lim *tel;
722-
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
723-
if (tel->encap_limit == 0) {
724-
icmpv6_send(skb, ICMPV6_PARAMPROB,
725-
ICMPV6_HDR_FIELD, offset + 2, skb->dev);
726-
goto tx_err;
727-
}
728-
encap_limit = tel->encap_limit - 1;
729-
} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
730-
encap_limit = t->parms.encap_limit;
731-
732-
memcpy(&fl, &t->fl, sizeof (fl));
733-
proto = fl.proto;
734-
735-
dsfield = ipv6_get_dsfield(ipv6h);
736-
if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
737-
fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
738-
if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
739-
fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
740717

741718
if ((dst = ip6_tnl_dst_check(t)) != NULL)
742719
dst_hold(dst);
743720
else {
744-
dst = ip6_route_output(NULL, &fl);
721+
dst = ip6_route_output(NULL, fl);
745722

746-
if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
723+
if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
747724
goto tx_err_link_failure;
748725
}
749726

@@ -767,7 +744,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
767744
if (skb->dst)
768745
skb->dst->ops->update_pmtu(skb->dst, mtu);
769746
if (skb->len > mtu) {
770-
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
747+
*pmtu = mtu;
748+
err = -EMSGSIZE;
771749
goto tx_err_dst_release;
772750
}
773751

@@ -793,20 +771,21 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
793771

794772
skb->h.raw = skb->nh.raw;
795773

774+
proto = fl->proto;
796775
if (encap_limit >= 0) {
797776
init_tel_txopt(&opt, encap_limit);
798777
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
799778
}
800779
skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
801780
ipv6h = skb->nh.ipv6h;
802-
*(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
781+
*(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
803782
dsfield = INET_ECN_encapsulate(0, dsfield);
804783
ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
805784
ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
806785
ipv6h->hop_limit = t->parms.hop_limit;
807786
ipv6h->nexthdr = proto;
808-
ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
809-
ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
787+
ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
788+
ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
810789
nf_reset(skb);
811790
pkt_len = skb->len;
812791
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
@@ -820,13 +799,87 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
820799
stats->tx_aborted_errors++;
821800
}
822801
ip6_tnl_dst_store(t, dst);
823-
t->recursion--;
824802
return 0;
825803
tx_err_link_failure:
826804
stats->tx_carrier_errors++;
827805
dst_link_failure(skb);
828806
tx_err_dst_release:
829807
dst_release(dst);
808+
return err;
809+
}
810+
811+
static inline int
812+
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
813+
{
814+
struct ip6_tnl *t = netdev_priv(dev);
815+
struct ipv6hdr *ipv6h = skb->nh.ipv6h;
816+
int encap_limit = -1;
817+
__u16 offset;
818+
struct flowi fl;
819+
__u8 dsfield;
820+
__u32 mtu;
821+
int err;
822+
823+
if (!ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
824+
return -1;
825+
826+
if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
827+
struct ipv6_tlv_tnl_enc_lim *tel;
828+
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
829+
if (tel->encap_limit == 0) {
830+
icmpv6_send(skb, ICMPV6_PARAMPROB,
831+
ICMPV6_HDR_FIELD, offset + 2, skb->dev);
832+
return -1;
833+
}
834+
encap_limit = tel->encap_limit - 1;
835+
} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
836+
encap_limit = t->parms.encap_limit;
837+
838+
memcpy(&fl, &t->fl, sizeof (fl));
839+
fl.proto = IPPROTO_IPV6;
840+
841+
dsfield = ipv6_get_dsfield(ipv6h);
842+
if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
843+
fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
844+
if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
845+
fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
846+
847+
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
848+
if (err != 0) {
849+
if (err == -EMSGSIZE)
850+
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
851+
return -1;
852+
}
853+
854+
return 0;
855+
}
856+
857+
static int
858+
ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
859+
{
860+
struct ip6_tnl *t = netdev_priv(dev);
861+
struct net_device_stats *stats = &t->stat;
862+
int ret;
863+
864+
if (t->recursion++) {
865+
t->stat.collisions++;
866+
goto tx_err;
867+
}
868+
869+
switch (skb->protocol) {
870+
case __constant_htons(ETH_P_IPV6):
871+
ret = ip6ip6_tnl_xmit(skb, dev);
872+
break;
873+
default:
874+
goto tx_err;
875+
}
876+
877+
if (ret < 0)
878+
goto tx_err;
879+
880+
t->recursion--;
881+
return 0;
882+
830883
tx_err:
831884
stats->tx_errors++;
832885
stats->tx_dropped++;
@@ -1088,7 +1141,7 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
10881141
SET_MODULE_OWNER(dev);
10891142
dev->uninit = ip6ip6_tnl_dev_uninit;
10901143
dev->destructor = free_netdev;
1091-
dev->hard_start_xmit = ip6ip6_tnl_xmit;
1144+
dev->hard_start_xmit = ip6_tnl_xmit;
10921145
dev->get_stats = ip6ip6_tnl_get_stats;
10931146
dev->do_ioctl = ip6ip6_tnl_ioctl;
10941147
dev->change_mtu = ip6ip6_tnl_change_mtu;

0 commit comments

Comments (0)