@@ -609,6 +609,111 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 }
 EXPORT_SYMBOL(ip_fraglist_prepare);
 
+void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
+		  unsigned int ll_rs, unsigned int mtu,
+		  struct ip_frag_state *state)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	state->hlen = hlen;
+	state->ll_rs = ll_rs;
+	state->mtu = mtu;
+
+	state->left = skb->len - hlen;	/* Space per frame */
+	state->ptr = hlen;		/* Where to start from */
+
+	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
+	state->not_last_frag = iph->frag_off & htons(IP_MF);
+}
+EXPORT_SYMBOL(ip_frag_init);
+
+struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
+{
+	unsigned int len = state->left;
+	struct sk_buff *skb2;
+	struct iphdr *iph;
+
+	len = state->left;
+	/* IF: it doesn't fit, use 'mtu' - the data space left */
+	if (len > state->mtu)
+		len = state->mtu;
+	/* IF: we are not sending up to and including the packet end
+	   then align the next start on an eight byte boundary */
+	if (len < state->left) {
+		len &= ~7;
+	}
+
+	/* Allocate buffer */
+	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
+	if (!skb2)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 *	Set up data on packet
+	 */
+
+	ip_copy_metadata(skb2, skb);
+	skb_reserve(skb2, state->ll_rs);
+	skb_put(skb2, len + state->hlen);
+	skb_reset_network_header(skb2);
+	skb2->transport_header = skb2->network_header + state->hlen;
+
+	/*
+	 *	Charge the memory for the fragment to any owner
+	 *	it might possess
+	 */
+
+	if (skb->sk)
+		skb_set_owner_w(skb2, skb->sk);
+
+	/*
+	 *	Copy the packet header into the new buffer.
+	 */
+
+	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
+
+	/*
+	 *	Copy a block of the IP datagram.
+	 */
+	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
+		BUG();
+	state->left -= len;
+
+	/*
+	 *	Fill in the new header fields.
+	 */
+	iph = ip_hdr(skb2);
+	iph->frag_off = htons((state->offset >> 3));
+
+	if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
+		iph->frag_off |= htons(IP_DF);
+
+	/* ANK: dirty, but effective trick. Upgrade options only if
+	 * the segment to be fragmented was THE FIRST (otherwise,
+	 * options are already fixed) and make it ONCE
+	 * on the initial skb, so that all the following fragments
+	 * will inherit fixed options.
+	 */
+	if (state->offset == 0)
+		ip_options_fragment(skb);
+
+	/*
+	 *	Added AC : If we are fragmenting a fragment that's not the
+	 *	last fragment then keep MF on each bit
+	 */
+	if (state->left > 0 || state->not_last_frag)
+		iph->frag_off |= htons(IP_MF);
+	state->ptr += len;
+	state->offset += len;
+
+	iph->tot_len = htons(len + state->hlen);
+
+	ip_send_check(iph);
+
+	return skb2;
+}
+EXPORT_SYMBOL(ip_frag_next);
+
 /*
  *	This IP datagram is too large to be sent in one piece. Break it up into
  *	smaller pieces (each of size equal to IP header plus
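The two exported helpers above fill in a new struct ip_frag_state, whose declaration is not part of this hunk (it belongs in include/net/ip.h). Reconstructed from the fields the helpers touch and from the local variables removed from ip_do_fragment() in the hunks below, it would look roughly like this sketch; the exact field ordering in the header is an assumption.

/* Sketch only: reconstructed from usage in ip_frag_init()/ip_frag_next()
 * and from the locals removed from ip_do_fragment(); the authoritative
 * declaration lives in include/net/ip.h and may differ in ordering.
 */
struct ip_frag_state {
	int		offset;		/* byte offset of the next fragment */
	unsigned int	hlen;		/* IP header length */
	unsigned int	ll_rs;		/* link-layer headroom to reserve */
	unsigned int	mtu;		/* max payload per fragment */
	unsigned int	left;		/* payload bytes still to be copied */
	int		ptr;		/* read offset into the original skb */
	__be16		not_last_frag;	/* MF bit taken from the original header */
};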
@@ -620,13 +725,11 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		   int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct iphdr *iph;
-	int ptr;
 	struct sk_buff *skb2;
-	unsigned int mtu, hlen, left, len, ll_rs;
-	int offset;
-	__be16 not_last_frag;
 	struct rtable *rt = skb_rtable(skb);
+	unsigned int mtu, hlen, ll_rs;
 	struct ip_fraglist_iter iter;
+	struct ip_frag_state state;
 	int err = 0;
 
 	/* for offloaded checksums cleanup checksum before fragmentation */
@@ -730,105 +833,26 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	}
 
 slow_path:
-	iph = ip_hdr(skb);
-
-	left = skb->len - hlen;		/* Space per frame */
-	ptr = hlen;		/* Where to start from */
-
 	/*
 	 *	Fragment the datagram.
 	 */
 
-	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
-	not_last_frag = iph->frag_off & htons(IP_MF);
+	ip_frag_init(skb, hlen, ll_rs, mtu, &state);
 
 	/*
 	 *	Keep copying data until we run out.
 	 */
 
-	while (left > 0) {
-		len = left;
-		/* IF: it doesn't fit, use 'mtu' - the data space left */
-		if (len > mtu)
-			len = mtu;
-		/* IF: we are not sending up to and including the packet end
-		   then align the next start on an eight byte boundary */
-		if (len < left) {
-			len &= ~7;
-		}
-
-		/* Allocate buffer */
-		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
-		if (!skb2) {
-			err = -ENOMEM;
+	while (state.left > 0) {
+		skb2 = ip_frag_next(skb, &state);
+		if (IS_ERR(skb2)) {
+			err = PTR_ERR(skb2);
 			goto fail;
 		}
 
-		/*
-		 *	Set up data on packet
-		 */
-
-		ip_copy_metadata(skb2, skb);
-		skb_reserve(skb2, ll_rs);
-		skb_put(skb2, len + hlen);
-		skb_reset_network_header(skb2);
-		skb2->transport_header = skb2->network_header + hlen;
-
-		/*
-		 *	Charge the memory for the fragment to any owner
-		 *	it might possess
-		 */
-
-		if (skb->sk)
-			skb_set_owner_w(skb2, skb->sk);
-
-		/*
-		 *	Copy the packet header into the new buffer.
-		 */
-
-		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
-
-		/*
-		 *	Copy a block of the IP datagram.
-		 */
-		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
-			BUG();
-		left -= len;
-
-		/*
-		 *	Fill in the new header fields.
-		 */
-		iph = ip_hdr(skb2);
-		iph->frag_off = htons((offset >> 3));
-
-		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
-			iph->frag_off |= htons(IP_DF);
-
-		/* ANK: dirty, but effective trick. Upgrade options only if
-		 * the segment to be fragmented was THE FIRST (otherwise,
-		 * options are already fixed) and make it ONCE
-		 * on the initial skb, so that all the following fragments
-		 * will inherit fixed options.
-		 */
-		if (offset == 0)
-			ip_options_fragment(skb);
-
-		/*
-		 *	Added AC : If we are fragmenting a fragment that's not the
-		 *	last fragment then keep MF on each bit
-		 */
-		if (left > 0 || not_last_frag)
-			iph->frag_off |= htons(IP_MF);
-		ptr += len;
-		offset += len;
-
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		iph->tot_len = htons(len + hlen);
-
-		ip_send_check(iph);
-
 		err = output(net, sk, skb2);
 		if (err)
 			goto fail;
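Taken together, the refactoring turns the slow path into a generic init/next loop. Outside the diff markup, a caller might drive the new helpers as in the sketch below; it mirrors the rewritten loop in ip_do_fragment() above, and xmit_one() is a hypothetical stand-in for the caller's output callback, not part of the patch.

/* Minimal sketch of a caller of the new API; mirrors the refactored
 * slow path above. xmit_one() is a hypothetical output callback.
 * The caller still owns and frees the original skb.
 */
static int fragment_and_send(struct net *net, struct sock *sk,
			     struct sk_buff *skb, unsigned int hlen,
			     unsigned int ll_rs, unsigned int mtu,
			     int (*xmit_one)(struct net *, struct sock *,
					     struct sk_buff *))
{
	struct ip_frag_state state;
	struct sk_buff *skb2;
	int err;

	ip_frag_init(skb, hlen, ll_rs, mtu, &state);	/* capture offsets and MF from skb */

	while (state.left > 0) {
		skb2 = ip_frag_next(skb, &state);	/* allocate and fill one fragment */
		if (IS_ERR(skb2))
			return PTR_ERR(skb2);

		err = xmit_one(net, sk, skb2);		/* hand the fragment to the output path */
		if (err)
			return err;
	}

	return 0;
}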