@@ -545,6 +545,113 @@ static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
545
545
return event_eor ;
546
546
}
547
547
548
/* Scan the reassembly queue and, for the first stream that is not already
 * in partial-delivery mode, collect the leading in-order run of fragments
 * that begins with the FIRST_FRAG of the MID that stream expects next.
 * Used to start partial delivery when a complete message cannot yet be
 * reassembled.
 *
 * Returns the (partial) reassembled event, or NULL when no eligible
 * FIRST_FRAG run exists.  On success the stream is switched into
 * partial-delivery mode and its next expected FSN is recorded.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		/* Streams already in partial delivery are skipped entirely. */
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* A second FIRST_FRAG terminates the collectable run. */
			if (first_frag)
				goto out;
			/* Only start a run on the MID the stream expects next. */
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			/* Extend the run only with the next in-sequence
			 * fragment of the same stream and message; anything
			 * else ends the run.
			 */
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			/* A LAST_FRAG after a started run closes the run
			 * without being consumed here; with no run started
			 * it is simply ignored.
			 */
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		/* Record where partial delivery left off for this stream. */
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}
612
+
613
+ static void sctp_intl_start_pd (struct sctp_ulpq * ulpq , gfp_t gfp )
614
+ {
615
+ struct sctp_ulpevent * event ;
616
+
617
+ if (skb_queue_empty (& ulpq -> reasm ))
618
+ return ;
619
+
620
+ do {
621
+ event = sctp_intl_retrieve_first (ulpq );
622
+ if (event )
623
+ sctp_enqueue_event (ulpq , event );
624
+ } while (event );
625
+ }
626
+
627
+ static void sctp_renege_events (struct sctp_ulpq * ulpq , struct sctp_chunk * chunk ,
628
+ gfp_t gfp )
629
+ {
630
+ struct sctp_association * asoc = ulpq -> asoc ;
631
+ __u32 freed = 0 ;
632
+ __u16 needed ;
633
+
634
+ if (chunk ) {
635
+ needed = ntohs (chunk -> chunk_hdr -> length );
636
+ needed -= sizeof (struct sctp_idata_chunk );
637
+ } else {
638
+ needed = SCTP_DEFAULT_MAXWINDOW ;
639
+ }
640
+
641
+ if (skb_queue_empty (& asoc -> base .sk -> sk_receive_queue )) {
642
+ freed = sctp_ulpq_renege_list (ulpq , & ulpq -> lobby , needed );
643
+ if (freed < needed )
644
+ freed += sctp_ulpq_renege_list (ulpq , & ulpq -> reasm ,
645
+ needed );
646
+ }
647
+
648
+ if (chunk && freed >= needed )
649
+ if (sctp_ulpevent_idata (ulpq , chunk , gfp ) <= 0 )
650
+ sctp_intl_start_pd (ulpq , gfp );
651
+
652
+ sk_mem_reclaim (asoc -> base .sk );
653
+ }
654
+
548
655
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
549
656
.data_chunk_len = sizeof (struct sctp_data_chunk ),
550
657
/* DATA process functions */
@@ -553,6 +660,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
553
660
.validate_data = sctp_validate_data ,
554
661
.ulpevent_data = sctp_ulpq_tail_data ,
555
662
.enqueue_event = sctp_ulpq_tail_event ,
663
+ .renege_events = sctp_ulpq_renege ,
556
664
};
557
665
558
666
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
@@ -563,6 +671,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
563
671
.validate_data = sctp_validate_idata ,
564
672
.ulpevent_data = sctp_ulpevent_idata ,
565
673
.enqueue_event = sctp_enqueue_event ,
674
+ .renege_events = sctp_renege_events ,
566
675
};
567
676
568
677
void sctp_stream_interleave_init (struct sctp_stream * stream )
0 commit comments