 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <linux/skb_array.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -578,93 +579,93 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
 
 /*
  * Private data for a pfifo_fast scheduler containing:
- *	- queues for the three band
- *	- bitmap indicating which of the bands contain skbs
+ *	- rings for priority bands
  */
 struct pfifo_fast_priv {
-	u32 bitmap;
-	struct qdisc_skb_head q[PFIFO_FAST_BANDS];
+	struct skb_array q[PFIFO_FAST_BANDS];
 };
 
-/*
- * Convert a bitmap to the first band number where an skb is queued, where:
- *	bitmap=0 means there are no skbs on any band.
- *	bitmap=1 means there is an skb on band 0.
- *	bitmap=7 means there are skbs on all 3 bands, etc.
- */
-static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
-
-static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
-					       int band)
+static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
+					  int band)
 {
-	return priv->q + band;
+	return &priv->q[band];
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 			      struct sk_buff **to_free)
 {
-	if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
-		int band = prio2band[skb->priority & TC_PRIO_MAX];
-		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-		struct qdisc_skb_head *list = band2list(priv, band);
-
-		priv->bitmap |= (1 << band);
-		qdisc->q.qlen++;
-		return __qdisc_enqueue_tail(skb, qdisc, list);
-	}
+	int band = prio2band[skb->priority & TC_PRIO_MAX];
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	struct skb_array *q = band2list(priv, band);
+	int err;
 
-	return qdisc_drop(skb, qdisc, to_free);
+	err = skb_array_produce(q, skb);
+
+	if (unlikely(err))
+		return qdisc_drop_cpu(skb, qdisc, to_free);
+
+	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+	return NET_XMIT_SUCCESS;
 }
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int band = bitmap2band[priv->bitmap];
-
-	if (likely(band >= 0)) {
-		struct qdisc_skb_head *qh = band2list(priv, band);
-		struct sk_buff *skb = __qdisc_dequeue_head(qh);
+	struct sk_buff *skb = NULL;
+	int band;
 
-		if (likely(skb != NULL)) {
-			qdisc_qstats_backlog_dec(qdisc, skb);
-			qdisc_bstats_update(qdisc, skb);
-		}
+	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+		struct skb_array *q = band2list(priv, band);
 
-		qdisc->q.qlen--;
-		if (qh->qlen == 0)
-			priv->bitmap &= ~(1 << band);
+		if (__skb_array_empty(q))
+			continue;
 
-		return skb;
+		skb = skb_array_consume_bh(q);
+	}
+	if (likely(skb)) {
+		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
+		qdisc_bstats_cpu_update(qdisc, skb);
+		qdisc_qstats_cpu_qlen_dec(qdisc);
 	}
 
-	return NULL;
+	return skb;
 }
 
 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int band = bitmap2band[priv->bitmap];
+	struct sk_buff *skb = NULL;
+	int band;
 
-	if (band >= 0) {
-		struct qdisc_skb_head *qh = band2list(priv, band);
+	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+		struct skb_array *q = band2list(priv, band);
 
-		return qh->head;
+		skb = __skb_array_peek(q);
 	}
 
-	return NULL;
+	return skb;
 }
 
 static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
-	int prio;
+	int i, band;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(band2list(priv, prio));
+	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
+		struct skb_array *q = band2list(priv, band);
+		struct sk_buff *skb;
 
-	priv->bitmap = 0;
-	qdisc->qstats.backlog = 0;
-	qdisc->q.qlen = 0;
+		while ((skb = skb_array_consume_bh(q)) != NULL)
+			kfree_skb(skb);
+	}
+
+	for_each_possible_cpu(i) {
+		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+
+		q->backlog = 0;
+		q->qlen = 0;
+	}
 }
 
 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
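Note that the new datapath above never touches qdisc->q.qlen: with TCQ_F_CPUSTATS (set via static_flags in the second hunk below) the qlen and backlog counters live in per-CPU gnet_stats_queue slots, which is why pfifo_fast_reset() clears them with for_each_possible_cpu(). As a rough illustration of that layout, a total queue length has to be summed across CPUs; the helper below is a sketch written for this note, not code from the patch.

```c
/* Illustrative only: summing the per-CPU qlen counters that the lockless
 * pfifo_fast datapath updates via qdisc_qstats_cpu_qlen_inc()/_dec().
 */
static unsigned int example_qlen_sum(struct Qdisc *qdisc)
{
	unsigned int qlen = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		qlen += per_cpu_ptr(qdisc->cpu_qstats, cpu)->qlen;

	return qlen;
}
```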
@@ -682,27 +683,60 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 
 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
-	int prio;
+	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int prio;
+
+	/* guard against zero length rings */
+	if (!qlen)
+		return -EINVAL;
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		qdisc_skb_head_init(band2list(priv, prio));
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+		int err;
+
+		err = skb_array_init(q, qlen, GFP_KERNEL);
+		if (err)
+			return -ENOMEM;
+	}
 
 	/* Can by-pass the queue discipline */
 	qdisc->flags |= TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
+static void pfifo_fast_destroy(struct Qdisc *sch)
+{
+	struct pfifo_fast_priv *priv = qdisc_priv(sch);
+	int prio;
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+
+		/* NULL ring is possible if destroy path is due to a failed
+		 * skb_array_init() in pfifo_fast_init() case.
+		 */
+		if (!q->ring.queue)
+			continue;
+		/* Destroy ring but no need to kfree_skb because a call to
+		 * pfifo_fast_reset() has already done that work.
+		 */
+		ptr_ring_cleanup(&q->ring, NULL);
+	}
+}
+
 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
 	.priv_size	=	sizeof(struct pfifo_fast_priv),
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
 	.init		=	pfifo_fast_init,
+	.destroy	=	pfifo_fast_destroy,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
+	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 };
 
 EXPORT_SYMBOL(pfifo_fast_ops);
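Taken together, each band's ring follows a simple lifecycle: sized from tx_queue_len in pfifo_fast_init(), filled and drained locklessly by enqueue/dequeue, emptied in pfifo_fast_reset(), and released via ptr_ring_cleanup() in pfifo_fast_destroy(). The condensed sketch below walks one ring through that lifecycle outside the qdisc; the function name and standalone setting are assumptions made for illustration, not part of the patch.

```c
#include <linux/skb_array.h>
#include <linux/skbuff.h>

/* Illustrative lifecycle of one skb_array ring, mirroring the patch:
 * init -> produce/consume on the datapath -> drain -> cleanup.
 */
static int example_ring_lifecycle(unsigned int qlen, struct sk_buff *skb)
{
	struct skb_array q;
	int err;

	err = skb_array_init(&q, qlen, GFP_KERNEL);	/* as in pfifo_fast_init() */
	if (err)
		return -ENOMEM;

	if (skb_array_produce(&q, skb))			/* as in pfifo_fast_enqueue() */
		kfree_skb(skb);				/* ring full: drop the packet */

	while ((skb = skb_array_consume_bh(&q)) != NULL)/* as in pfifo_fast_reset() */
		kfree_skb(skb);

	ptr_ring_cleanup(&q.ring, NULL);		/* as in pfifo_fast_destroy() */
	return 0;
}
```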