@@ -45,6 +45,68 @@ EXPORT_SYMBOL(default_qdisc_ops);
  * - ingress filtering is also serialized via qdisc root lock
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
+
+static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+{
+	const struct netdev_queue *txq = q->dev_queue;
+	spinlock_t *lock = NULL;
+	struct sk_buff *skb;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
+
+	skb = skb_peek(&q->skb_bad_txq);
+	if (skb) {
+		/* check the reason of requeuing without tx lock first */
+		txq = skb_get_tx_queue(txq->dev, skb);
+		if (!netif_xmit_frozen_or_stopped(txq)) {
+			skb = __skb_dequeue(&q->skb_bad_txq);
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_dec(q, skb);
+				qdisc_qstats_cpu_qlen_dec(q);
+			} else {
+				qdisc_qstats_backlog_dec(q, skb);
+				q->q.qlen--;
+			}
+		} else {
+			skb = NULL;
+		}
+	}
+
+	if (lock)
+		spin_unlock(lock);
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
+{
+	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
+
+	if (unlikely(skb))
+		skb = __skb_dequeue_bad_txq(q);
+
+	return skb;
+}
+
+static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
+					     struct sk_buff *skb)
+{
+	spinlock_t *lock = NULL;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
+
+	__skb_queue_tail(&q->skb_bad_txq, skb);
+
+	if (lock)
+		spin_unlock(lock);
+}
+
 static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	__skb_queue_head(&q->gso_skb, skb);
@@ -117,9 +179,15 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 		if (!nskb)
 			break;
 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
-			q->skb_bad_txq = nskb;
-			qdisc_qstats_backlog_inc(q, nskb);
-			q->q.qlen++;
+			qdisc_enqueue_skb_bad_txq(q, nskb);
+
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_inc(q, nskb);
+				qdisc_qstats_cpu_qlen_inc(q);
+			} else {
+				qdisc_qstats_backlog_inc(q, nskb);
+				q->q.qlen++;
+			}
 			break;
 		}
 		skb->next = nskb;
@@ -180,19 +248,9 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 	}
 validate:
 	*validate = true;
-	skb = q->skb_bad_txq;
-	if (unlikely(skb)) {
-		/* check the reason of requeuing without tx lock first */
-		txq = skb_get_tx_queue(txq->dev, skb);
-		if (!netif_xmit_frozen_or_stopped(txq)) {
-			q->skb_bad_txq = NULL;
-			qdisc_qstats_backlog_dec(q, skb);
-			q->q.qlen--;
-			goto bulk;
-		}
-		skb = NULL;
-		goto trace;
-	}
+	skb = qdisc_dequeue_skb_bad_txq(q);
+	if (unlikely(skb))
+		goto bulk;
 	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
 	    !netif_xmit_frozen_or_stopped(txq))
 		skb = q->dequeue(q);
@@ -680,6 +738,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 		sch->padded = (char *) sch - (char *) p;
 	}
 	__skb_queue_head_init(&sch->gso_skb);
+	__skb_queue_head_init(&sch->skb_bad_txq);
 	qdisc_skb_head_init(&sch->q);
 	spin_lock_init(&sch->q.lock);
 
@@ -753,14 +812,16 @@ void qdisc_reset(struct Qdisc *qdisc)
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	kfree_skb(qdisc->skb_bad_txq);
-	qdisc->skb_bad_txq = NULL;
-
 	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
 		__skb_unlink(skb, &qdisc->gso_skb);
 		kfree_skb_list(skb);
 	}
 
+	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+		__skb_unlink(skb, &qdisc->skb_bad_txq);
+		kfree_skb_list(skb);
+	}
+
 	qdisc->q.qlen = 0;
 	qdisc->qstats.backlog = 0;
 }
@@ -804,7 +865,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
 		kfree_skb_list(skb);
 	}
 
-	kfree_skb(qdisc->skb_bad_txq);
+	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+		__skb_unlink(skb, &qdisc->skb_bad_txq);
+		kfree_skb_list(skb);
+	}
+
 	qdisc_free(qdisc);
 }
 EXPORT_SYMBOL(qdisc_destroy);
@@ -1042,6 +1107,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
 	dev_queue->qdisc_sleeping = qdisc;
 	__skb_queue_head_init(&qdisc->gso_skb);
+	__skb_queue_head_init(&qdisc->skb_bad_txq);
 }
 
 void dev_init_scheduler(struct net_device *dev)
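
Note (not part of the diff above): both new helpers take the per-qdisc spinlock only when TCQ_F_NOLOCK is set, since lockless qdiscs do not run under the qdisc root lock that otherwise serializes callers of this code path. The self-contained userspace sketch below shows that conditional-locking shape in isolation; QF_NOLOCK, struct pkt, bad_txq_push() and bad_txq_pop() are made-up names for illustration, not kernel APIs.

/*
 * Standalone sketch (build: cc -pthread sketch.c -o sketch).
 * Take the internal lock only when the queue is flagged "lockless",
 * i.e. when the caller is not already serialized by an outer lock.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define QF_NOLOCK 0x1			/* analogue of TCQ_F_NOLOCK */

struct pkt {
	int id;
	struct pkt *next;
};

struct queue {
	unsigned int flags;
	struct pkt *head, *tail;	/* analogue of qdisc->skb_bad_txq */
	pthread_mutex_t lock;		/* analogue of qdisc_lock(q) */
};

/* append a packet; lock only if the queue is not externally serialized */
static void bad_txq_push(struct queue *q, struct pkt *p)
{
	int need_lock = q->flags & QF_NOLOCK;

	if (need_lock)
		pthread_mutex_lock(&q->lock);

	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;

	if (need_lock)
		pthread_mutex_unlock(&q->lock);
}

/* pop the oldest packet, or NULL if the queue is empty */
static struct pkt *bad_txq_pop(struct queue *q)
{
	int need_lock = q->flags & QF_NOLOCK;
	struct pkt *p;

	if (need_lock)
		pthread_mutex_lock(&q->lock);

	p = q->head;
	if (p) {
		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
	}

	if (need_lock)
		pthread_mutex_unlock(&q->lock);

	return p;
}

int main(void)
{
	struct queue q = { .flags = QF_NOLOCK };
	struct pkt a = { .id = 1 }, b = { .id = 2 };
	struct pkt *first, *second;

	pthread_mutex_init(&q.lock, NULL);
	bad_txq_push(&q, &a);
	bad_txq_push(&q, &b);
	first = bad_txq_pop(&q);
	second = bad_txq_pop(&q);
	printf("popped %d then %d\n", first->id, second->id);
	pthread_mutex_destroy(&q.lock);
	return 0;
}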