Skip to content

Commit 73eb628

Browse files
Paolo Abeni authored and davem330 committed
Revert: "net: sched: put back q.qlen into a single location"
This reverts commit 46b1c18 ("net: sched: put back q.qlen into a single location"). After the previous patch, when a NOLOCK qdisc is enslaved to a locking qdisc it switches to global stats accounting. As a consequence, when a classful qdisc accesses directly a child qdisc's qlen, such qdisc is not doing per CPU accounting and the qlen value is consistent. In the control path nobody uses qlen directly since commit e5f0e8f ("net: sched: introduce and use qdisc tree flush/purge helpers"), so we can remove the contended atomic ops from the datapath. v1 -> v2: - complete the qdisc_qstats_atomic_qlen_dec() -> qdisc_qstats_cpu_qlen_dec() replacement, fix build issue - more descriptive commit message Signed-off-by: Paolo Abeni <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 8a53e61 commit 73eb628

File tree

3 files changed

+28
-20
lines changed

3 files changed

+28
-20
lines changed

include/net/sch_generic.h

Lines changed: 21 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -52,10 +52,7 @@ struct qdisc_size_table {
5252
struct qdisc_skb_head {
5353
struct sk_buff *head;
5454
struct sk_buff *tail;
55-
union {
56-
u32 qlen;
57-
atomic_t atomic_qlen;
58-
};
55+
__u32 qlen;
5956
spinlock_t lock;
6057
};
6158

@@ -486,19 +483,27 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
486483
BUILD_BUG_ON(sizeof(qcb->data) < sz);
487484
}
488485

486+
static inline int qdisc_qlen_cpu(const struct Qdisc *q)
487+
{
488+
return this_cpu_ptr(q->cpu_qstats)->qlen;
489+
}
490+
489491
static inline int qdisc_qlen(const struct Qdisc *q)
490492
{
491493
return q->q.qlen;
492494
}
493495

494-
static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
496+
static inline int qdisc_qlen_sum(const struct Qdisc *q)
495497
{
496-
u32 qlen = q->qstats.qlen;
498+
__u32 qlen = q->qstats.qlen;
499+
int i;
497500

498-
if (qdisc_is_percpu_stats(q))
499-
qlen += atomic_read(&q->q.atomic_qlen);
500-
else
501+
if (qdisc_is_percpu_stats(q)) {
502+
for_each_possible_cpu(i)
503+
qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
504+
} else {
501505
qlen += q->q.qlen;
506+
}
502507

503508
return qlen;
504509
}
@@ -889,14 +894,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
889894
this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
890895
}
891896

892-
static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
897+
static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
893898
{
894-
atomic_inc(&sch->q.atomic_qlen);
899+
this_cpu_inc(sch->cpu_qstats->qlen);
895900
}
896901

897-
static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
902+
static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
898903
{
899-
atomic_dec(&sch->q.atomic_qlen);
904+
this_cpu_dec(sch->cpu_qstats->qlen);
900905
}
901906

902907
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -1112,7 +1117,7 @@ static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
11121117
if (qdisc_is_percpu_stats(sch)) {
11131118
qdisc_qstats_cpu_backlog_dec(sch, skb);
11141119
qdisc_bstats_cpu_update(sch, skb);
1115-
qdisc_qstats_atomic_qlen_dec(sch);
1120+
qdisc_qstats_cpu_qlen_dec(sch);
11161121
} else {
11171122
qdisc_qstats_backlog_dec(sch, skb);
11181123
qdisc_bstats_update(sch, skb);
@@ -1124,7 +1129,7 @@ static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
11241129
unsigned int pkt_len)
11251130
{
11261131
if (qdisc_is_percpu_stats(sch)) {
1127-
qdisc_qstats_atomic_qlen_inc(sch);
1132+
qdisc_qstats_cpu_qlen_inc(sch);
11281133
this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
11291134
} else {
11301135
sch->qstats.backlog += pkt_len;
@@ -1141,7 +1146,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
11411146
skb = __skb_dequeue(&sch->gso_skb);
11421147
if (qdisc_is_percpu_stats(sch)) {
11431148
qdisc_qstats_cpu_backlog_dec(sch, skb);
1144-
qdisc_qstats_atomic_qlen_dec(sch);
1149+
qdisc_qstats_cpu_qlen_dec(sch);
11451150
} else {
11461151
qdisc_qstats_backlog_dec(sch, skb);
11471152
sch->q.qlen--;

net/core/gen_stats.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -291,6 +291,7 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
291291
for_each_possible_cpu(i) {
292292
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
293293

294+
qstats->qlen = 0;
294295
qstats->backlog += qcpu->backlog;
295296
qstats->drops += qcpu->drops;
296297
qstats->requeues += qcpu->requeues;
@@ -306,6 +307,7 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
306307
if (cpu) {
307308
__gnet_stats_copy_queue_cpu(qstats, cpu);
308309
} else {
310+
qstats->qlen = q->qlen;
309311
qstats->backlog = q->backlog;
310312
qstats->drops = q->drops;
311313
qstats->requeues = q->requeues;

net/sched/sch_generic.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
6868
skb = __skb_dequeue(&q->skb_bad_txq);
6969
if (qdisc_is_percpu_stats(q)) {
7070
qdisc_qstats_cpu_backlog_dec(q, skb);
71-
qdisc_qstats_atomic_qlen_dec(q);
71+
qdisc_qstats_cpu_qlen_dec(q);
7272
} else {
7373
qdisc_qstats_backlog_dec(q, skb);
7474
q->q.qlen--;
@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
108108

109109
if (qdisc_is_percpu_stats(q)) {
110110
qdisc_qstats_cpu_backlog_inc(q, skb);
111-
qdisc_qstats_atomic_qlen_inc(q);
111+
qdisc_qstats_cpu_qlen_inc(q);
112112
} else {
113113
qdisc_qstats_backlog_inc(q, skb);
114114
q->q.qlen++;
@@ -136,7 +136,7 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
136136
if (qdisc_is_percpu_stats(q)) {
137137
qdisc_qstats_cpu_requeues_inc(q);
138138
qdisc_qstats_cpu_backlog_inc(q, skb);
139-
qdisc_qstats_atomic_qlen_inc(q);
139+
qdisc_qstats_cpu_qlen_inc(q);
140140
} else {
141141
q->qstats.requeues++;
142142
qdisc_qstats_backlog_inc(q, skb);
@@ -236,7 +236,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
236236
skb = __skb_dequeue(&q->gso_skb);
237237
if (qdisc_is_percpu_stats(q)) {
238238
qdisc_qstats_cpu_backlog_dec(q, skb);
239-
qdisc_qstats_atomic_qlen_dec(q);
239+
qdisc_qstats_cpu_qlen_dec(q);
240240
} else {
241241
qdisc_qstats_backlog_dec(q, skb);
242242
q->q.qlen--;
@@ -694,6 +694,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
694694
struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
695695

696696
q->backlog = 0;
697+
q->qlen = 0;
697698
}
698699
}
699700

0 commit comments

Comments (0)