Skip to content

Commit 9c01c9f

Browse files
Paolo Abeni authored and davem330 committed
net: sched: always do stats accounting according to TCQ_F_CPUSTATS
The core sched implementation checks independently for NOLOCK flag to acquire/release the root spin lock and for qdisc_is_percpu_stats() to account per CPU values in many places.

This change updates the last few places checking the TCQ_F_NOLOCK flag to do per CPU stats accounting according to the qdisc_is_percpu_stats() value.

The above allows cleaning up the dev_requeue_skb() implementation a bit and makes stats updates always consistent with a single flag.

v1 -> v2:
 - do not move the qdisc_is_empty definition, fix build issue

Signed-off-by: Paolo Abeni <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 1f5e6fd commit 9c01c9f

File tree

2 files changed

+31
-42
lines changed

2 files changed

+31
-42
lines changed

include/net/sch_generic.h

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -146,9 +146,14 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
146146
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
147147
}
148148

149+
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
150+
{
151+
return q->flags & TCQ_F_CPUSTATS;
152+
}
153+
149154
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
150155
{
151-
if (qdisc->flags & TCQ_F_NOLOCK)
156+
if (qdisc_is_percpu_stats(qdisc))
152157
return qdisc->empty;
153158
return !qdisc->q.qlen;
154159
}
@@ -490,7 +495,7 @@ static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
490495
{
491496
u32 qlen = q->qstats.qlen;
492497

493-
if (q->flags & TCQ_F_NOLOCK)
498+
if (qdisc_is_percpu_stats(q))
494499
qlen += atomic_read(&q->q.atomic_qlen);
495500
else
496501
qlen += q->q.qlen;
@@ -817,11 +822,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
817822
return sch->enqueue(skb, sch, to_free);
818823
}
819824

820-
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
821-
{
822-
return q->flags & TCQ_F_CPUSTATS;
823-
}
824-
825825
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
826826
__u64 bytes, __u32 packets)
827827
{
@@ -1113,8 +1113,13 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
11131113

11141114
if (skb) {
11151115
skb = __skb_dequeue(&sch->gso_skb);
1116-
qdisc_qstats_backlog_dec(sch, skb);
1117-
sch->q.qlen--;
1116+
if (qdisc_is_percpu_stats(sch)) {
1117+
qdisc_qstats_cpu_backlog_dec(sch, skb);
1118+
qdisc_qstats_atomic_qlen_dec(sch);
1119+
} else {
1120+
qdisc_qstats_backlog_dec(sch, skb);
1121+
sch->q.qlen--;
1122+
}
11181123
} else {
11191124
skb = sch->dequeue(sch);
11201125
}

net/sched/sch_generic.c

Lines changed: 17 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -118,52 +118,36 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
118118
spin_unlock(lock);
119119
}
120120

121-
static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
121+
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
122122
{
123-
while (skb) {
124-
struct sk_buff *next = skb->next;
125-
126-
__skb_queue_tail(&q->gso_skb, skb);
127-
q->qstats.requeues++;
128-
qdisc_qstats_backlog_inc(q, skb);
129-
q->q.qlen++; /* it's still part of the queue */
123+
spinlock_t *lock = NULL;
130124

131-
skb = next;
125+
if (q->flags & TCQ_F_NOLOCK) {
126+
lock = qdisc_lock(q);
127+
spin_lock(lock);
132128
}
133-
__netif_schedule(q);
134-
135-
return 0;
136-
}
137129

138-
static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
139-
{
140-
spinlock_t *lock = qdisc_lock(q);
141-
142-
spin_lock(lock);
143130
while (skb) {
144131
struct sk_buff *next = skb->next;
145132

146133
__skb_queue_tail(&q->gso_skb, skb);
147134

148-
qdisc_qstats_cpu_requeues_inc(q);
149-
qdisc_qstats_cpu_backlog_inc(q, skb);
150-
qdisc_qstats_atomic_qlen_inc(q);
135+
/* it's still part of the queue */
136+
if (qdisc_is_percpu_stats(q)) {
137+
qdisc_qstats_cpu_requeues_inc(q);
138+
qdisc_qstats_cpu_backlog_inc(q, skb);
139+
qdisc_qstats_atomic_qlen_inc(q);
140+
} else {
141+
q->qstats.requeues++;
142+
qdisc_qstats_backlog_inc(q, skb);
143+
q->q.qlen++;
144+
}
151145

152146
skb = next;
153147
}
154-
spin_unlock(lock);
155-
148+
if (lock)
149+
spin_unlock(lock);
156150
__netif_schedule(q);
157-
158-
return 0;
159-
}
160-
161-
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
162-
{
163-
if (q->flags & TCQ_F_NOLOCK)
164-
return dev_requeue_skb_locked(skb, q);
165-
else
166-
return __dev_requeue_skb(skb, q);
167151
}
168152

169153
static void try_bulk_dequeue_skb(struct Qdisc *q,

0 commit comments

Comments (0)