Skip to content

Commit 7df40c2

Browse files
Eric Dumazet authored and davem330 committed
net_sched: fq: take care of throttled flows before reuse
Normally, a socket can not be freed/reused unless all its TX packets left qdisc and were TX-completed. However connect(AF_UNSPEC) allows this to happen.

With commit fc59d5b ("pkt_sched: fq: clear time_next_packet for reused flows") we cleared f->time_next_packet but took no special action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in the rb-tree searches, blindly clearing it might break rb-tree integrity. We need to make sure the flow is no longer in the rb-tree to avoid this problem.

Fixes: fc59d5b ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 30ca22e commit 7df40c2

File tree

1 file changed

+25
-12
lines changed

1 file changed

+25
-12
lines changed

net/sched/sch_fq.c

Lines changed: 25 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
128128
return f->next == &detached;
129129
}
130130

131+
/* Return true when flow @f currently sits in the throttled rb-tree:
 * such flows have their ->next pointer set to the &throttled sentinel.
 */
static bool fq_flow_is_throttled(const struct fq_flow *f)
132+
{
133+
return f->next == &throttled;
134+
}
135+
136+
/* Append @flow to the tail of the singly linked flow list @head.
 * The list is NULL-terminated via flow->next; head->last tracks the
 * current tail so insertion is O(1).
 */
static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
137+
{
138+
if (head->first)
139+
head->last->next = flow;
140+
else
141+
head->first = flow;
142+
head->last = flow;
143+
flow->next = NULL;
144+
}
145+
146+
/* Remove @f from the throttled rb-tree and put it back on the old
 * flows list, keeping q->throttled_flows in sync.  Per the commit
 * message, this must happen BEFORE f->time_next_packet (the rb-tree
 * search key) is cleared, otherwise rb-tree integrity is broken.
 */
static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
147+
{
148+
rb_erase(&f->rate_node, &q->delayed);
149+
q->throttled_flows--;
150+
fq_flow_add_tail(&q->old_flows, f);
151+
}
152+
131153
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
132154
{
133155
struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
155177

156178
static struct kmem_cache *fq_flow_cachep __read_mostly;
157179

158-
static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
159-
{
160-
if (head->first)
161-
head->last->next = flow;
162-
else
163-
head->first = flow;
164-
head->last = flow;
165-
flow->next = NULL;
166-
}
167180

168181
/* limit number of collected flows per round */
169182
#define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
267280
f->socket_hash != sk->sk_hash)) {
268281
f->credit = q->initial_quantum;
269282
f->socket_hash = sk->sk_hash;
283+
if (fq_flow_is_throttled(f))
284+
fq_flow_unset_throttled(q, f);
270285
f->time_next_packet = 0ULL;
271286
}
272287
return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
438453
q->time_next_delayed_flow = f->time_next_packet;
439454
break;
440455
}
441-
rb_erase(p, &q->delayed);
442-
q->throttled_flows--;
443-
fq_flow_add_tail(&q->old_flows, f);
456+
fq_flow_unset_throttled(q, f);
444457
}
445458
}
446459

0 commit comments

Comments (0)