
Commit aee9caa

pmachata authored and davem330 committed
net: sched: sch_red: Add qevents "early_drop" and "mark"
In order to allow acting on dropped and/or ECN-marked packets, add two new qevents to the RED qdisc: "early_drop" and "mark". Filters attached at the "early_drop" block are executed as packets are early-dropped; those attached at the "mark" block are executed as packets are ECN-marked.

Two new attributes are introduced: TCA_RED_EARLY_DROP_BLOCK with the block index for the "early_drop" qevent, and TCA_RED_MARK_BLOCK for the "mark" qevent. Absence of these attributes signifies "don't care": no block is allocated in that case, or the existing blocks are left intact in case of the change callback.

For purposes of offloading, blocks attached to these qevents appear with newly-introduced binder types, FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP and FLOW_BLOCK_BINDER_TYPE_RED_MARK.

Signed-off-by: Petr Machata <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 65545ea commit aee9caa
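For context, both new attributes are plain u32 block indices carried inside the qdisc's TCA_OPTIONS nest. A minimal userspace sketch of attaching them, assuming libmnl and a hypothetical helper name (put_red_qevent_blocks(); the surrounding RTM_NEWQDISC request construction is omitted), might look as follows:

/* Sketch only, not part of this commit: nest the new u32 attributes into a
 * RED qdisc's TCA_OPTIONS using libmnl.  Treating 0 as "no block" is an
 * assumption of this example, not kernel behaviour. */
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>
#include <linux/rtnetlink.h>

static void put_red_qevent_blocks(struct nlmsghdr *nlh,
                                  uint32_t early_drop_block,
                                  uint32_t mark_block)
{
        struct nlattr *opts = mnl_attr_nest_start(nlh, TCA_OPTIONS);

        /* TCA_RED_PARMS, TCA_RED_STAB, TCA_RED_MAX_P, ... would go here. */

        /* Omitting an attribute means "don't care": no block is allocated,
         * and on change an existing block is left intact. */
        if (early_drop_block)
                mnl_attr_put_u32(nlh, TCA_RED_EARLY_DROP_BLOCK, early_drop_block);
        if (mark_block)
                mnl_attr_put_u32(nlh, TCA_RED_MARK_BLOCK, mark_block);

        mnl_attr_nest_end(nlh, opts);
}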

File tree

3 files changed: +60, -2 lines changed


include/net/flow_offload.h

Lines changed: 2 additions & 0 deletions
@@ -424,6 +424,8 @@ enum flow_block_binder_type {
 	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
 	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
 	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+	FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
 };
 
 struct flow_block {
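On the offload side, a driver that wants to handle filters bound to these qevent blocks would see the new binder types in its block setup path. A hypothetical sketch, assuming made-up my_drv_* names and callbacks (this is not code from the commit or from any real driver):

/* Hypothetical driver sketch: accept the RED qevent binder types in a
 * TC_SETUP_BLOCK handler.  All my_drv_* identifiers are illustrative. */
#include <linux/netdevice.h>
#include <net/flow_offload.h>

struct my_drv;					/* assumed driver private struct */
static LIST_HEAD(my_drv_block_cb_list);
static int my_drv_red_early_drop_cb(enum tc_setup_type type, void *type_data,
                                    void *cb_priv);
static int my_drv_red_mark_cb(enum tc_setup_type type, void *type_data,
                              void *cb_priv);

static int my_drv_setup_block(struct my_drv *priv, struct flow_block_offload *f)
{
        flow_setup_cb_t *cb;

        switch (f->binder_type) {
        case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
                /* Filters on this block run as RED early-drops a packet. */
                cb = my_drv_red_early_drop_cb;
                break;
        case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
                /* Filters on this block run as RED ECN-marks a packet. */
                cb = my_drv_red_mark_cb;
                break;
        default:
                return -EOPNOTSUPP;
        }

        /* ingress_only == false so non-clsact binder types are accepted. */
        return flow_block_cb_setup_simple(f, &my_drv_block_cb_list, cb,
                                          priv, priv, false);
}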

include/uapi/linux/pkt_sched.h

Lines changed: 2 additions & 0 deletions
@@ -257,6 +257,8 @@ enum {
 	TCA_RED_STAB,
 	TCA_RED_MAX_P,
 	TCA_RED_FLAGS,		/* bitfield32 */
+	TCA_RED_EARLY_DROP_BLOCK, /* u32 */
+	TCA_RED_MARK_BLOCK,	/* u32 */
 	__TCA_RED_MAX,
 };
 
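Both attributes are also reported back on dump (see the tcf_qevent_dump() calls added to red_dump() further down), so a monitoring tool could read them out of the TCA_OPTIONS nest. Another libmnl sketch; print_red_qevent_blocks() is an illustrative name, not an existing API:

/* Sketch only: walk a RED qdisc's TCA_OPTIONS nest from a dump and print
 * the qevent block indices, if present. */
#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

static void print_red_qevent_blocks(const struct nlattr *opts)
{
        const struct nlattr *attr;

        mnl_attr_for_each_nested(attr, opts) {
                switch (mnl_attr_get_type(attr)) {
                case TCA_RED_EARLY_DROP_BLOCK:
                        printf("qevent early_drop block %u\n",
                               (unsigned int)mnl_attr_get_u32(attr));
                        break;
                case TCA_RED_MARK_BLOCK:
                        printf("qevent mark block %u\n",
                               (unsigned int)mnl_attr_get_u32(attr));
                        break;
                }
        }
}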

net/sched/sch_red.c

Lines changed: 56 additions & 2 deletions
@@ -46,6 +46,8 @@ struct red_sched_data {
 	struct red_vars		vars;
 	struct red_stats	stats;
 	struct Qdisc		*qdisc;
+	struct tcf_qevent	qe_early_drop;
+	struct tcf_qevent	qe_mark;
 };
 
 #define TC_RED_SUPPORTED_FLAGS	(TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)
@@ -92,6 +94,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.prob_mark++;
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			if (!skb)
+				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
@@ -109,6 +114,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.forced_mark++;
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			if (!skb)
+				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
@@ -129,6 +137,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 	return ret;
 
 congestion_drop:
+	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret);
+	if (!skb)
+		return NET_XMIT_CN | ret;
+
 	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
@@ -202,6 +214,8 @@ static void red_destroy(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
+	tcf_qevent_destroy(&q->qe_mark, sch);
+	tcf_qevent_destroy(&q->qe_early_drop, sch);
 	del_timer_sync(&q->adapt_timer);
 	red_offload(sch, false);
 	qdisc_put(q->qdisc);
@@ -213,6 +227,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
 	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
 	[TCA_RED_MAX_P] = { .type = NLA_U32 },
 	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
+	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
+	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
 };
 
 static int __red_change(struct Qdisc *sch, struct nlattr **tb,
@@ -328,12 +344,38 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
 	q->qdisc = &noop_qdisc;
 	q->sch = sch;
 	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-	return __red_change(sch, tb, extack);
+
+	err = __red_change(sch, tb, extack);
+	if (err)
+		return err;
+
+	err = tcf_qevent_init(&q->qe_early_drop, sch,
+			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
+	if (err)
+		goto err_early_drop_init;
+
+	err = tcf_qevent_init(&q->qe_mark, sch,
+			      FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+			      tb[TCA_RED_MARK_BLOCK], extack);
+	if (err)
+		goto err_mark_init;
+
+	return 0;
+
+err_mark_init:
+	tcf_qevent_destroy(&q->qe_early_drop, sch);
+err_early_drop_init:
+	del_timer_sync(&q->adapt_timer);
+	red_offload(sch, false);
+	qdisc_put(q->qdisc);
+	return err;
 }
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt,
 		      struct netlink_ext_ack *extack)
 {
+	struct red_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_RED_MAX + 1];
 	int err;
 
@@ -345,6 +387,16 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 	if (err < 0)
 		return err;
 
+	err = tcf_qevent_validate_change(&q->qe_early_drop,
+					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
+	if (err)
+		return err;
+
+	err = tcf_qevent_validate_change(&q->qe_mark,
+					 tb[TCA_RED_MARK_BLOCK], extack);
+	if (err)
+		return err;
+
 	return __red_change(sch, tb, extack);
 }
 
@@ -389,7 +441,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
 	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
 	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
-			       q->flags, TC_RED_SUPPORTED_FLAGS))
+			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
+	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
+	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);