
Commit 3625750

pmachata authored and davem330 committed
net: sched: Introduce helpers for qevent blocks
Qevents are attach points for TC blocks, where filters can be put that are executed when "interesting events" take place in a qdisc. The data to keep and the functions to invoke to maintain a qevent will be largely the same between qevents. Therefore introduce sched-wide helpers for qevent management.

Currently, similarly to ingress and egress blocks of the clsact pseudo-qdisc, block attachment cannot be changed after the qdisc is created. To that end, add a helper tcf_qevent_validate_change(), which verifies that the block index attribute is not attached, or, if it is, that its value matches the current one (i.e. there is no material change).

The function tcf_qevent_handle() should be invoked when the qdisc hits the "interesting event" corresponding to a block. This function releases the root lock for the duration of executing the attached filters, to allow packets generated through user actions (notably mirred) to be reinserted into the same qdisc tree.

Signed-off-by: Petr Machata <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent aebe442 · commit 3625750
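For orientation, here is a sketch of how a qdisc might consume these helpers at init/change/destroy time. It is illustrative only: the "foo" qdisc, foo_sched_data, foo_policy, and the TCA_FOO_BLOCK attribute are hypothetical names, and the binder type is a placeholder; only the tcf_qevent_*() calls come from this commit.

/* Hypothetical qdisc state embedding one qevent. */
struct foo_sched_data {
	struct tcf_qevent qe;
	/* ... other qdisc state ... */
};

static int foo_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FOO_MAX + 1];
	int err;

	err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
	if (err < 0)
		return err;

	/* Bind a TC block to the qevent if the user gave a block index;
	 * with no attribute the qevent simply stays inactive.
	 * FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS is a placeholder here; a
	 * real qevent would use a binder type of its own.
	 */
	return tcf_qevent_init(&q->qe, sch,
			       FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
			       tb[TCA_FOO_BLOCK], extack);
}

static int foo_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FOO_MAX + 1];
	int err;

	err = nla_parse_nested(tb, TCA_FOO_MAX, opt, foo_policy, extack);
	if (err < 0)
		return err;

	/* Block attachment may not change after creation; bounce any
	 * attempt to bind a new or different block.
	 */
	err = tcf_qevent_validate_change(&q->qe, tb[TCA_FOO_BLOCK], extack);
	if (err)
		return err;

	/* ... apply remaining parameter changes ... */
	return 0;
}

static void foo_destroy(struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe, sch);
}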

2 files changed: +168 −0

include/net/pkt_cls.h

Lines changed: 49 additions & 0 deletions
@@ -32,6 +32,12 @@ struct tcf_block_ext_info {
 	u32 block_index;
 };
 
+struct tcf_qevent {
+	struct tcf_block	*block;
+	struct tcf_block_ext_info info;
+	struct tcf_proto __rcu *filter_chain;
+};
+
 struct tcf_block_cb;
 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

@@ -553,6 +559,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+		    enum flow_block_binder_type binder_type,
+		    struct nlattr *block_index_attr,
+		    struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+			       struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+				  spinlock_t *root_lock, struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+				  enum flow_block_binder_type binder_type,
+				  struct nlattr *block_index_attr,
+				  struct netlink_ext_ack *extack)
+{
+	return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+					     struct netlink_ext_ack *extack)
+{
+	return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+		  spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+{
+	return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+	return 0;
+}
+#endif
+
 struct tc_cls_u32_knode {
 	struct tcf_exts *exts;
 	struct tcf_result *res;
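On the dump side, a qdisc only needs to hand its qevent to tcf_qevent_dump() from its ->dump() callback; with no block bound the helper emits nothing, so output is unchanged for qdiscs that do not use the feature. A minimal sketch, again with the hypothetical "foo" qdisc and TCA_FOO_BLOCK attribute:

static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* No-op unless a block is bound to the qevent. */
	if (tcf_qevent_dump(skb, TCA_FOO_BLOCK, &q->qe))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}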

net/sched/cls_api.c

Lines changed: 119 additions & 0 deletions
@@ -3748,6 +3748,125 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
 }
 EXPORT_SYMBOL(tcf_exts_num_actions);
 
+#ifdef CONFIG_NET_CLS_ACT
+static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
+					u32 *p_block_index,
+					struct netlink_ext_ack *extack)
+{
+	*p_block_index = nla_get_u32(block_index_attr);
+	if (!*p_block_index) {
+		NL_SET_ERR_MSG(extack, "Block number may not be zero");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+		    enum flow_block_binder_type binder_type,
+		    struct nlattr *block_index_attr,
+		    struct netlink_ext_ack *extack)
+{
+	u32 block_index;
+	int err;
+
+	if (!block_index_attr)
+		return 0;
+
+	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+	if (err)
+		return err;
+
+	if (!block_index)
+		return 0;
+
+	qe->info.binder_type = binder_type;
+	qe->info.chain_head_change = tcf_chain_head_change_dflt;
+	qe->info.chain_head_change_priv = &qe->filter_chain;
+	qe->info.block_index = block_index;
+
+	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
+}
+EXPORT_SYMBOL(tcf_qevent_init);
+
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+	if (qe->info.block_index)
+		tcf_block_put_ext(qe->block, sch, &qe->info);
+}
+EXPORT_SYMBOL(tcf_qevent_destroy);
+
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+			       struct netlink_ext_ack *extack)
+{
+	u32 block_index;
+	int err;
+
+	if (!block_index_attr)
+		return 0;
+
+	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+	if (err)
+		return err;
+
+	/* Bounce newly-configured block or change in block. */
+	if (block_index != qe->info.block_index) {
+		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tcf_qevent_validate_change);
+
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+				  spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+{
+	struct tcf_result cl_res;
+	struct tcf_proto *fl;
+
+	if (!qe->info.block_index)
+		return skb;
+
+	fl = rcu_dereference_bh(qe->filter_chain);
+
+	if (root_lock)
+		spin_unlock(root_lock);
+
+	switch (tcf_classify(skb, fl, &cl_res, false)) {
+	case TC_ACT_SHOT:
+		qdisc_qstats_drop(sch);
+		__qdisc_drop(skb, to_free);
+		*ret = __NET_XMIT_BYPASS;
+		return NULL;
+	case TC_ACT_STOLEN:
+	case TC_ACT_QUEUED:
+	case TC_ACT_TRAP:
+		__qdisc_drop(skb, to_free);
+		*ret = __NET_XMIT_STOLEN;
+		return NULL;
+	case TC_ACT_REDIRECT:
+		skb_do_redirect(skb);
+		*ret = __NET_XMIT_STOLEN;
+		return NULL;
+	}
+
+	if (root_lock)
+		spin_lock(root_lock);
+
+	return skb;
+}
+EXPORT_SYMBOL(tcf_qevent_handle);
+
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+	if (!qe->info.block_index)
+		return 0;
+	return nla_put_u32(skb, attr_name, qe->info.block_index);
+}
+EXPORT_SYMBOL(tcf_qevent_dump);
+#endif
+
 static __net_init int tcf_net_init(struct net *net)
 {
 	struct tcf_net *tn = net_generic(net, tcf_net_id);
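The intended call site for tcf_qevent_handle() is the qdisc's enqueue path; the parent commit (aebe442) threads the root lock into Qdisc_ops.enqueue() precisely so it can be passed through here and released while the attached filters run. Below is a hypothetical sketch of such a call site; foo_should_early_drop() and the surrounding qdisc are invented for illustration:

static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       spinlock_t *root_lock, struct sk_buff **to_free)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	int ret;

	if (foo_should_early_drop(q, skb)) {
		/* Run the filters attached to the qevent block. The root
		 * lock is dropped while they execute, so packets that
		 * mirred generates can re-enter this qdisc tree. A NULL
		 * return means the filters consumed the skb and *ret is
		 * already the verdict to report.
		 */
		skb = tcf_qevent_handle(&q->qe, sch, skb, root_lock,
					to_free, &ret);
		if (!skb)
			return ret;

		return qdisc_drop(skb, sch, to_free);
	}

	return qdisc_enqueue_tail(skb, sch);
}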
