Skip to content

Commit 468e2f6

Browse files
4ast authored and davem330 committed
bpf: introduce BPF_PROG_QUERY command
Introduce the BPF_PROG_QUERY command to retrieve either the set of programs attached to a given cgroup, or the set of effective programs that will execute for events within that cgroup. Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Acked-by: Martin KaFai Lau <[email protected]> (for cgroup bits) Acked-by: Tejun Heo <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 324bda9 commit 468e2f6

File tree

7 files changed

+148
-0
lines changed

7 files changed

+148
-0
lines changed

include/linux/bpf-cgroup.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,16 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
4444
enum bpf_attach_type type, u32 flags);
4545
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
4646
enum bpf_attach_type type, u32 flags);
47+
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
48+
union bpf_attr __user *uattr);
4749

4850
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
4951
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
5052
enum bpf_attach_type type, u32 flags);
5153
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
5254
enum bpf_attach_type type, u32 flags);
55+
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
56+
union bpf_attr __user *uattr);
5357

5458
int __cgroup_bpf_run_filter_skb(struct sock *sk,
5559
struct sk_buff *skb,

include/linux/bpf.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -260,6 +260,9 @@ struct bpf_prog_array {
260260

261261
struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
262262
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
263+
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
264+
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
265+
__u32 __user *prog_ids, u32 cnt);
263266

264267
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
265268
({ \

include/uapi/linux/bpf.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ enum bpf_cmd {
9292
BPF_PROG_GET_FD_BY_ID,
9393
BPF_MAP_GET_FD_BY_ID,
9494
BPF_OBJ_GET_INFO_BY_FD,
95+
BPF_PROG_QUERY,
9596
};
9697

9798
enum bpf_map_type {
@@ -211,6 +212,9 @@ enum bpf_attach_type {
211212
/* Specify numa node during map creation */
212213
#define BPF_F_NUMA_NODE (1U << 2)
213214

215+
/* flags for BPF_PROG_QUERY */
216+
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
217+
214218
#define BPF_OBJ_NAME_LEN 16U
215219

216220
union bpf_attr {
@@ -289,6 +293,15 @@ union bpf_attr {
289293
__u32 info_len;
290294
__aligned_u64 info;
291295
} info;
296+
297+
struct { /* anonymous struct used by BPF_PROG_QUERY command */
298+
__u32 target_fd; /* container object to query */
299+
__u32 attach_type;
300+
__u32 query_flags;
301+
__u32 attach_flags;
302+
__aligned_u64 prog_ids;
303+
__u32 prog_cnt;
304+
} query;
292305
} __attribute__((aligned(8)));
293306

294307
/* BPF helper function descriptions:

kernel/bpf/cgroup.c

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -384,6 +384,52 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
384384
return err;
385385
}
386386

387+
/* Must be called with cgroup_mutex held to avoid races. */
/*
 * __cgroup_bpf_query() - report programs attached to a cgroup back to
 * userspace for the BPF_PROG_QUERY command.
 * @cgrp:  cgroup being queried
 * @attr:  kernel copy of the syscall attributes (query.* fields)
 * @uattr: user-space bpf_attr to write results into
 *
 * Always writes query.attach_flags and the total program count to @uattr.
 * If the caller supplied a prog_ids buffer, fills it with up to
 * attr->query.prog_cnt program ids.
 *
 * Return: 0 on success, -EFAULT on a failed copy_to_user(), -ENOSPC when
 * the user buffer is too small for the full id list (ids are still copied
 * up to the supplied capacity).
 */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	/* Effective set = programs that will actually run for this cgroup
	 * (includes inherited ones); otherwise only directly attached ones.
	 */
	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	/* Count and flags are reported even when no id buffer was given. */
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	/* Buffer too small: copy what fits but tell the caller via -ENOSPC. */
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		/* Walk the attach-order list; id is copied out one element
		 * at a time, so a mid-list fault leaves a partial result.
		 */
		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
432+
387433
/**
388434
* __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
389435
* @sk: The socket sending or receiving traffic

kernel/bpf/core.c

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1412,6 +1412,44 @@ void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
14121412
kfree_rcu(progs, rcu);
14131413
}
14141414

1415+
/*
 * bpf_prog_array_length() - number of programs in a bpf_prog_array.
 * @progs: RCU-protected, NULL-terminated program array
 *
 * Return: count of non-NULL entries before the terminating NULL.
 */
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
{
	struct bpf_prog **item;
	u32 n = 0;

	rcu_read_lock();
	item = rcu_dereference(progs)->progs;
	while (*item++)
		n++;
	rcu_read_unlock();

	return n;
}
1427+
1428+
/*
 * bpf_prog_array_copy_to_user() - copy program ids of an array to userspace.
 * @progs:    RCU-protected, NULL-terminated program array
 * @prog_ids: user buffer for the ids
 * @cnt:      capacity of @prog_ids in elements
 *
 * Return: 0 on success, -EFAULT on a failed copy_to_user(), -ENOSPC when
 * the array holds more than @cnt programs (the first @cnt ids are still
 * copied out).
 *
 * Fix vs. original: the "did entries remain?" probe used to dereference
 * *prog AFTER rcu_read_unlock(), i.e. outside the read-side critical
 * section, racing with kfree_rcu() of the array. The probe is now taken
 * inside the locked region.
 *
 * NOTE(review): copy_to_user() inside rcu_read_lock() may fault and sleep;
 * presumably callers guarantee the pages are resident or a later rework
 * should buffer the ids in kernel memory first — confirm.
 */
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
				__u32 __user *prog_ids, u32 cnt)
{
	struct bpf_prog **prog;
	u32 i = 0, id;
	bool nospc = false;

	rcu_read_lock();
	prog = rcu_dereference(progs)->progs;
	for (; *prog; prog++) {
		id = (*prog)->aux->id;
		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
			rcu_read_unlock();
			return -EFAULT;
		}
		if (++i == cnt) {
			/* probe for leftover entries while still under RCU */
			nospc = !!*(prog + 1);
			break;
		}
	}
	rcu_read_unlock();

	return nospc ? -ENOSPC : 0;
}
1452+
14151453
static void bpf_prog_free_deferred(struct work_struct *work)
14161454
{
14171455
struct bpf_prog_aux *aux;

kernel/bpf/syscall.c

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1272,6 +1272,37 @@ static int bpf_prog_detach(const union bpf_attr *attr)
12721272
return ret;
12731273
}
12741274

1275+
#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
1276+
1277+
static int bpf_prog_query(const union bpf_attr *attr,
1278+
union bpf_attr __user *uattr)
1279+
{
1280+
struct cgroup *cgrp;
1281+
int ret;
1282+
1283+
if (!capable(CAP_NET_ADMIN))
1284+
return -EPERM;
1285+
if (CHECK_ATTR(BPF_PROG_QUERY))
1286+
return -EINVAL;
1287+
if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
1288+
return -EINVAL;
1289+
1290+
switch (attr->query.attach_type) {
1291+
case BPF_CGROUP_INET_INGRESS:
1292+
case BPF_CGROUP_INET_EGRESS:
1293+
case BPF_CGROUP_INET_SOCK_CREATE:
1294+
case BPF_CGROUP_SOCK_OPS:
1295+
break;
1296+
default:
1297+
return -EINVAL;
1298+
}
1299+
cgrp = cgroup_get_from_fd(attr->query.target_fd);
1300+
if (IS_ERR(cgrp))
1301+
return PTR_ERR(cgrp);
1302+
ret = cgroup_bpf_query(cgrp, attr, uattr);
1303+
cgroup_put(cgrp);
1304+
return ret;
1305+
}
12751306
#endif /* CONFIG_CGROUP_BPF */
12761307

12771308
#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
@@ -1568,6 +1599,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
15681599
case BPF_PROG_DETACH:
15691600
err = bpf_prog_detach(&attr);
15701601
break;
1602+
case BPF_PROG_QUERY:
1603+
err = bpf_prog_query(&attr, uattr);
1604+
break;
15711605
#endif
15721606
case BPF_PROG_TEST_RUN:
15731607
err = bpf_prog_test_run(&attr, uattr);

kernel/cgroup/cgroup.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5761,4 +5761,14 @@ int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
57615761
mutex_unlock(&cgroup_mutex);
57625762
return ret;
57635763
}
5764+
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
5765+
union bpf_attr __user *uattr)
5766+
{
5767+
int ret;
5768+
5769+
mutex_lock(&cgroup_mutex);
5770+
ret = __cgroup_bpf_query(cgrp, attr, uattr);
5771+
mutex_unlock(&cgroup_mutex);
5772+
return ret;
5773+
}
57645774
#endif /* CONFIG_CGROUP_BPF */

0 commit comments

Comments
 (0)