
Commit ad98426

Daniel Borkmann says:

====================
pull-request: bpf 2023-10-11

We've added 14 non-merge commits during the last 5 day(s) which contain
a total of 12 files changed, 398 insertions(+), 104 deletions(-).

The main changes are:

1) Fix s390 JIT backchain issues in the trampoline code generation which
   previously clobbered the caller's backchain, from Ilya Leoshkevich.

2) Fix zero-size allocation warning in xsk sockets when the configured
   ring size was close to SIZE_MAX, from Andrew Kanner.

3) Fixes for bpf_mprog API that were found when implementing support
   in the ebpf-go library along with selftests, from Daniel Borkmann
   and Lorenz Bauer.

4) Fix riscv JIT to properly sign-extend the return register in
   programs. This fixes various test_progs selftests on riscv, from
   Björn Töpel.

5) Fix verifier log for async callback return values where the allowed
   range was displayed incorrectly, from David Vernet.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  s390/bpf: Fix unwinding past the trampoline
  s390/bpf: Fix clobbering the caller's backchain in the trampoline
  selftests/bpf: Add testcase for async callback return value failure
  bpf: Fix verifier log for async callback return values
  xdp: Fix zero-size allocation warning in xskq_create()
  riscv, bpf: Track both a0 (RISC-V ABI) and a5 (BPF) return values
  riscv, bpf: Sign-extend return values
  selftests/bpf: Make seen_tc* variable tests more robust
  selftests/bpf: Test query on empty mprog and pass revision into attach
  selftests/bpf: Adapt assert_mprog_count to always expect 0 count
  selftests/bpf: Test bpf_mprog query API via libbpf and raw syscall
  bpf: Refuse unused attributes in bpf_prog_{attach,detach}
  bpf: Handle bpf_mprog_query with NULL entry
  bpf: Fix BPF_PROG_QUERY last field check
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents (108a36d + 5356ba1), commit ad98426

File tree: 12 files changed, +398 / -104 lines


arch/riscv/net/bpf_jit_comp64.c

Lines changed: 12 additions & 6 deletions
@@ -245,7 +245,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
         emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
         /* Set return value. */
         if (!is_tail_call)
-                emit_mv(RV_REG_A0, RV_REG_A5, ctx);
+                emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
         emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
                   is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
                   ctx);
@@ -759,8 +759,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
         if (ret)
                 return ret;

-        if (save_ret)
-                emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+        if (save_ret) {
+                emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+                emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
+        }

         /* update branch with beqz */
         if (ctx->insns) {
@@ -853,7 +855,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,

         save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
         if (save_ret) {
-                stack_size += 8;
+                stack_size += 16; /* Save both A5 (BPF R0) and A0 */
                 retval_off = stack_size;
         }

@@ -957,6 +959,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
                 if (ret)
                         goto out;
                 emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+                emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
                 im->ip_after_call = ctx->insns + ctx->ninsns;
                 /* 2 nops reserved for auipc+jalr pair */
                 emit(rv_nop(), ctx);
@@ -988,8 +991,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
         if (flags & BPF_TRAMP_F_RESTORE_REGS)
                 restore_args(nregs, args_off, ctx);

-        if (save_ret)
+        if (save_ret) {
                 emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+                emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
+        }

         emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);

@@ -1515,7 +1520,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                 if (ret)
                         return ret;

-                emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+                if (insn->src_reg != BPF_PSEUDO_CALL)
+                        emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
                 break;
         }
         /* tail call */
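
The epilogue hunk above replaces a plain register move with emit_addiw() so the value handed back in a0 is the sign-extended 32-bit result held in a5 (BPF R0), and the trampoline hunks spill and restore a0 (RISC-V ABI return value) and a5 separately. As a rough illustration of what the addiw with a zero immediate achieves, here is a hedged C sketch; sext32_sketch is a made-up name, not a kernel helper:

/* Hedged sketch: on RV64, "addiw rd, rs, 0" takes the low 32 bits of rs,
 * sign-extends them to 64 bits and writes the result to rd. A plain move
 * would carry over whatever upper bits the BPF program left in a5.
 */
static inline long sext32_sketch(long a5)
{
        return (long)(int)a5;   /* effect of emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx) */
}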

arch/s390/net/bpf_jit_comp.c

Lines changed: 20 additions & 5 deletions
@@ -2066,6 +2066,7 @@ struct bpf_tramp_jit {
                                          * func_addr's original caller
                                          */
         int stack_size;                 /* Trampoline stack size */
+        int backchain_off;              /* Offset of backchain */
         int stack_args_off;             /* Offset of stack arguments for calling
                                          * func_addr, has to be at the top
                                          */
@@ -2086,9 +2087,10 @@ struct bpf_tramp_jit {
                                          * for __bpf_prog_enter() return value and
                                          * func_addr respectively
                                          */
-        int r14_off;                    /* Offset of saved %r14 */
         int run_ctx_off;                /* Offset of struct bpf_tramp_run_ctx */
         int tccnt_off;                  /* Offset of saved tailcall counter */
+        int r14_off;                    /* Offset of saved %r14, has to be at the
+                                         * bottom */
         int do_fexit;                   /* do_fexit: label */
 };

@@ -2247,25 +2249,38 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
          * Calculate the stack layout.
          */

-        /* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
+        /*
+         * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
+         * ABI requires, put our backchain at the end of the allocated memory.
+         */
         tjit->stack_size = STACK_FRAME_OVERHEAD;
+        tjit->backchain_off = tjit->stack_size - sizeof(u64);
         tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
         tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
         tjit->ip_off = alloc_stack(tjit, sizeof(u64));
         tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
         tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
         tjit->retval_off = alloc_stack(tjit, sizeof(u64));
         tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
-        tjit->r14_off = alloc_stack(tjit, sizeof(u64));
         tjit->run_ctx_off = alloc_stack(tjit,
                                         sizeof(struct bpf_tramp_run_ctx));
         tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
-        /* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
-        tjit->stack_size -= STACK_FRAME_OVERHEAD;
+        tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
+        /*
+         * In accordance with the s390x ABI, the caller has allocated
+         * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
+         * backchain, and the rest we can use.
+         */
+        tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
         tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;

+        /* lgr %r1,%r15 */
+        EMIT4(0xb9040000, REG_1, REG_15);
         /* aghi %r15,-stack_size */
         EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+        /* stg %r1,backchain_off(%r15) */
+        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
+                      tjit->backchain_off);
         /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
         _EMIT6(0xd203f000 | tjit->tccnt_off,
                0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
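
The lgr/aghi/stg sequence added above keeps the caller's stack pointer in %r1 across the frame allocation and stores it in the backchain slot of the trampoline's own frame, instead of overwriting the backchain in the caller's frame, so the unwinder can keep walking past the trampoline. A hedged, C-level sketch of that convention (illustrative names only, not kernel code):

/* Hedged sketch of the backchain convention implemented by the hunk above:
 * remember the old stack pointer, allocate our own frame, then store the
 * old stack pointer in the backchain slot of the new frame.
 */
static void trampoline_prologue_sketch(unsigned long **sp, int stack_size,
                                       int backchain_off)
{
        unsigned long *old_sp = *sp;                            /* lgr %r1,%r15 */

        *sp = (unsigned long *)((char *)old_sp - stack_size);   /* aghi %r15,-stack_size */
        (*sp)[backchain_off / sizeof(unsigned long)] =
                (unsigned long)old_sp;                          /* stg %r1,backchain_off(%r15) */
}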

kernel/bpf/mprog.c

Lines changed: 6 additions & 4 deletions
@@ -401,14 +401,16 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
         struct bpf_mprog_cp *cp;
         struct bpf_prog *prog;
         const u32 flags = 0;
+        u32 id, count = 0;
+        u64 revision = 1;
         int i, ret = 0;
-        u32 id, count;
-        u64 revision;

         if (attr->query.query_flags || attr->query.attach_flags)
                 return -EINVAL;
-        revision = bpf_mprog_revision(entry);
-        count = bpf_mprog_total(entry);
+        if (entry) {
+                revision = bpf_mprog_revision(entry);
+                count = bpf_mprog_total(entry);
+        }
         if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                 return -EFAULT;
         if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
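
With the defaults above, a query against a hook that has no bpf_mprog entry yet (entry == NULL) succeeds and reports an empty program list at the initial revision instead of dereferencing a NULL pointer. A hedged userspace sketch of the expected outcome via the raw syscall; the ifindex handling is a placeholder, and the count/revision values follow the initializers in this hunk:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hedged sketch: BPF_PROG_QUERY on an empty tcx hook is now expected to
 * succeed with prog_cnt == 0 and revision == 1 rather than fail. */
static int query_empty_hook_sketch(int ifindex)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.query.target_ifindex = ifindex;
        attr.query.attach_type = BPF_TCX_INGRESS;

        if (syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr)))
                return -1;
        /* expect attr.query.prog_cnt == 0 and attr.query.revision == 1 */
        return 0;
}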

kernel/bpf/syscall.c

Lines changed: 15 additions & 6 deletions
@@ -3796,7 +3796,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
         enum bpf_prog_type ptype;
         struct bpf_prog *prog;
-        u32 mask;
         int ret;

         if (CHECK_ATTR(BPF_PROG_ATTACH))
@@ -3805,10 +3804,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
         ptype = attach_type_to_prog_type(attr->attach_type);
         if (ptype == BPF_PROG_TYPE_UNSPEC)
                 return -EINVAL;
-        mask = bpf_mprog_supported(ptype) ?
-               BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
-        if (attr->attach_flags & ~mask)
-                return -EINVAL;
+        if (bpf_mprog_supported(ptype)) {
+                if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
+                        return -EINVAL;
+        } else {
+                if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
+                        return -EINVAL;
+                if (attr->relative_fd ||
+                    attr->expected_revision)
+                        return -EINVAL;
+        }

         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
         if (IS_ERR(prog))
@@ -3878,6 +3883,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                         if (IS_ERR(prog))
                                 return PTR_ERR(prog);
                 }
+        } else if (attr->attach_flags ||
+                   attr->relative_fd ||
+                   attr->expected_revision) {
+                return -EINVAL;
         }

         switch (ptype) {
@@ -3913,7 +3922,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
         return ret;
 }

-#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
+#define BPF_PROG_QUERY_LAST_FIELD query.revision

 static int bpf_prog_query(const union bpf_attr *attr,
                           union bpf_attr __user *uattr)
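
The attach/detach hunks above stop silently ignoring mprog-only attributes for attach types that do not support them. A hedged raw-syscall sketch of the newly rejected case; the fds and attach type are placeholders:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hedged sketch: for a legacy (non-mprog) attach type, passing mprog-only
 * fields such as relative_fd or expected_revision now yields -EINVAL
 * instead of being ignored. */
static int attach_with_stray_field_sketch(int cgroup_fd, int prog_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = cgroup_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = BPF_CGROUP_INET_INGRESS;
        attr.relative_fd = 1;   /* unused for this attach type: now refused */

        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}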

kernel/bpf/tcx.c

Lines changed: 1 addition & 7 deletions
@@ -123,7 +123,6 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
 {
         bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
         struct net *net = current->nsproxy->net_ns;
-        struct bpf_mprog_entry *entry;
         struct net_device *dev;
         int ret;

@@ -133,12 +132,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
                 ret = -ENODEV;
                 goto out;
         }
-        entry = tcx_entry_fetch(dev, ingress);
-        if (!entry) {
-                ret = -ENOENT;
-                goto out;
-        }
-        ret = bpf_mprog_query(attr, uattr, entry);
+        ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
 out:
         rtnl_unlock();
         return ret;
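
After this simplification, tcx_prog_query() passes a possibly-NULL entry straight to bpf_mprog_query(), which now handles it (see the mprog.c hunk above), so querying a device without any tcx programs no longer fails with -ENOENT. A hedged libbpf-side sketch of the same query; it assumes a libbpf recent enough to expose the revision field in bpf_prog_query_opts, and "eth0" is a placeholder:

#include <bpf/bpf.h>
#include <net/if.h>
#include <stdio.h>

static int query_tcx_ingress_sketch(const char *ifname)
{
        LIBBPF_OPTS(bpf_prog_query_opts, opts);
        int ifindex = if_nametoindex(ifname);
        int err;

        err = bpf_prog_query_opts(ifindex, BPF_TCX_INGRESS, &opts);
        if (err)
                return err;
        /* with nothing attached: prog_cnt == 0, revision == 1 */
        printf("programs: %u, revision: %llu\n", opts.prog_cnt,
               (unsigned long long)opts.revision);
        return 0;
}

/* usage sketch: query_tcx_ingress_sketch("eth0"); */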

kernel/bpf/verifier.c

Lines changed: 3 additions & 3 deletions
@@ -14479,7 +14479,7 @@ static int check_return_code(struct bpf_verifier_env *env)
         struct tnum enforce_attach_type_range = tnum_unknown;
         const struct bpf_prog *prog = env->prog;
         struct bpf_reg_state *reg;
-        struct tnum range = tnum_range(0, 1);
+        struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
         enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
         int err;
         struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14527,8 +14527,8 @@ static int check_return_code(struct bpf_verifier_env *env)
                         return -EINVAL;
                 }

-                if (!tnum_in(tnum_const(0), reg->var_off)) {
-                        verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
+                if (!tnum_in(const_0, reg->var_off)) {
+                        verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
                         return -EINVAL;
                 }
                 return 0;
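
The verifier hunk above makes the log for async callbacks print the range that is actually enforced, [0, 0], instead of the generic [0, 1]. A hedged BPF-side sketch of the case it concerns (map and timer setup omitted, names illustrative):

/* An async callback, e.g. one installed with bpf_timer_set_callback(),
 * may only return 0. A program whose callback returns 1 is rejected at
 * load time, and the verifier log now reports the allowed range as
 * [0, 0]. */
static int timer_cb_sketch(void *map, int *key, struct bpf_timer *timer)
{
        return 1;       /* invalid: rejected by check_return_code() */
}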

net/xdp/xsk_queue.c

Lines changed: 10 additions & 0 deletions
@@ -34,6 +34,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
         q->ring_mask = nentries - 1;

         size = xskq_get_ring_size(q, umem_queue);
+
+        /* size which is overflowing or close to SIZE_MAX will become 0 in
+         * PAGE_ALIGN(), checking SIZE_MAX is enough due to the previous
+         * is_power_of_2(), the rest will be handled by vmalloc_user()
+         */
+        if (unlikely(size == SIZE_MAX)) {
+                kfree(q);
+                return NULL;
+        }
+
         size = PAGE_ALIGN(size);

         q->ring = vmalloc_user(size);
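
The check added above is needed because PAGE_ALIGN() rounds up and therefore wraps to 0 for sizes at or near SIZE_MAX, which would hand a zero size to vmalloc_user() and trigger the allocation warning; with nentries restricted to powers of two, testing for SIZE_MAX exactly is sufficient, as the comment notes. A small userspace sketch of the wraparound, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's PAGE_ALIGN() for a 4 KiB page size (assumption). */
static size_t page_align_sketch(size_t size)
{
        return (size + 4096 - 1) & ~((size_t)4096 - 1);
}

int main(void)
{
        printf("%zu\n", page_align_sketch(SIZE_MAX));   /* prints 0 */
        return 0;
}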

tools/testing/selftests/bpf/prog_tests/tc_helpers.h

Lines changed: 9 additions & 7 deletions
@@ -45,28 +45,30 @@ static inline __u32 ifindex_from_link_fd(int fd)
         return link_info.tcx.ifindex;
 }

-static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex)
+static inline void __assert_mprog_count(int target, int expected, int ifindex)
 {
         __u32 count = 0, attach_flags = 0;
         int err;

         err = bpf_prog_query(ifindex, target, 0, &attach_flags,
                              NULL, &count);
         ASSERT_EQ(count, expected, "count");
-        if (!expected && !miniq)
-                ASSERT_EQ(err, -ENOENT, "prog_query");
-        else
-                ASSERT_EQ(err, 0, "prog_query");
+        ASSERT_EQ(err, 0, "prog_query");
 }

 static inline void assert_mprog_count(int target, int expected)
 {
-        __assert_mprog_count(target, expected, false, loopback);
+        __assert_mprog_count(target, expected, loopback);
 }

 static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected)
 {
-        __assert_mprog_count(target, expected, false, ifindex);
+        __assert_mprog_count(target, expected, ifindex);
+}
+
+static inline void tc_skel_reset_all_seen(struct test_tc_link *skel)
+{
+        memset(skel->bss, 0, sizeof(*skel->bss));
 }

 #endif /* TC_HELPERS */
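
The new tc_skel_reset_all_seen() helper zeroes the skeleton's .bss so that every seen_tc* flag starts from a known state. A hedged usage sketch from a test's point of view (helper and flag names follow the existing tc selftests, the ping command is illustrative):

        tc_skel_reset_all_seen(skel);
        ASSERT_OK(system("ping -q -c1 -w1 127.0.0.1 > /dev/null"), "ping");
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");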
