Skip to content

Commit 7155f8f

Browse files
committed
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says: ==================== pull-request: bpf 2018-01-18 The following pull-request contains BPF updates for your *net* tree. The main changes are: 1) Fix a divide by zero due to wrong if (src_reg == 0) check in 64-bit mode. Properly handle this in interpreter and mask it also generically in verifier to guard against similar checks in JITs, from Eric and Alexei. 2) Fix a bug in arm64 JIT when tail calls are involved and progs have different stack sizes, from Daniel. 3) Reject stores into BPF context that are not expected BPF_STX | BPF_MEM variant, from Daniel. 4) Mark dst reg as unknown on {s,u}bounds adjustments when the src reg has derived bounds from dead branches, from Daniel. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents ad9294d + 6f16101 commit 7155f8f

File tree

5 files changed

+219
-25
lines changed

5 files changed

+219
-25
lines changed

arch/arm64/net/bpf_jit_comp.c

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
148148
/* Stack must be multiples of 16B */
149149
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
150150

151-
#define PROLOGUE_OFFSET 8
151+
/* Tail call offset to jump into */
152+
#define PROLOGUE_OFFSET 7
152153

153154
static int build_prologue(struct jit_ctx *ctx)
154155
{
@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
200201
/* Initialize tail_call_cnt */
201202
emit(A64_MOVZ(1, tcc, 0, 0), ctx);
202203

203-
/* 4 byte extra for skb_copy_bits buffer */
204-
ctx->stack_size = prog->aux->stack_depth + 4;
205-
ctx->stack_size = STACK_ALIGN(ctx->stack_size);
206-
207-
/* Set up function call stack */
208-
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
209-
210204
cur_offset = ctx->idx - idx0;
211205
if (cur_offset != PROLOGUE_OFFSET) {
212206
pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
213207
cur_offset, PROLOGUE_OFFSET);
214208
return -1;
215209
}
210+
211+
/* 4 byte extra for skb_copy_bits buffer */
212+
ctx->stack_size = prog->aux->stack_depth + 4;
213+
ctx->stack_size = STACK_ALIGN(ctx->stack_size);
214+
215+
/* Set up function call stack */
216+
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
216217
return 0;
217218
}
218219

@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
260261
emit(A64_LDR64(prg, tmp, prg), ctx);
261262
emit(A64_CBZ(1, prg, jmp_offset), ctx);
262263

263-
/* goto *(prog->bpf_func + prologue_size); */
264+
/* goto *(prog->bpf_func + prologue_offset); */
264265
off = offsetof(struct bpf_prog, bpf_func);
265266
emit_a64_mov_i64(tmp, off, ctx);
266267
emit(A64_LDR64(tmp, prg, tmp), ctx);
267268
emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
269+
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
268270
emit(A64_BR(tmp), ctx);
269271

270272
/* out: */

kernel/bpf/core.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -956,7 +956,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
956956
DST = tmp;
957957
CONT;
958958
ALU_MOD_X:
959-
if (unlikely(SRC == 0))
959+
if (unlikely((u32)SRC == 0))
960960
return 0;
961961
tmp = (u32) DST;
962962
DST = do_div(tmp, (u32) SRC);
@@ -975,7 +975,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
975975
DST = div64_u64(DST, SRC);
976976
CONT;
977977
ALU_DIV_X:
978-
if (unlikely(SRC == 0))
978+
if (unlikely((u32)SRC == 0))
979979
return 0;
980980
tmp = (u32) DST;
981981
do_div(tmp, (u32) SRC);

kernel/bpf/verifier.c

Lines changed: 53 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
978978
return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
979979
}
980980

981+
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
982+
{
983+
const struct bpf_reg_state *reg = cur_regs(env) + regno;
984+
985+
return reg->type == PTR_TO_CTX;
986+
}
987+
981988
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
982989
const struct bpf_reg_state *reg,
983990
int off, int size, bool strict)
@@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
12581265
return -EACCES;
12591266
}
12601267

1268+
if (is_ctx_reg(env, insn->dst_reg)) {
1269+
verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
1270+
insn->dst_reg);
1271+
return -EACCES;
1272+
}
1273+
12611274
/* check whether atomic_add can read the memory */
12621275
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
12631276
BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1882,17 +1895,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
18821895

18831896
dst_reg = &regs[dst];
18841897

1885-
if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
1886-
print_verifier_state(env, env->cur_state);
1887-
verbose(env,
1888-
"verifier internal error: known but bad sbounds\n");
1889-
return -EINVAL;
1890-
}
1891-
if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
1892-
print_verifier_state(env, env->cur_state);
1893-
verbose(env,
1894-
"verifier internal error: known but bad ubounds\n");
1895-
return -EINVAL;
1898+
if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
1899+
smin_val > smax_val || umin_val > umax_val) {
1900+
/* Taint dst register if offset had invalid bounds derived from
1901+
* e.g. dead branches.
1902+
*/
1903+
__mark_reg_unknown(dst_reg);
1904+
return 0;
18961905
}
18971906

18981907
if (BPF_CLASS(insn->code) != BPF_ALU64) {
@@ -2084,6 +2093,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
20842093
src_known = tnum_is_const(src_reg.var_off);
20852094
dst_known = tnum_is_const(dst_reg->var_off);
20862095

2096+
if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
2097+
smin_val > smax_val || umin_val > umax_val) {
2098+
/* Taint dst register if offset had invalid bounds derived from
2099+
* e.g. dead branches.
2100+
*/
2101+
__mark_reg_unknown(dst_reg);
2102+
return 0;
2103+
}
2104+
20872105
if (!src_known &&
20882106
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
20892107
__mark_reg_unknown(dst_reg);
@@ -3993,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
39934011
if (err)
39944012
return err;
39954013

4014+
if (is_ctx_reg(env, insn->dst_reg)) {
4015+
verbose(env, "BPF_ST stores into R%d context is not allowed\n",
4016+
insn->dst_reg);
4017+
return -EACCES;
4018+
}
4019+
39964020
/* check that memory (dst_reg + off) is writeable */
39974021
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
39984022
BPF_SIZE(insn->code), BPF_WRITE,
@@ -4445,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
44454469
int i, cnt, delta = 0;
44464470

44474471
for (i = 0; i < insn_cnt; i++, insn++) {
4472+
if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
4473+
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
4474+
/* due to JIT bugs clear upper 32-bits of src register
4475+
* before div/mod operation
4476+
*/
4477+
insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
4478+
insn_buf[1] = *insn;
4479+
cnt = 2;
4480+
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4481+
if (!new_prog)
4482+
return -ENOMEM;
4483+
4484+
delta += cnt - 1;
4485+
env->prog = prog = new_prog;
4486+
insn = new_prog->insnsi + i + delta;
4487+
continue;
4488+
}
4489+
44484490
if (insn->code != (BPF_JMP | BPF_CALL))
44494491
continue;
44504492

net/core/filter.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -458,6 +458,10 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
458458
convert_bpf_extensions(fp, &insn))
459459
break;
460460

461+
if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
462+
fp->code == (BPF_ALU | BPF_MOD | BPF_X))
463+
*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
464+
461465
*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
462466
break;
463467

tools/testing/selftests/bpf/test_verifier.c

Lines changed: 149 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2592,6 +2592,29 @@ static struct bpf_test tests[] = {
25922592
.result = ACCEPT,
25932593
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
25942594
},
2595+
{
2596+
"context stores via ST",
2597+
.insns = {
2598+
BPF_MOV64_IMM(BPF_REG_0, 0),
2599+
BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2600+
BPF_EXIT_INSN(),
2601+
},
2602+
.errstr = "BPF_ST stores into R1 context is not allowed",
2603+
.result = REJECT,
2604+
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2605+
},
2606+
{
2607+
"context stores via XADD",
2608+
.insns = {
2609+
BPF_MOV64_IMM(BPF_REG_0, 0),
2610+
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2611+
BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2612+
BPF_EXIT_INSN(),
2613+
},
2614+
.errstr = "BPF_XADD stores into R1 context is not allowed",
2615+
.result = REJECT,
2616+
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2617+
},
25952618
{
25962619
"direct packet access: test1",
25972620
.insns = {
@@ -4312,7 +4335,8 @@ static struct bpf_test tests[] = {
43124335
.fixup_map1 = { 2 },
43134336
.errstr_unpriv = "R2 leaks addr into mem",
43144337
.result_unpriv = REJECT,
4315-
.result = ACCEPT,
4338+
.result = REJECT,
4339+
.errstr = "BPF_XADD stores into R1 context is not allowed",
43164340
},
43174341
{
43184342
"leak pointer into ctx 2",
@@ -4326,7 +4350,8 @@ static struct bpf_test tests[] = {
43264350
},
43274351
.errstr_unpriv = "R10 leaks addr into mem",
43284352
.result_unpriv = REJECT,
4329-
.result = ACCEPT,
4353+
.result = REJECT,
4354+
.errstr = "BPF_XADD stores into R1 context is not allowed",
43304355
},
43314356
{
43324357
"leak pointer into ctx 3",
@@ -6707,7 +6732,7 @@ static struct bpf_test tests[] = {
67076732
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
67086733
},
67096734
.fixup_map1 = { 4 },
6710-
.errstr = "unbounded min value",
6735+
.errstr = "R0 invalid mem access 'inv'",
67116736
.result = REJECT,
67126737
},
67136738
{
@@ -8608,6 +8633,127 @@ static struct bpf_test tests[] = {
86088633
.prog_type = BPF_PROG_TYPE_XDP,
86098634
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
86108635
},
8636+
{
8637+
"check deducing bounds from const, 1",
8638+
.insns = {
8639+
BPF_MOV64_IMM(BPF_REG_0, 1),
8640+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
8641+
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8642+
BPF_EXIT_INSN(),
8643+
},
8644+
.result = REJECT,
8645+
.errstr = "R0 tried to subtract pointer from scalar",
8646+
},
8647+
{
8648+
"check deducing bounds from const, 2",
8649+
.insns = {
8650+
BPF_MOV64_IMM(BPF_REG_0, 1),
8651+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
8652+
BPF_EXIT_INSN(),
8653+
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
8654+
BPF_EXIT_INSN(),
8655+
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8656+
BPF_EXIT_INSN(),
8657+
},
8658+
.result = ACCEPT,
8659+
},
8660+
{
8661+
"check deducing bounds from const, 3",
8662+
.insns = {
8663+
BPF_MOV64_IMM(BPF_REG_0, 0),
8664+
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
8665+
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8666+
BPF_EXIT_INSN(),
8667+
},
8668+
.result = REJECT,
8669+
.errstr = "R0 tried to subtract pointer from scalar",
8670+
},
8671+
{
8672+
"check deducing bounds from const, 4",
8673+
.insns = {
8674+
BPF_MOV64_IMM(BPF_REG_0, 0),
8675+
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
8676+
BPF_EXIT_INSN(),
8677+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8678+
BPF_EXIT_INSN(),
8679+
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8680+
BPF_EXIT_INSN(),
8681+
},
8682+
.result = ACCEPT,
8683+
},
8684+
{
8685+
"check deducing bounds from const, 5",
8686+
.insns = {
8687+
BPF_MOV64_IMM(BPF_REG_0, 0),
8688+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8689+
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8690+
BPF_EXIT_INSN(),
8691+
},
8692+
.result = REJECT,
8693+
.errstr = "R0 tried to subtract pointer from scalar",
8694+
},
8695+
{
8696+
"check deducing bounds from const, 6",
8697+
.insns = {
8698+
BPF_MOV64_IMM(BPF_REG_0, 0),
8699+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8700+
BPF_EXIT_INSN(),
8701+
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8702+
BPF_EXIT_INSN(),
8703+
},
8704+
.result = REJECT,
8705+
.errstr = "R0 tried to subtract pointer from scalar",
8706+
},
8707+
{
8708+
"check deducing bounds from const, 7",
8709+
.insns = {
8710+
BPF_MOV64_IMM(BPF_REG_0, ~0),
8711+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
8712+
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8713+
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8714+
offsetof(struct __sk_buff, mark)),
8715+
BPF_EXIT_INSN(),
8716+
},
8717+
.result = REJECT,
8718+
.errstr = "dereference of modified ctx ptr",
8719+
},
8720+
{
8721+
"check deducing bounds from const, 8",
8722+
.insns = {
8723+
BPF_MOV64_IMM(BPF_REG_0, ~0),
8724+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8725+
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
8726+
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8727+
offsetof(struct __sk_buff, mark)),
8728+
BPF_EXIT_INSN(),
8729+
},
8730+
.result = REJECT,
8731+
.errstr = "dereference of modified ctx ptr",
8732+
},
8733+
{
8734+
"check deducing bounds from const, 9",
8735+
.insns = {
8736+
BPF_MOV64_IMM(BPF_REG_0, 0),
8737+
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
8738+
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8739+
BPF_EXIT_INSN(),
8740+
},
8741+
.result = REJECT,
8742+
.errstr = "R0 tried to subtract pointer from scalar",
8743+
},
8744+
{
8745+
"check deducing bounds from const, 10",
8746+
.insns = {
8747+
BPF_MOV64_IMM(BPF_REG_0, 0),
8748+
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
8749+
/* Marks reg as unknown. */
8750+
BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
8751+
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8752+
BPF_EXIT_INSN(),
8753+
},
8754+
.result = REJECT,
8755+
.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
8756+
},
86118757
{
86128758
"bpf_exit with invalid return code. test1",
86138759
.insns = {

0 commit comments

Comments (0)