@@ -2615,6 +2615,56 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
2615
2615
__update_reg_bounds(reg);
2616
2616
}
2617
2617
2618
+ static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
2619
+ struct bpf_reg_state *reg, const char *ctx)
2620
+ {
2621
+ const char *msg;
2622
+
2623
+ if (reg->umin_value > reg->umax_value ||
2624
+ reg->smin_value > reg->smax_value ||
2625
+ reg->u32_min_value > reg->u32_max_value ||
2626
+ reg->s32_min_value > reg->s32_max_value) {
2627
+ msg = "range bounds violation";
2628
+ goto out;
2629
+ }
2630
+
2631
+ if (tnum_is_const(reg->var_off)) {
2632
+ u64 uval = reg->var_off.value;
2633
+ s64 sval = (s64)uval;
2634
+
2635
+ if (reg->umin_value != uval || reg->umax_value != uval ||
2636
+ reg->smin_value != sval || reg->smax_value != sval) {
2637
+ msg = "const tnum out of sync with range bounds";
2638
+ goto out;
2639
+ }
2640
+ }
2641
+
2642
+ if (tnum_subreg_is_const(reg->var_off)) {
2643
+ u32 uval32 = tnum_subreg(reg->var_off).value;
2644
+ s32 sval32 = (s32)uval32;
2645
+
2646
+ if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 ||
2647
+ reg->s32_min_value != sval32 || reg->s32_max_value != sval32) {
2648
+ msg = "const subreg tnum out of sync with range bounds";
2649
+ goto out;
2650
+ }
2651
+ }
2652
+
2653
+ return 0;
2654
+ out:
2655
+ verbose(env, "REG SANITY VIOLATION (%s): %s u64=[%#llx, %#llx] "
2656
+ "s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)\n",
2657
+ ctx, msg, reg->umin_value, reg->umax_value,
2658
+ reg->smin_value, reg->smax_value,
2659
+ reg->u32_min_value, reg->u32_max_value,
2660
+ reg->s32_min_value, reg->s32_max_value,
2661
+ reg->var_off.value, reg->var_off.mask);
2662
+ if (env->test_sanity_strict)
2663
+ return -EFAULT;
2664
+ __mark_reg_unbounded(reg);
2665
+ return 0;
2666
+ }
2667
+
2618
2668
static bool __reg32_bound_s64(s32 a)
2619
2669
{
2620
2670
return a >= 0 && a <= S32_MAX;
@@ -9982,14 +10032,15 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
9982
10032
return 0;
9983
10033
}
9984
10034
9985
- static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
9986
- int func_id,
9987
- struct bpf_call_arg_meta *meta)
10035
+ static int do_refine_retval_range(struct bpf_verifier_env *env,
10036
+ struct bpf_reg_state *regs, int ret_type,
10037
+ int func_id,
10038
+ struct bpf_call_arg_meta *meta)
9988
10039
{
9989
10040
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
9990
10041
9991
10042
if (ret_type != RET_INTEGER)
9992
- return;
10043
+ return 0;
9993
10044
9994
10045
switch (func_id) {
9995
10046
case BPF_FUNC_get_stack:
@@ -10015,6 +10066,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
10015
10066
reg_bounds_sync(ret_reg);
10016
10067
break;
10017
10068
}
10069
+
10070
+ return reg_bounds_sanity_check(env, ret_reg, "retval");
10018
10071
}
10019
10072
10020
10073
static int
@@ -10666,7 +10719,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
10666
10719
regs[BPF_REG_0].ref_obj_id = id;
10667
10720
}
10668
10721
10669
- do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
10722
+ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
10723
+ if (err)
10724
+ return err;
10670
10725
10671
10726
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
10672
10727
if (err)
@@ -14166,13 +14221,12 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
14166
14221
14167
14222
/* check dest operand */
14168
14223
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
14224
+ err = err ?: adjust_reg_min_max_vals(env, insn);
14169
14225
if (err)
14170
14226
return err;
14171
-
14172
- return adjust_reg_min_max_vals(env, insn);
14173
14227
}
14174
14228
14175
- return 0 ;
14229
+ return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
14176
14230
}
14177
14231
14178
14232
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
@@ -14653,18 +14707,21 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
14653
14707
* Technically we can do similar adjustments for pointers to the same object,
14654
14708
* but we don't support that right now.
14655
14709
*/
14656
- static void reg_set_min_max(struct bpf_reg_state *true_reg1,
14657
- struct bpf_reg_state *true_reg2,
14658
- struct bpf_reg_state *false_reg1,
14659
- struct bpf_reg_state *false_reg2,
14660
- u8 opcode, bool is_jmp32)
14710
+ static int reg_set_min_max(struct bpf_verifier_env *env,
14711
+ struct bpf_reg_state *true_reg1,
14712
+ struct bpf_reg_state *true_reg2,
14713
+ struct bpf_reg_state *false_reg1,
14714
+ struct bpf_reg_state *false_reg2,
14715
+ u8 opcode, bool is_jmp32)
14661
14716
{
14717
+ int err;
14718
+
14662
14719
/* If either register is a pointer, we can't learn anything about its
14663
14720
* variable offset from the compare (unless they were a pointer into
14664
14721
* the same object, but we don't bother with that).
14665
14722
*/
14666
14723
if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
14667
- return;
14724
+ return 0;
14668
14725
14669
14726
/* fallthrough (FALSE) branch */
14670
14727
regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
@@ -14675,6 +14732,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg1,
14675
14732
regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32);
14676
14733
reg_bounds_sync(true_reg1);
14677
14734
reg_bounds_sync(true_reg2);
14735
+
14736
+ err = reg_bounds_sanity_check(env, true_reg1, "true_reg1");
14737
+ err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2");
14738
+ err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1");
14739
+ err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2");
14740
+ return err;
14678
14741
}
14679
14742
14680
14743
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
@@ -14968,15 +15031,20 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
14968
15031
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
14969
15032
14970
15033
if (BPF_SRC(insn->code) == BPF_X) {
14971
- reg_set_min_max(&other_branch_regs[insn->dst_reg],
14972
- &other_branch_regs[insn->src_reg],
14973
- dst_reg, src_reg, opcode, is_jmp32);
15034
+ err = reg_set_min_max(env,
15035
+ &other_branch_regs[insn->dst_reg],
15036
+ &other_branch_regs[insn->src_reg],
15037
+ dst_reg, src_reg, opcode, is_jmp32);
14974
15038
} else /* BPF_SRC(insn->code) == BPF_K */ {
14975
- reg_set_min_max(&other_branch_regs[insn->dst_reg],
14976
- src_reg /* fake one */,
14977
- dst_reg, src_reg /* same fake one */,
14978
- opcode, is_jmp32);
15039
+ err = reg_set_min_max(env,
15040
+ &other_branch_regs[insn->dst_reg],
15041
+ src_reg /* fake one */,
15042
+ dst_reg, src_reg /* same fake one */,
15043
+ opcode, is_jmp32);
14979
15044
}
15045
+ if (err)
15046
+ return err;
15047
+
14980
15048
if (BPF_SRC(insn->code) == BPF_X &&
14981
15049
src_reg->type == SCALAR_VALUE && src_reg->id &&
14982
15050
!WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
@@ -17479,10 +17547,8 @@ static int do_check(struct bpf_verifier_env *env)
17479
17547
insn->off, BPF_SIZE(insn->code),
17480
17548
BPF_READ, insn->dst_reg, false,
17481
17549
BPF_MODE(insn->code) == BPF_MEMSX);
17482
- if (err)
17483
- return err;
17484
-
17485
- err = save_aux_ptr_type(env, src_reg_type, true);
17550
+ err = err ?: save_aux_ptr_type(env, src_reg_type, true);
17551
+ err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx");
17486
17552
if (err)
17487
17553
return err;
17488
17554
} else if (class == BPF_STX) {
@@ -20769,6 +20835,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
20769
20835
20770
20836
if (is_priv)
20771
20837
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
20838
+ env->test_sanity_strict = attr->prog_flags & BPF_F_TEST_SANITY_STRICT;
20772
20839
20773
20840
env->explored_states = kvcalloc(state_htab_size(env),
20774
20841
sizeof(struct bpf_verifier_state_list *),
0 commit comments