@@ -5649,7 +5649,7 @@ static struct bpf_test tests[] = {
 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_1, 0),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -5884,7 +5884,7 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
@@ -9056,6 +9056,68 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"calls: caller stack init to zero or map_value_or_null",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			/* fetch map_value_or_null or const_zero from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* store into map_value */
+			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* if (ctx == 0) return; */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+			/* else bpf_map_lookup() and *(fp - 8) = r0 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 13 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"calls: stack init to zero and pruning",
+		.insns = {
+			/* first make allocated_stack 16 byte */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+			/* now fork the execution such that the false branch
+			 * of JGT insn will be verified second and it skips zero
+			 * init of fp-8 stack slot. If stack liveness marking
+			 * is missing live_read marks from call map_lookup
+			 * processing, then pruning will incorrectly assume
+			 * that the fp-8 stack slot was unused in the fall-through
+			 * branch and will accept the program incorrectly
+			 */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 6 },
+		.errstr = "invalid indirect read from stack off -8+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
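The two tests added above rely on bpf-to-bpf calls: a BPF_JMP | BPF_CALL instruction whose src_reg is BPF_PSEUDO_CALL calls another BPF function, with imm giving the callee's offset relative to the instruction after the call (4 in the first test, pointing at the subprog 1 block), while src_reg == 0 keeps the ordinary helper-call meaning. The .fixup_map1 = { 13 } and .fixup_map2 = { 6 } entries give the instruction indices of the BPF_LD_MAP_FD loads that the test harness patches with real map fds before loading the program. A rough sketch of the two call encodings, paraphrased from the filter.h macros and not part of the patch:

#include <linux/bpf.h>

/* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4): call into subprog 1.
 * src_reg == BPF_PSEUDO_CALL marks a call to another BPF function;
 * imm is the callee offset relative to the next instruction.
 */
struct bpf_insn pseudo_call = {
	.code    = BPF_JMP | BPF_CALL,
	.dst_reg = 0,
	.src_reg = BPF_PSEUDO_CALL,
	.off     = 0,
	.imm     = 4,
};

/* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem):
 * an ordinary helper call; src_reg == 0 and imm selects the helper.
 */
struct bpf_insn helper_call = {
	.code    = BPF_JMP | BPF_CALL,
	.dst_reg = 0,
	.src_reg = 0,
	.off     = 0,
	.imm     = BPF_FUNC_map_lookup_elem,
};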