@@ -216,6 +216,17 @@ static const char * const reg_type_str[] = {
 	[PTR_TO_PACKET_END]	= "pkt_end",
 };
 
+static void print_liveness(struct bpf_verifier_env *env,
+			   enum bpf_reg_liveness live)
+{
+	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+		verbose(env, "_");
+	if (live & REG_LIVE_READ)
+		verbose(env, "r");
+	if (live & REG_LIVE_WRITTEN)
+		verbose(env, "w");
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
 				 struct bpf_verifier_state *state)
 {
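As a rough illustration (a user-space sketch, not part of the patch): assuming the usual flag values REG_LIVE_READ = 0x1 and REG_LIVE_WRITTEN = 0x2 (modelled below as LIVE_READ/LIVE_WRITTEN), the new suffixes render register state as "R2_r", "R3_w", "R4_rw" and so on.

#include <stdio.h>

/* Stand-ins for the kernel's REG_LIVE_* flags (assumed bit values). */
enum reg_liveness_demo { LIVE_NONE = 0, LIVE_READ = 0x1, LIVE_WRITTEN = 0x2 };

static void print_liveness_demo(unsigned int live)
{
	if (live & (LIVE_READ | LIVE_WRITTEN))
		printf("_");		/* separator between name and marks */
	if (live & LIVE_READ)
		printf("r");
	if (live & LIVE_WRITTEN)
		printf("w");
}

int main(void)
{
	unsigned int samples[] = { LIVE_NONE, LIVE_READ, LIVE_WRITTEN,
				   LIVE_READ | LIVE_WRITTEN };
	int i;

	for (i = 0; i < 4; i++) {
		printf(" R%d", i + 1);
		print_liveness_demo(samples[i]);
	}
	printf("\n");	/* prints: " R1 R2_r R3_w R4_rw" */
	return 0;
}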
@@ -228,7 +239,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 		t = reg->type;
 		if (t == NOT_INIT)
 			continue;
-		verbose(env, " R%d=%s", i, reg_type_str[t]);
+		verbose(env, " R%d", i);
+		print_liveness(env, reg->live);
+		verbose(env, "=%s", reg_type_str[t]);
 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
 		    tnum_is_const(reg->var_off)) {
 			/* reg->off should be 0 for SCALAR_VALUE */
@@ -277,10 +290,13 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 		}
 	}
 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] == STACK_SPILL)
-			verbose(env, " fp%d=%s",
-				-MAX_BPF_STACK + i * BPF_REG_SIZE,
+		if (state->stack[i].slot_type[0] == STACK_SPILL) {
+			verbose(env, " fp%d",
+				(-i - 1) * BPF_REG_SIZE);
+			print_liveness(env, state->stack[i].spilled_ptr.live);
+			verbose(env, "=%s",
 				reg_type_str[state->stack[i].spilled_ptr.type]);
+		}
 	}
 	verbose(env, "\n");
 }
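The printed frame-pointer offset changes from a fixed -MAX_BPF_STACK base to (-i - 1) * BPF_REG_SIZE, which assumes stack slot 0 is the 8-byte slot closest to the frame pointer. A minimal sketch of that mapping (user-space only, not kernel code):

#include <stdio.h>

#define BPF_REG_SIZE 8	/* each stack slot holds one 8-byte register */

int main(void)
{
	int i;

	/* slot 0 -> fp-8, slot 1 -> fp-16, slot 2 -> fp-24, ... */
	for (i = 0; i < 4; i++)
		printf("slot %d -> fp%d\n", i, (-i - 1) * BPF_REG_SIZE);
	return 0;
}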
@@ -568,8 +584,8 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
 {
 	if (WARN_ON(regno >= MAX_BPF_REG)) {
 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
-		/* Something bad happened, let's kill all regs */
-		for (regno = 0; regno < MAX_BPF_REG; regno++)
+		/* Something bad happened, let's kill all regs except FP */
+		for (regno = 0; regno < BPF_REG_FP; regno++)
 			__mark_reg_not_init(regs + regno);
 		return;
 	}
@@ -587,8 +603,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 {
 	if (WARN_ON(regno >= MAX_BPF_REG)) {
 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
-		/* Something bad happened, let's kill all regs */
-		for (regno = 0; regno < MAX_BPF_REG; regno++)
+		/* Something bad happened, let's kill all regs except FP */
+		for (regno = 0; regno < BPF_REG_FP; regno++)
 			__mark_reg_not_init(regs + regno);
 		return;
 	}
@@ -779,6 +795,11 @@ static int check_stack_read(struct bpf_verifier_env *env,
 		if (value_regno >= 0) {
 			/* restore register state from stack */
 			state->regs[value_regno] = state->stack[spi].spilled_ptr;
+			/* mark reg as written since spilled pointer state likely
+			 * has its liveness marks cleared by is_state_visited()
+			 * which resets stack/reg liveness for state transitions
+			 */
+			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 			mark_stack_slot_read(state, spi);
 		}
 		return 0;
@@ -1244,9 +1265,9 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 }
 
 /* Does this register contain a constant zero? */
-static bool register_is_null(struct bpf_reg_state reg)
+static bool register_is_null(struct bpf_reg_state *reg)
 {
-	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
+	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
 }
 
 /* when register 'regno' is passed into function that will read 'access_size'
@@ -1259,31 +1280,31 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 				int access_size, bool zero_size_allowed,
 				struct bpf_call_arg_meta *meta)
 {
+	struct bpf_reg_state *reg = cur_regs(env) + regno;
 	struct bpf_verifier_state *state = env->cur_state;
-	struct bpf_reg_state *regs = state->regs;
 	int off, i, slot, spi;
 
-	if (regs[regno].type != PTR_TO_STACK) {
+	if (reg->type != PTR_TO_STACK) {
 		/* Allow zero-byte read from NULL, regardless of pointer type */
 		if (zero_size_allowed && access_size == 0 &&
-		    register_is_null(regs[regno]))
+		    register_is_null(reg))
 			return 0;
 
 		verbose(env, "R%d type=%s expected=%s\n", regno,
-			reg_type_str[regs[regno].type],
+			reg_type_str[reg->type],
 			reg_type_str[PTR_TO_STACK]);
 		return -EACCES;
 	}
 
 	/* Only allow fixed-offset stack reads */
-	if (!tnum_is_const(regs[regno].var_off)) {
+	if (!tnum_is_const(reg->var_off)) {
 		char tn_buf[48];
 
-		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
+		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
 	}
-	off = regs[regno].off + regs[regno].var_off.value;
+	off = reg->off + reg->var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
@@ -1391,7 +1412,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
		 * passed in as argument, it's a SCALAR_VALUE type. Final test
		 * happens during stack boundary checking.
		 */
-		if (register_is_null(*reg) &&
+		if (register_is_null(reg) &&
		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
			/* final test in check_stack_boundary() */;
		else if (!type_is_pkt_pointer(type) &&
@@ -2934,8 +2955,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	if (BPF_SRC(insn->code) == BPF_K &&
 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == SCALAR_VALUE &&
-	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
-		if (opcode == BPF_JEQ) {
+	    tnum_is_const(dst_reg->var_off)) {
+		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
+		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
 			/* if (imm == imm) goto pc+off;
 			 * only follow the goto, ignore fall-through
 			 */
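The rewritten condition prunes the dead branch whenever dst_reg holds a known constant, for both JEQ and JNE, instead of only when the constant happens to equal the immediate. A simplified user-space sketch of that decision, with struct tnum_demo and tnum_is_const_demo as stand-ins (assumptions) for the kernel's struct tnum and tnum_is_const():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct tnum: value/mask pair where a clear
 * mask bit means that bit of 'value' is known.
 */
struct tnum_demo {
	unsigned long long value;
	unsigned long long mask;
};

/* tnum_is_const() corresponds to mask == 0 here. */
static bool tnum_is_const_demo(struct tnum_demo t)
{
	return t.mask == 0;
}

int main(void)
{
	struct tnum_demo known_five = { .value = 5, .mask = 0 };
	struct tnum_demo unknown = { .value = 0, .mask = ~0ULL };
	int imm = 5;

	if (tnum_is_const_demo(known_five)) {
		/* JEQ imm is always taken, JNE imm is never taken */
		printf("JEQ %d taken: %d\n", imm,
		       known_five.value == (unsigned long long)imm);
		printf("JNE %d taken: %d\n", imm,
		       known_five.value != (unsigned long long)imm);
	}
	if (!tnum_is_const_demo(unknown))
		printf("value unknown: verifier explores both branches\n");
	return 0;
}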