@@ -311,6 +311,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 			verbose(env, "=%s",
 				reg_type_str[state->stack[i].spilled_ptr.type]);
 		}
+		if (state->stack[i].slot_type[0] == STACK_ZERO)
+			verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
 	}
 	verbose(env, "\n");
 }
@@ -522,6 +524,13 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
 	__mark_reg_known(reg, 0);
 }
 
+static void __mark_reg_const_zero(struct bpf_reg_state *reg)
+{
+	__mark_reg_known(reg, 0);
+	reg->off = 0;
+	reg->type = SCALAR_VALUE;
+}
+
 static void mark_reg_known_zero(struct bpf_verifier_env *env,
 				struct bpf_reg_state *regs, u32 regno)
 {
@@ -937,6 +946,12 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	}
 }
 
+/* Does this register contain a constant zero? */
+static bool register_is_null(struct bpf_reg_state *reg)
+{
+	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
+}
+
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
@@ -984,12 +999,30 @@ static int check_stack_write(struct bpf_verifier_env *env,
 		for (i = 0; i < BPF_REG_SIZE; i++)
 			state->stack[spi].slot_type[i] = STACK_SPILL;
 	} else {
+		u8 type = STACK_MISC;
+
 		/* regular write of data into stack */
 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
 
+		/* only mark the slot as written if all 8 bytes were written
+		 * otherwise read propagation may incorrectly stop too soon
+		 * when stack slots are partially written.
+		 * This heuristic means that read propagation will be
+		 * conservative, since it will add reg_live_read marks
+		 * to stack slots all the way to first state when programs
+		 * writes+reads less than 8 bytes
+		 */
+		if (size == BPF_REG_SIZE)
+			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+		/* when we zero initialize stack slots mark them as such */
+		if (value_regno >= 0 &&
+		    register_is_null(&cur->regs[value_regno]))
+			type = STACK_ZERO;
+
 		for (i = 0; i < size; i++)
 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
-				STACK_MISC;
+				type;
 	}
 	return 0;
 }
@@ -1030,6 +1063,14 @@ static void mark_stack_slot_read(struct bpf_verifier_env *env,
 	bool writes = parent == state->parent; /* Observe write marks */
 
 	while (parent) {
+		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
+			/* since LIVE_WRITTEN mark is only done for full 8-byte
+			 * write the read marks are conservative and parent
+			 * state may not even have the stack allocated. In such case
+			 * end the propagation, since the loop reached beginning
+			 * of the function
+			 */
+			break;
 		/* if read wasn't screened by an earlier write ... */
 		if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
 			break;
@@ -1077,21 +1118,38 @@ static int check_stack_read(struct bpf_verifier_env *env,
 			 * which resets stack/reg liveness for state transitions
 			 */
 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
-			mark_stack_slot_read(env, vstate, vstate->parent, spi,
-					     reg_state->frameno);
 		}
+		mark_stack_slot_read(env, vstate, vstate->parent, spi,
+				     reg_state->frameno);
 		return 0;
 	} else {
+		int zeros = 0;
+
 		for (i = 0; i < size; i++) {
-			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
-				verbose(env, "invalid read from stack off %d+%d size %d\n",
-					off, i, size);
-				return -EACCES;
+			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
+				continue;
+			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+				zeros++;
+				continue;
 			}
+			verbose(env, "invalid read from stack off %d+%d size %d\n",
+				off, i, size);
+			return -EACCES;
+		}
+		mark_stack_slot_read(env, vstate, vstate->parent, spi,
+				     reg_state->frameno);
+		if (value_regno >= 0) {
+			if (zeros == size) {
+				/* any size read into register is zero extended,
+				 * so the whole register == const_zero
+				 */
+				__mark_reg_const_zero(&state->regs[value_regno]);
+			} else {
+				/* have read misc data from the stack */
+				mark_reg_unknown(env, state->regs, value_regno);
+			}
+			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 		}
-		if (value_regno >= 0)
-			/* have read misc data from the stack */
-			mark_reg_unknown(env, state->regs, value_regno);
 		return 0;
 	}
 }
@@ -1578,12 +1636,6 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 			       BPF_SIZE(insn->code), BPF_WRITE, -1);
 }
 
-/* Does this register contain a constant zero? */
-static bool register_is_null(struct bpf_reg_state *reg)
-{
-	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
-}
-
 /* when register 'regno' is passed into function that will read 'access_size'
  * bytes from that pointer, make sure that it's within stack boundary
  * and all elements of stack are initialized.
@@ -1633,15 +1685,30 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 	}
 
 	for (i = 0; i < access_size; i++) {
+		u8 *stype;
+
 		slot = -(off + i) - 1;
 		spi = slot / BPF_REG_SIZE;
-		if (state->allocated_stack <= slot ||
-		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
-			STACK_MISC) {
-			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
-				off, i, access_size);
-			return -EACCES;
+		if (state->allocated_stack <= slot)
+			goto err;
+		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+		if (*stype == STACK_MISC)
+			goto mark;
+		if (*stype == STACK_ZERO) {
+			/* helper can write anything into the stack */
+			*stype = STACK_MISC;
+			goto mark;
 		}
+err:
+		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+			off, i, access_size);
+		return -EACCES;
+mark:
+		/* reading any byte out of 8-byte 'spill_slot' will cause
+		 * the whole slot to be marked as 'read'
+		 */
+		mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
+				     spi, state->frameno);
 	}
 	return update_stack_depth(env, state, off);
 }
@@ -4022,8 +4089,19 @@ static bool stacksafe(struct bpf_func_state *old,
 	for (i = 0; i < old->allocated_stack; i++) {
 		spi = i / BPF_REG_SIZE;
 
+		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+			/* explored state didn't use this */
+			return true;
+
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
+		/* if old state was safe with misc data in the stack
+		 * it will be safe with zero-initialized stack.
+		 * The opposite is not true
+		 */
+		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
+		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
+			continue;
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
 			/* Ex: old explored (safe) state has STACK_SPILL in
@@ -4164,10 +4242,6 @@ static int propagate_liveness(struct bpf_verifier_env *env,
 		parent = vparent->frame[frame];
 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
 		     i < parent->allocated_stack / BPF_REG_SIZE; i++) {
-			if (parent->stack[i].slot_type[0] != STACK_SPILL)
-				continue;
-			if (state->stack[i].slot_type[0] != STACK_SPILL)
-				continue;
 			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
 				continue;
 			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
@@ -4247,8 +4321,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		struct bpf_func_state *frame = cur->frame[j];
 
 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
-			if (frame->stack[i].slot_type[0] == STACK_SPILL)
-				frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
+			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
 	}
 	return 0;
 }
0 commit comments