@@ -93,6 +93,10 @@ def main():
     argparser.add_argument(
         '-num-samples', type=int,
         help='The (minimum) number of samples to run', default=3)
+    argparser.add_argument(
+        '-num-reruns', type=int,
+        help="The number of re-runs until it's assumed to be a real change",
+        default=8)
     argparser.add_argument(
         '-platform', type=str,
         help='The benchmark build platform', default='macosx')
@@ -120,7 +124,7 @@ def test_opt_levels(args):
         if test_performance(opt_level, args.oldbuilddir[0],
                             args.newbuilddir[0],
                             float(args.threshold) / 100, args.num_samples,
-                            output_file):
+                            args.num_reruns, output_file):
             changes = True

     # There is no point in reporting code size for Onone.
@@ -171,7 +175,7 @@ def merge(results, other_results):

 def test_performance(opt_level, old_dir, new_dir, threshold, num_samples,
-                     output_file):
+                     num_reruns, output_file):
     """Detect performance changes in benchmarks.

     Start fast with few samples per benchmark and gradually spend more time
@@ -185,7 +189,7 @@ def test_performance(opt_level, old_dir, new_dir, threshold, num_samples,
     tests = TestComparator(results[0], results[1], threshold)
     changed = tests.decreased + tests.increased

-    while len(changed) > 0 and unchanged_length_count < 10:
+    while len(changed) > 0 and unchanged_length_count < num_reruns:
         i += 1
         if VERBOSE:
             log('  test again: ' + str([test.name for test in changed]))
0 commit comments