@@ -208,7 +208,7 @@ def test_test_harness(self):
                 self.args,
                 tests=["ignored"],
                 _subprocess=self.subprocess_mock).test_harness,
-            "/benchmarks/Benchmark_O",
+            "/benchmarks/Benchmark_O-*",
         )
         self.args.tests = "/path"
         self.args.optimization = "Suffix"
@@ -217,28 +217,27 @@ def test_test_harness(self):
                 self.args,
                 tests=["ignored"],
                 _subprocess=self.subprocess_mock).test_harness,
-            "/path/Benchmark_Suffix",
+            "/path/Benchmark_Suffix-*",
         )

     def test_gets_list_of_precommit_benchmarks(self):
         self.subprocess_mock.expect(
-            "/benchmarks/Benchmark_O --list".split(" "),
-            "#\tTest\t[Tags]\n1\tBenchmark1\t[t1, t2]\n2\tBenchmark2\t[t3]\n",
+            "/benchmarks/Benchmark_O-* --list --json".split(" "),
+            """{"number":1,"name":"Benchmark1","tags":["t1","t2"]}\n"""
+            + """{"number":2,"name":"Benchmark2","tags":["t3"]}\n""",
         )
         driver = BenchmarkDriver(self.args, _subprocess=self.subprocess_mock)
         self.subprocess_mock.assert_called_all_expected()
         self.assertEqual(driver.tests, ["Benchmark1", "Benchmark2"])
         self.assertEqual(driver.all_tests, ["Benchmark1", "Benchmark2"])
-        self.assertEqual(driver.test_number["Benchmark1"], "1")
-        self.assertEqual(driver.test_number["Benchmark2"], "2")
+        self.assertEqual(driver.test_number["Benchmark1"], 1)
+        self.assertEqual(driver.test_number["Benchmark2"], 2)

     list_all_tests = (
-        "/benchmarks/Benchmark_O --list --skip-tags=".split(" "),
-        """#	Test	[Tags]
-1	Benchmark1	[t1, t2]
-2	Benchmark2	[t3]
-3	Benchmark3	[t3, t4]
-""",
+        "/benchmarks/Benchmark_O-* --list --json --skip-tags=".split(" "),
+        """{"number":1, "name": "Benchmark1", "tags":["t1","t2"]}\n"""
+        + """{"number":2, "name": "Benchmark2", "tags":["t3"]}\n"""
+        + """{"number":3, "name": "Benchmark3", "tags":["t3","t4"]}\n""",
     )

     def test_gets_list_of_all_benchmarks_when_benchmarks_args_exist(self):
@@ -251,7 +250,7 @@ def test_gets_list_of_all_benchmarks_when_benchmarks_args_exist(self):
         self.assertEqual(driver.all_tests, ["Benchmark1", "Benchmark2", "Benchmark3"])

     def test_filters_benchmarks_by_pattern(self):
-        self.args.filters = "-f .+3".split()
+        self.args.filters = [".+3"]
         self.subprocess_mock.expect(*self.list_all_tests)
         driver = BenchmarkDriver(self.args, _subprocess=self.subprocess_mock)
         self.subprocess_mock.assert_called_all_expected()
@@ -320,37 +319,37 @@ def setUp(self):
         self.parser_stub = LogParserStub()
         self.subprocess_mock = SubprocessMock()
         self.subprocess_mock.expect(
-            "/benchmarks/Benchmark_O --list".split(" "),
-            "#\tTest\t[Tags]\n1\tb1\t[tag]\n",
+            "/benchmarks/Benchmark_O-* --list --json".split(" "),
+            """{"number":1, "name":"b1", "tags":["tag"]}""",
         )
         self.driver = BenchmarkDriver(
             self.args, _subprocess=self.subprocess_mock, parser=self.parser_stub
         )

     def test_run_benchmark_with_multiple_samples(self):
         self.driver.run("b1")
-        self.subprocess_mock.assert_called_with(("/benchmarks/Benchmark_O", "b1"))
+        self.subprocess_mock.assert_called_with(("/benchmarks/Benchmark_O-*", "b1", "--json"))
         self.driver.run("b2", num_samples=5)
         self.subprocess_mock.assert_called_with(
-            ("/benchmarks/Benchmark_O", "b2", "--num-samples=5")
+            ("/benchmarks/Benchmark_O-*", "b2", "--num-samples=5", "--json")
         )

     def test_run_benchmark_with_specified_number_of_iterations(self):
         self.driver.run("b", num_iters=1)
         self.subprocess_mock.assert_called_with(
-            ("/benchmarks/Benchmark_O", "b", "--num-iters=1")
+            ("/benchmarks/Benchmark_O-*", "b", "--num-iters=1", "--json")
         )

     def test_run_benchmark_for_specified_time(self):
         self.driver.run("b", sample_time=0.5)
         self.subprocess_mock.assert_called_with(
-            ("/benchmarks/Benchmark_O", "b", "--sample-time=0.5")
+            ("/benchmarks/Benchmark_O-*", "b", "--sample-time=0.5", "--json")
         )

     def test_run_benchmark_in_verbose_mode(self):
         self.driver.run("b", verbose=True)
         self.subprocess_mock.assert_called_with(
-            ("/benchmarks/Benchmark_O", "b", "--verbose")
+            ("/benchmarks/Benchmark_O-*", "b", "--verbose", "--json")
         )

     def test_run_batch(self):
@@ -361,7 +360,7 @@ def test_run_batch(self):
         """
         self.driver.tests = ["b1", "bx"]
         self.driver.run()
-        self.subprocess_mock.assert_called_with(("/benchmarks/Benchmark_O", "1", "bx"))
+        self.subprocess_mock.assert_called_with(("/benchmarks/Benchmark_O-*", "1", "bx", "--json"))

     def test_parse_results_from_running_benchmarks(self):
         """Parse measurements results using LogParser.
@@ -379,7 +378,7 @@ def test_parse_results_from_running_benchmarks(self):
     def test_measure_memory(self):
         self.driver.run("b", measure_memory=True)
         self.subprocess_mock.assert_called_with(
-            ("/benchmarks/Benchmark_O", "b", "--memory")
+            ("/benchmarks/Benchmark_O-*", "b", "--memory", "--json")
         )

     def test_run_benchmark_independent_samples(self):
@@ -389,10 +388,11 @@ def test_run_benchmark_independent_samples(self):
         self.assertEqual(
             self.subprocess_mock.calls.count(
                 (
-                    "/benchmarks/Benchmark_O",
+                    "/benchmarks/Benchmark_O-*",
                     "b1",
                     "--num-iters=1",
                     "--memory",
+                    "--json",
                 )
             ),
             3,
@@ -501,7 +501,7 @@ def _run(
     def record_and_respond(self, test, num_samples, num_iters, verbose, measure_memory):
         args = (test, num_samples, num_iters, verbose, measure_memory)
         self.calls.append(args)
-        return self.respond.get(args, _PTR(min=700))
+        return self.respond.get(args, _PTR(min_value=700))


 class TestLoggingReportFormatter(unittest.TestCase):
@@ -604,9 +604,9 @@ def test_names_in_code_format(self):
         self.assert_contains(["| `QuotedName`"])


-def _PTR(min=700, mem_pages=1000, setup=None):
+def _PTR(min_value=700, mem_pages=1000, setup=None):
     """Create PerformanceTestResult Stub."""
-    return Stub(samples=Stub(min=min), mem_pages=mem_pages, setup=setup)
+    return Stub(min_value=min_value, mem_pages=mem_pages, setup=setup)


 def _run(test, num_samples=None, num_iters=None, verbose=None, measure_memory=False):
@@ -677,7 +677,7 @@ def test_measure_10_independent_1s_benchmark_series(self):
             # calibration run, returns a stand-in for PerformanceTestResult
             (
                 _run("B1", num_samples=3, num_iters=1, verbose=True),
-                _PTR(min=300),
+                _PTR(min_value=300),
             )
         ]
         +
@@ -693,7 +693,7 @@ def test_measure_10_independent_1s_benchmark_series(self):
                     verbose=True,
                     measure_memory=True,
                 ),
-                _PTR(min=300),
+                _PTR(min_value=300),
             )
         ]
         * 5
@@ -710,7 +710,7 @@ def test_measure_10_independent_1s_benchmark_series(self):
                     verbose=True,
                     measure_memory=True,
                 ),
-                _PTR(min=300),
+                _PTR(min_value=300),
             )
         ]
         * 5
@@ -838,8 +838,8 @@ def test_benchmark_runtime_range(self):
         def measurements(name, runtime):
             return {
                 "name": name,
-                name + " O i1a": _PTR(min=runtime + 2),
-                name + " O i2a": _PTR(min=runtime),
+                name + " O i1a": _PTR(min_value=runtime + 2),
+                name + " O i2a": _PTR(min_value=runtime),
             }

         with captured_output() as (out, _):
@@ -852,8 +852,8 @@ def measurements(name, runtime):
             doctor.analyze(
                 {
                     "name": "OverheadTurtle",
-                    "OverheadTurtle O i1a": _PTR(min=800000),
-                    "OverheadTurtle O i2a": _PTR(min=700000),
+                    "OverheadTurtle O i1a": _PTR(min_value=800000),
+                    "OverheadTurtle O i2a": _PTR(min_value=700000),
                 }
             )
         output = out.getvalue()
@@ -909,30 +909,30 @@ def test_benchmark_has_no_significant_setup_overhead(self):
                 {
                     "name": "NoOverhead",  # not 'significant' enough
                     # Based on DropFirstArray a10/e10: overhead 3.7% (6 μs)
-                    "NoOverhead O i1a": _PTR(min=162),
-                    "NoOverhead O i2a": _PTR(min=159),
+                    "NoOverhead O i1a": _PTR(min_value=162),
+                    "NoOverhead O i2a": _PTR(min_value=159),
                 }
             )
             doctor.analyze(
                 {
                     "name": "SO",  # Setup Overhead
                     # Based on SuffixArrayLazy a10/e10: overhead 5.8% (4 μs)
-                    "SO O i1a": _PTR(min=69),
-                    "SO O i1b": _PTR(min=70),
-                    "SO O i2a": _PTR(min=67),
-                    "SO O i2b": _PTR(min=68),
+                    "SO O i1a": _PTR(min_value=69),
+                    "SO O i1b": _PTR(min_value=70),
+                    "SO O i2a": _PTR(min_value=67),
+                    "SO O i2b": _PTR(min_value=68),
                 }
             )
             doctor.analyze(
-                {"name": "Zero", "Zero O i1a": _PTR(min=0), "Zero O i2a": _PTR(min=0)}
+                {"name": "Zero", "Zero O i1a": _PTR(min_value=0), "Zero O i2a": _PTR(min_value=0)}
             )
             doctor.analyze(
                 {
                     "name": "LOA",  # Limit of Accuracy
                     # Impossible to detect overhead:
                     # Even 1μs change in 20μs runtime is 5%.
-                    "LOA O i1a": _PTR(min=21),
-                    "LOA O i2a": _PTR(min=20),
+                    "LOA O i1a": _PTR(min_value=21),
+                    "LOA O i2a": _PTR(min_value=20),
                 }
             )
         output = out.getvalue()
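
Note: the updated tests expect the driver to list benchmarks via a `Benchmark_O-*` glob with `--list --json`, to read one JSON object per line, and to keep `number` as an integer in `driver.test_number`. The snippet below is only a rough sketch of parsing output in that shape, inferred from the mocked strings above; `parse_benchmark_list` is a hypothetical helper for illustration, not part of the driver's actual API.

    import json

    def parse_benchmark_list(output):
        # Each non-empty line is a JSON object such as
        # {"number":1, "name":"Benchmark1", "tags":["t1","t2"]}
        tests, test_number = [], {}
        for line in output.splitlines():
            if not line.strip():
                continue
            record = json.loads(line)
            tests.append(record["name"])
            # kept as an int, matching the updated assertions
            test_number[record["name"]] = record["number"]
        return tests, test_number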