12
12
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
13
13
#
14
14
# ===---------------------------------------------------------------------===//
15
+ """
16
+ Benchmark_Driver is a tool for running and analysing Swift Benchmarking Suite.
17
+
18
+ Example:
19
+ $ Benchmark_Driver run
20
+
21
+ Use `Benchmark_Driver -h` for help on available commands and options.
22
+
23
+ class `BenchmarkDriver` runs performance tests and implements the `run` COMMAND.
24
+ class `BenchmarkDoctor` analyzes performance tests, implements `check` COMMAND.
25
+
26
+ """
15
27
16
28
import argparse
17
29
import glob
@@ -29,19 +41,26 @@ DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
29
41
30
42
31
43
class BenchmarkDriver (object ):
32
- """Executes tests from Swift Benchmark Suite."""
44
+ """Executes tests from Swift Benchmark Suite.
45
+
46
+ It's a higher level wrapper for the Benchmark_X family of binaries
47
+ (X = [O, Onone, Osize]).
48
+ """
33
49
34
50
    def __init__(self, args, tests=None, _subprocess=None, parser=None):
        """Initialize with command line arguments.

        Optional parameters are for injecting dependencies -- used for testing.
        """
        self.args = args
        # Injected stand-in, or the real `subprocess` module, used by _invoke.
        self._subprocess = _subprocess or subprocess
        self.all_tests = []
        # Discover tests from the benchmark binary unless explicitly provided.
        self.tests = tests or self._get_tests()
        self.parser = parser or LogParser()
        self.results = {}
        # Set a constant hash seed. Some tests are currently sensitive to
        # fluctuations in the number of hash collisions.
        # NOTE(review): this mutates the driver's own process environment, not
        # only the child process that runs the tests -- confirm that's intended.
        os.environ['SWIFT_DETERMINISTIC_HASHING'] = '1'
45
64
46
65
def _invoke (self , cmd ):
47
66
return self ._subprocess .check_output (
@@ -54,6 +73,28 @@ class BenchmarkDriver(object):
54
73
else 'O' )
55
74
return os .path .join (self .args .tests , "Benchmark_" + suffix )
56
75
76
+ def _git (self , cmd ):
77
+ """Execute the Git command in the `swift-repo`."""
78
+ return self ._invoke (
79
+ ('git -C {0} ' .format (self .args .swift_repo ) + cmd ).split ()).strip ()
80
+
81
+ @property
82
+ def log_file (self ):
83
+ """Full path to log file.
84
+
85
+ If `swift-repo` is set, log file is tied to Git branch and revision.
86
+ """
87
+ if not self .args .output_dir :
88
+ return None
89
+ log_dir = self .args .output_dir
90
+ harness_name = os .path .basename (self .test_harness )
91
+ suffix = '-' + time .strftime ('%Y%m%d%H%M%S' , time .localtime ())
92
+ if self .args .swift_repo :
93
+ log_dir = os .path .join (
94
+ log_dir , self ._git ('rev-parse --abbrev-ref HEAD' )) # branch
95
+ suffix += '-' + self ._git ('rev-parse --short HEAD' ) # revision
96
+ return os .path .join (log_dir , harness_name + suffix + '.log' )
97
+
57
98
@property
58
99
def _cmd_list_benchmarks (self ):
59
100
# Use tab delimiter for easier parsing to override the default comma.
@@ -128,6 +169,65 @@ class BenchmarkDriver(object):
128
169
[self .run (test , measure_memory = True )
129
170
for _ in range (self .args .iterations )])
130
171
172
+ def log_results (self , output , log_file = None ):
173
+ """Log output to `log_file`.
174
+
175
+ Creates `args.output_dir` if it doesn't exist yet.
176
+ """
177
+ log_file = log_file or self .log_file
178
+ dir = os .path .dirname (log_file )
179
+ if not os .path .exists (dir ):
180
+ os .makedirs (dir )
181
+ print ('Logging results to: %s' % log_file )
182
+ with open (log_file , 'w' ) as f :
183
+ f .write (output )
184
+
185
    # Column layout for the justified-columns console format (see run_and_log).
    RESULT = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
186
+
187
+ def run_and_log (self , csv_console = True ):
188
+ """Run benchmarks and continuously log results to the console.
189
+
190
+ There are two console log formats: CSV and justified columns. Both are
191
+ compatible with `LogParser`. Depending on the `csv_console` parameter,
192
+ the CSV log format is either printed to console or returned as a string
193
+ from this method. When `csv_console` is False, the console output
194
+ format is justified columns.
195
+ """
196
+
197
+ format = (
198
+ (lambda values : ',' .join (values )) if csv_console else
199
+ (lambda values : self .RESULT .format (* values ))) # justified columns
200
+
201
+ def console_log (values ):
202
+ print (format (values ))
203
+
204
+ console_log (['#' , 'TEST' , 'SAMPLES' , 'MIN(μs)' , 'MAX(μs)' , # header
205
+ 'MEAN(μs)' , 'SD(μs)' , 'MEDIAN(μs)' , 'MAX_RSS(B)' ])
206
+
207
+ def result_values (r ):
208
+ return map (str , [r .test_num , r .name , r .num_samples , r .min , r .max ,
209
+ int (r .mean ), int (r .sd ), r .median , r .max_rss ])
210
+
211
+ results = []
212
+ for test in self .tests :
213
+ result = result_values (self .run_independent_samples (test ))
214
+ console_log (result )
215
+ results .append (result )
216
+
217
+ print (
218
+ '\n Total performance tests executed: {0}' .format (len (self .tests )))
219
+ return (None if csv_console else
220
+ ('\n ' .join ([',' .join (r ) for r in results ]) + '\n ' )) # csv_log
221
+
222
+ @staticmethod
223
+ def run_benchmarks (args ):
224
+ """Run benchmarks and log results."""
225
+ driver = BenchmarkDriver (args )
226
+ csv_log = driver .run_and_log (csv_console = (args .output_dir is None ))
227
+ if csv_log :
228
+ driver .log_results (csv_log )
229
+ return 0
230
+
131
231
132
232
class LoggingReportFormatter (logging .Formatter ):
133
233
"""Format logs as plain text or with colors on the terminal.
@@ -356,118 +456,21 @@ class BenchmarkDoctor(object):
356
456
357
457
@staticmethod
358
458
def run_check (args ):
459
+ """Validate benchmarks conform to health rules, report violations."""
359
460
doctor = BenchmarkDoctor (args )
360
461
doctor .check ()
361
462
# TODO non-zero error code when errors are logged
362
463
# See https://stackoverflow.com/a/31142078/41307
363
464
return 0
364
465
365
466
366
- def get_current_git_branch (git_repo_path ):
367
- """Return the selected branch for the repo `git_repo_path`"""
368
- return subprocess .check_output (
369
- ['git' , '-C' , git_repo_path , 'rev-parse' ,
370
- '--abbrev-ref' , 'HEAD' ], stderr = subprocess .STDOUT ).strip ()
371
-
372
-
373
- def get_git_head_ID (git_repo_path ):
374
- """Return the short identifier for the HEAD commit of the repo
375
- `git_repo_path`"""
376
- return subprocess .check_output (
377
- ['git' , '-C' , git_repo_path , 'rev-parse' ,
378
- '--short' , 'HEAD' ], stderr = subprocess .STDOUT ).strip ()
379
-
380
-
381
- def log_results (log_directory , driver , formatted_output , swift_repo = None ):
382
- """Log `formatted_output` to a branch specific directory in
383
- `log_directory`
384
- """
385
- try :
386
- branch = get_current_git_branch (swift_repo )
387
- except (OSError , subprocess .CalledProcessError ):
388
- branch = None
389
- try :
390
- head_ID = '-' + get_git_head_ID (swift_repo )
391
- except (OSError , subprocess .CalledProcessError ):
392
- head_ID = ''
393
- timestamp = time .strftime ("%Y%m%d%H%M%S" , time .localtime ())
394
- if branch :
395
- output_directory = os .path .join (log_directory , branch )
396
- else :
397
- output_directory = log_directory
398
- driver_name = os .path .basename (driver )
399
- try :
400
- os .makedirs (output_directory )
401
- except OSError :
402
- pass
403
- log_file = os .path .join (output_directory ,
404
- driver_name + '-' + timestamp + head_ID + '.log' )
405
- print ('Logging results to: %s' % log_file )
406
- with open (log_file , 'w' ) as f :
407
- f .write (formatted_output )
408
-
409
-
410
- def run_benchmarks (driver ,
411
- log_directory = None , swift_repo = None ):
412
- """Run perf tests individually and return results in a format that's
413
- compatible with `LogParser`.
414
- """
415
- # Set a constant hash seed. Some tests are currently sensitive to
416
- # fluctuations in the number of hash collisions.
417
- #
418
- # FIXME: This should only be set in the environment of the child process
419
- # that runs the tests.
420
- os .environ ["SWIFT_DETERMINISTIC_HASHING" ] = "1"
421
-
422
- output = []
423
- headings = ['#' , 'TEST' , 'SAMPLES' , 'MIN(μs)' , 'MAX(μs)' , 'MEAN(μs)' ,
424
- 'SD(μs)' , 'MEDIAN(μs)' , 'MAX_RSS(B)' ]
425
- line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
426
- if log_directory :
427
- print (line_format .format (* headings ))
428
- else :
429
- print (',' .join (headings ))
430
- for test in driver .tests :
431
- r = driver .run_independent_samples (test )
432
- test_output = map (str , [
433
- r .test_num , r .name , r .num_samples , r .min , r .max , int (r .mean ),
434
- int (r .sd ), r .median , r .max_rss ])
435
- if log_directory :
436
- print (line_format .format (* test_output ))
437
- else :
438
- print (',' .join (test_output ))
439
- output .append (test_output )
440
- if not output :
441
- return
442
- formatted_output = '\n ' .join ([',' .join (l ) for l in output ])
443
- totals = ['Totals' , str (len (driver .tests ))]
444
- totals_output = '\n \n ' + ',' .join (totals )
445
- if log_directory :
446
- print (line_format .format (* (['' ] + totals + (['' ] * 6 ))))
447
- else :
448
- print (totals_output [1 :])
449
- formatted_output += totals_output
450
- if log_directory :
451
- log_results (log_directory , driver .test_harness , formatted_output ,
452
- swift_repo )
453
- return formatted_output
454
-
455
-
456
- def run (args ):
457
- run_benchmarks (
458
- BenchmarkDriver (args ),
459
- log_directory = args .output_dir ,
460
- swift_repo = args .swift_repo )
461
- return 0
462
-
463
-
464
467
def format_name(log_path):
    """Return the last two components (directory/filename) of a log path."""
    components = log_path.split('/')
    return '/'.join(components[-2:])
467
470
468
471
469
472
def compare_logs (compare_script , new_log , old_log , log_dir , opt ):
470
- """Return diff of log files at paths `new_log` and `old_log`"""
473
+ """Return diff of log files at paths `new_log` and `old_log`. """
471
474
print ('Comparing %s %s ...' % (format_name (old_log ), format_name (new_log )))
472
475
subprocess .call ([compare_script , '--old-file' , old_log ,
473
476
'--new-file' , new_log , '--format' , 'markdown' ,
@@ -477,10 +480,10 @@ def compare_logs(compare_script, new_log, old_log, log_dir, opt):
477
480
478
481
def compare (args ):
479
482
log_dir = args .log_dir
480
- swift_repo = args .swift_repo
481
483
compare_script = args .compare_script
482
484
baseline_branch = args .baseline_branch
483
- current_branch = get_current_git_branch (swift_repo )
485
+ current_branch = \
486
+ BenchmarkDriver (args , tests = ['' ])._git ('rev-parse --abbrev-ref HEAD' )
484
487
current_branch_dir = os .path .join (log_dir , current_branch )
485
488
baseline_branch_dir = os .path .join (log_dir , baseline_branch )
486
489
@@ -557,6 +560,7 @@ def compare(args):
557
560
558
561
559
562
def positive_int (value ):
563
+ """Verify the value is a positive integer."""
560
564
ivalue = int (value )
561
565
if not (ivalue > 0 ):
562
566
raise ValueError
@@ -608,7 +612,7 @@ def parse_args(args):
608
612
run_parser .add_argument (
609
613
'--swift-repo' ,
610
614
help = 'absolute path to the Swift source repository' )
611
- run_parser .set_defaults (func = run )
615
+ run_parser .set_defaults (func = BenchmarkDriver . run_benchmarks )
612
616
613
617
check_parser = subparsers .add_parser (
614
618
'check' ,
@@ -641,6 +645,7 @@ def parse_args(args):
641
645
642
646
643
647
def main():
    """Parse command line arguments and execute the specified COMMAND."""
    parsed = parse_args(sys.argv[1:])
    return parsed.func(parsed)
646
651
0 commit comments