
Commit 49e8e69

[benchmark] Strangle run and run_benchmarks
Moved all `run` command-related functionality to `BenchmarkDriver`.
Parent: 6bddcbe
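The "strangling" here is the strangler-fig refactoring move: the `run` subcommand's dispatch target is repointed from the free function `run` to the new `BenchmarkDriver.run_benchmarks` static method, and the old free functions are deleted. For context, a minimal sketch of the argparse dispatch pattern the script relies on (the subcommand wiring below is illustrative, not the script's full parser):

import argparse

def run_benchmarks(args):
    # Illustrative handler; the real target is BenchmarkDriver.run_benchmarks.
    print('would run tests:', args.tests)
    return 0

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
run_parser = subparsers.add_parser('run')
run_parser.add_argument('tests', nargs='*')
# set_defaults(func=...) stores the handler on the parsed namespace,
# so a single main() can dispatch any subcommand via args.func(args).
run_parser.set_defaults(func=run_benchmarks)

args = parser.parse_args(['run', 'b1'])
exit_code = args.func(args)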

File tree: 2 files changed (+61, -52 lines)


benchmark/scripts/Benchmark_Driver

Lines changed: 50 additions & 44 deletions
@@ -55,6 +55,9 @@ class BenchmarkDriver(object):
         self.tests = tests or self._get_tests()
         self.parser = parser or LogParser()
         self.results = {}
+        # Set a constant hash seed. Some tests are currently sensitive to
+        # fluctuations in the number of hash collisions.
+        os.environ['SWIFT_DETERMINISTIC_HASHING'] = '1'

     def _invoke(self, cmd):
         return self._subprocess.check_output(
@@ -176,6 +179,52 @@ class BenchmarkDriver(object):
         with open(log_file, 'w') as f:
             f.write(output)

+    RESULT = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
+
+    def run_and_log(self, csv_console=True):
+        """Run benchmarks and continuously log results to the console.
+
+        There are two console log formats: CSV and justified columns. Both
+        are compatible with `LogParser`. Depending on the `csv_console`
+        parameter, the CSV log format is either printed to the console or
+        returned as a string from this method. When `csv_console` is False,
+        the console output format is justified columns.
+        """
+        format = (
+            (lambda values: ','.join(values)) if csv_console else
+            (lambda values: self.RESULT.format(*values)))  # justified columns
+
+        def console_log(values):
+            print(format(values))
+
+        console_log(['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)',  # header
+                     'MEAN(μs)', 'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)'])
+
+        def result_values(r):
+            return map(str, [r.test_num, r.name, r.num_samples, r.min, r.max,
+                             int(r.mean), int(r.sd), r.median, r.max_rss])
+
+        results = []
+        for test in self.tests:
+            result = result_values(self.run_independent_samples(test))
+            console_log(result)
+            results.append(result)
+
+        print(
+            '\nTotal performance tests executed: {0}'.format(len(self.tests)))
+        return (None if csv_console else
+                ('\n'.join([','.join(r) for r in results]) + '\n'))  # csv_log
+
+    @staticmethod
+    def run_benchmarks(args):
+        """Run benchmarks and log results."""
+        driver = BenchmarkDriver(args)
+        csv_log = driver.run_and_log(csv_console=(args.output_dir is None))
+        if csv_log:
+            driver.log_results(csv_log)
+        return 0
+

 class LoggingReportFormatter(logging.Formatter):
     """Format logs as plain text or with colors on the terminal.
@@ -412,49 +461,6 @@ class BenchmarkDoctor(object):
         return 0


-def run_benchmarks(driver):
-    """Run perf tests individually and return results in a format that's
-    compatible with `LogParser`.
-    """
-    # Set a constant hash seed. Some tests are currently sensitive to
-    # fluctuations in the number of hash collisions.
-    os.environ["SWIFT_DETERMINISTIC_HASHING"] = "1"
-
-    line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
-    format = (
-        (lambda values: ','.join(values))  # CSV
-        if (driver.args.output_dir is None) else
-        (lambda values: line_format.format(*values)))  # justified columns
-
-    def console_log(values):
-        print(format(values))
-
-    console_log(['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)',  # header
-                 'MEAN(μs)', 'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)'])
-
-    def result_values(r):
-        return map(str, [r.test_num, r.name, r.num_samples, r.min, r.max,
-                         int(r.mean), int(r.sd), r.median, r.max_rss])
-
-    results = []
-    for test in driver.tests:
-        result = result_values(driver.run_independent_samples(test))
-        console_log(result)
-        results.append(result)
-
-    print('\nTotal performance tests executed: {0}'.format(len(driver.tests)))
-    csv_log = '\n'.join([','.join(r) for r in results]) + '\n'
-    return csv_log
-
-
-def run(args):
-    driver = BenchmarkDriver(args)
-    csv_log = run_benchmarks(driver)
-    if args.output_dir and csv_log:
-        driver.log_results(csv_log)
-    return 0
-
-
 def format_name(log_path):
     """Return the filename and directory for a log file."""
     return '/'.join(log_path.split('/')[-2:])
@@ -603,7 +609,7 @@ def parse_args(args):
     run_parser.add_argument(
         '--swift-repo',
         help='absolute path to the Swift source repository')
-    run_parser.set_defaults(func=run)
+    run_parser.set_defaults(func=BenchmarkDriver.run_benchmarks)

     check_parser = subparsers.add_parser(
         'check',
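The `csv_console` switch in `run_and_log` amounts to choosing one of two row formatters over the same list of string values. A standalone sketch of just that selection, reusing the `RESULT` format string from the diff above (the sample row is made up):

RESULT = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'

def make_formatter(csv_console):
    # CSV rows when results are only logged to the console;
    # justified columns when the CSV log goes to a file instead.
    if csv_console:
        return lambda values: ','.join(values)
    return lambda values: RESULT.format(*values)

header = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)',
          'MEAN(μs)', 'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
row = ['3', 'b1', '1', '123', '123', '123', '0', '123', '888']

for csv_console in (True, False):
    fmt = make_formatter(csv_console)
    print(fmt(header))
    print(fmt(row))

Both outputs keep the same column order and header names, which is what keeps them parseable by the same `LogParser`.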

benchmark/scripts/test_Benchmark_Driver.py

Lines changed: 11 additions & 8 deletions
@@ -285,20 +285,19 @@ def test_run_benchmark_independent_samples(self):
             ('/benchmarks/Benchmark_O', 'b1', '--memory')), 3)
         self.assertEquals(r.num_samples, 3)  # results are merged

-    def test_run_bechmarks(self):
+    def test_run_and_log(self):
         def mock_run(test):
             self.assertEquals(test, 'b1')
             return PerformanceTestResult(
                 '3,b1,1,123,123,123,0,123,888'.split(','))
-        run_benchmarks = Benchmark_Driver.run_benchmarks
-        driver = Stub(tests=['b1'], args=Stub(output_dir=None))
-        driver.run_independent_samples = mock_run
+        driver = BenchmarkDriver(tests=['b1'], args=Stub(output_dir=None))
+        driver.run_independent_samples = mock_run  # patching

         with captured_output() as (out, _):
-            log = run_benchmarks(driver)
+            log = driver.run_and_log()

         csv_log = '3,b1,1,123,123,123,0,123,888\n'
-        self.assertEquals(log, csv_log)
+        self.assertEquals(log, None)
         self.assertEquals(
             out.getvalue(),
             '#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),' +
@@ -307,9 +306,8 @@ def mock_run(test):
             '\n' +
             'Total performance tests executed: 1\n')

-        driver.args.output_dir = 'logs/'
         with captured_output() as (out, _):
-            log = run_benchmarks(driver)
+            log = driver.run_and_log(csv_console=False)

         self.assertEquals(log, csv_log)
         self.assertEquals(
@@ -353,6 +351,11 @@ def assert_log_written(out, log_file, content):
         import shutil  # tearDown
         shutil.rmtree(temp_dir)

+    def test_deterministing_hashing(self):
+        cmd = ['printenv', 'SWIFT_DETERMINISTIC_HASHING']
+        driver = BenchmarkDriver(['no args'], tests=['ignored'])
+        self.assertEquals(driver._invoke(cmd).strip(), '1')
+

 class BenchmarkDriverMock(Mock):
     """Mock for BenchmarkDriver's `run` method"""
