Commit 08047e4

[benchmark] Python 3 Support
1 parent 85260ca commit 08047e4

File tree: 5 files changed, +64 -41 lines

benchmark/scripts/Benchmark_Driver

Lines changed: 18 additions & 12 deletions
@@ -34,6 +34,7 @@ import re
 import subprocess
 import sys
 import time
+from functools import reduce

 from compare_perf_tests import LogParser

@@ -65,7 +66,7 @@ class BenchmarkDriver(object):

     def _invoke(self, cmd):
         return self._subprocess.check_output(
-            cmd, stderr=self._subprocess.STDOUT)
+            cmd, stderr=self._subprocess.STDOUT, universal_newlines=True)

     @property
     def test_harness(self):
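
On Python 3, `subprocess.check_output` returns `bytes` by default; `universal_newlines=True` switches the output to text (`str`) on both major versions, which is what the downstream `LogParser` expects. A minimal sketch (illustrative command, not the benchmark harness):

import subprocess

# Without universal_newlines, Python 3 hands back bytes that str-based
# parsing code cannot split or regex-match without decoding first.
raw = subprocess.check_output(['echo', 'hi'])
text = subprocess.check_output(['echo', 'hi'], universal_newlines=True)
print(type(raw))   # Python 3: <class 'bytes'>; Python 2: <type 'str'>
print(type(text))  # str on both versions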
@@ -144,7 +145,7 @@ class BenchmarkDriver(object):
             verbose, measure_memory, quantile, gather_metadata)
         output = self._invoke(cmd)
         results = self.parser.results_from_string(output)
-        return results.items()[0][1] if test else results
+        return list(results.items())[0][1] if test else results

     def _cmd_run(self, test, num_samples, num_iters, sample_time, min_samples,
                  verbose, measure_memory, quantile, gather_metadata):
@@ -219,9 +220,9 @@ class BenchmarkDriver(object):
         print(format(values))

         def result_values(r):
-            return map(str, [r.test_num, r.name, r.num_samples, r.min,
-                             r.samples.q1, r.median, r.samples.q3, r.max,
-                             r.max_rss])
+            return [str(value) for value in
+                    [r.test_num, r.name, r.num_samples, r.min,
+                     r.samples.q1, r.median, r.samples.q3, r.max, r.max_rss]]

         header = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'Q1(μs)', 'MEDIAN(μs)',
                   'Q3(μs)', 'MAX(μs)', 'MAX_RSS(B)']
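
Both hunks above fix the same Python 3 shift from eager lists to lazy objects: `dict.items()` now returns a non-indexable view, and `map()` returns a one-shot iterator. A minimal sketch with hypothetical data:

# results.items()[0] raises TypeError on Python 3 ('dict_items' object
# is not subscriptable); materializing the view restores indexing.
results = {'AngryPhonebook': 'result object'}
first = list(results.items())[0][1]

# map() is lazy as well; a list comprehension yields a real list.
values = [str(v) for v in [1, 'AngryPhonebook', 20, 23641]]
print(first, values)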
@@ -303,7 +304,11 @@ class MarkdownReportHandler(logging.StreamHandler):
         msg = self.format(record)
         stream = self.stream
         try:
-            if (isinstance(msg, unicode) and
+            unicode_type = unicode  # Python 2
+        except NameError:
+            unicode_type = str  # Python 3
+        try:
+            if (isinstance(msg, unicode_type) and
                     getattr(stream, 'encoding', None)):
                 stream.write(msg.encode(stream.encoding))
             else:
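
The `try`/`except NameError` probe is feature detection rather than version checking: the name `unicode` simply does not exist on Python 3. The same pattern works for any builtin Python 3 removed:

# Sketch (illustrative): probe for a Python 2-only builtin by name.
try:
    text_type = unicode  # Python 2: the unicode string type
except NameError:
    text_type = str      # Python 3: every str is already unicode
print(isinstance(u'MIN(μs)', text_type))  # True on both versions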
@@ -415,10 +420,10 @@ class BenchmarkDoctor(object):
         setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
         setup = 0 if ratio < 0.05 else setup
         runtime = min(
-            [(result.samples.min - correction) for i_series in
-             [BenchmarkDoctor._select(measurements, num_iters=i)
-              for correction in [(setup / i) for i in [1, 2]]
-              ] for result in i_series])
+            [(result.samples.min - correction) for correction, i_series in
+             [(correction, BenchmarkDoctor._select(measurements, num_iters=i))
+              for i, correction in [(i, setup // i) for i in [1, 2]]]
+             for result in i_series])

         threshold = 1000
         if threshold < runtime:
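
This hunk restructures the nested comprehension because Python 3 gives list comprehensions their own scope: the original code relied on the Python 2 behavior of inner loop variables (`i`, `correction`) leaking into the enclosing expression, so the rewrite threads each `(i, correction)` pair through explicitly. It also switches to `//` so the correction stays an integer. The scoping change in miniature:

# Sketch (illustrative): comprehension variables no longer leak.
[i for i in [1, 2]]
# print(i)  # Python 2: prints 2; Python 3: NameError: name 'i' is not defined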
@@ -473,7 +478,7 @@ class BenchmarkDoctor(object):

     @staticmethod
     def _reasonable_setup_time(measurements):
-        setup = min([result.setup
+        setup = min([result.setup or 0
                      for result in BenchmarkDoctor._select(measurements)])
         if 200000 < setup:  # 200 ms
             BenchmarkDoctor.log_runtime.error(
@@ -537,7 +542,7 @@ class BenchmarkDoctor(object):

         def capped(s):
             return min(s, 200)
-        run_args = [(capped(num_samples), 1), (capped(num_samples / 2), 2)]
+        run_args = [(capped(num_samples), 1), (capped(num_samples // 2), 2)]
         opts = self.driver.args.optimization
         opts = opts if isinstance(opts, list) else [opts]
         self.log.debug(
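
Two more classic porting pitfalls fixed above: `/` between ints yields a float on Python 3, and `None` is no longer orderable against numbers. In brief:

# Sketch (illustrative): true division and None ordering in Python 3.
print(5 / 2)    # Python 2: 2; Python 3: 2.5 - hence num_samples // 2
print(5 // 2)   # 2 on both versions

# Python 2 sorted None below every int, so min() over [None, 5] quietly
# returned None; Python 3 raises TypeError - hence `result.setup or 0`.
print(min(v or 0 for v in [None, 5]))  # 0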
@@ -691,6 +696,7 @@ def parse_args(args):
     subparsers = parser.add_subparsers(
         title='Swift benchmark driver commands',
         help='See COMMAND -h for additional arguments', metavar='COMMAND')
+    subparsers.required = True

     shared_benchmarks_parser = argparse.ArgumentParser(add_help=False)
     benchmarks_group = shared_benchmarks_parser.add_mutually_exclusive_group()
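
`subparsers.required = True` is needed because argparse on Python 3 treats subcommands as optional by default, while Python 2 rejected an empty command line with 'too few arguments' (the loosened assertion in test_Benchmark_Driver.py below matches the new wording). A standalone sketch (hypothetical parser, not the driver's full setup):

import argparse

parser = argparse.ArgumentParser(prog='Benchmark_Driver')
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True  # Python 3: otherwise an empty argv parses fine
subparsers.add_parser('run')

# parser.parse_args([])  # now exits with:
# 'error: the following arguments are required: COMMAND'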

benchmark/scripts/compare_perf_tests.py

Lines changed: 13 additions & 6 deletions
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 # -*- coding: utf-8 -*-

 # ===--- compare_perf_tests.py -------------------------------------------===//
@@ -33,6 +33,7 @@ class `ReportFormatter` creates the test comparison report in specified format.
 import sys
 from bisect import bisect_left, bisect_right
 from collections import namedtuple
+from functools import reduce
 from math import ceil, sqrt


@@ -164,13 +165,14 @@ def sd(self):
                 sqrt(self.S_runtime / (self.count - 1)))

     @staticmethod
-    def running_mean_variance((k, M_, S_), x):
+    def running_mean_variance(stats, x):
         """Compute running variance, B. P. Welford's method.

         See Knuth TAOCP vol 2, 3rd edition, page 232, or
         https://www.johndcook.com/blog/standard_deviation/
         M is mean, Standard Deviation is defined as sqrt(S/k-1)
         """
+        (k, M_, S_) = stats
         k = float(k + 1)
         M = M_ + (x - M_) / k
         S = S_ + (x - M_) * (x - M)
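
Two Python 3 changes meet in this file: `reduce` moved from a builtin into `functools`, and PEP 3113 removed tuple parameters in function signatures, so the state triple `(k, M_, S_)` is now unpacked in the body. A self-contained sketch of the Welford update, checked against the AngryPhonebook samples from the test data:

from functools import reduce  # no longer a builtin on Python 3
from math import sqrt

def running_mean_variance(stats, x):
    (k, M_, S_) = stats          # was: def running_mean_variance((k, M_, S_), x)
    k = float(k + 1)
    M = M_ + (x - M_) / k        # running mean
    S = S_ + (x - M_) * (x - M)  # running sum of squared deviations
    return (k, M, S)

samples = [12325, 11616, 12270]
k, M, S = reduce(running_mean_variance, samples, (0, 0.0, 0.0))
print(round(M, 2))                  # 12070.33, the mean
print(round(sqrt(S / (k - 1)), 2))  # 394.42, sample standard deviation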
@@ -662,7 +664,7 @@ def _column_widths(self):
         def max_widths(maximum, widths):
             return map(max, zip(maximum, widths))

-        return reduce(max_widths, widths, [0] * 4)
+        return list(reduce(max_widths, widths, [0] * 4))

     def _formatted_text(self, label_formatter, ventile_formatter,
                         COLUMN_SEPARATOR, DELIMITER_ROW, SEPARATOR, SECTION):
@@ -679,7 +681,8 @@ def row(contents):

         def header(title, column_labels):
             labels = (column_labels if not self.single_table else
-                      map(label_formatter, (title, ) + column_labels[1:]))
+                      [label_formatter(c)
+                       for c in (title, ) + column_labels[1:]])
             h = (('' if not self.header_printed else SEPARATOR) +
                  row(labels) +
                  (row(DELIMITER_ROW) if not self.header_printed else ''))
@@ -852,8 +855,12 @@ def main():
         print(report)

     if args.output:
-        with open(args.output, 'w') as f:
-            f.write(report)
+        if sys.version_info < (3, 0):
+            with open(args.output, 'w') as f:
+                f.write(report)
+        else:
+            with open(args.output, 'w', encoding='utf-8') as f:
+                f.write(report)


 if __name__ == '__main__':
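
The version check exists because Python 2's builtin `open` has no `encoding` parameter and the report contains non-ASCII text such as 'μs'. A possible single-codepath alternative (an assumption, not what this commit does) is `io.open`, which accepts `encoding=` on both versions:

import io

def write_report(path, report):
    # io.open expects unicode text on both Python 2 and 3.
    with io.open(path, 'w', encoding='utf-8') as f:
        f.write(report)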

benchmark/scripts/test_Benchmark_Driver.py

Lines changed: 5 additions & 6 deletions
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 # -*- coding: utf-8 -*-

 # ===--- test_Benchmark_Driver.py ----------------------------------------===//
@@ -17,12 +17,11 @@
 import os
 import time
 import unittest
-from StringIO import StringIO
-from imp import load_source

 from compare_perf_tests import PerformanceTestResult

 from test_utils import Mock, MockLoggingHandler, Stub, captured_output
+from test_utils import StringIO, load_source

 # import Benchmark_Driver  # doesn't work because it misses '.py' extension
 Benchmark_Driver = load_source(
@@ -45,7 +44,7 @@ def assert_contains(self, texts, output):
     def test_requires_command_argument(self):
         with captured_output() as (_, err):
             self.assertRaises(SystemExit, parse_args, [])
-        self.assert_contains(['usage:', 'COMMAND', 'too few arguments'],
+        self.assert_contains(['usage:', 'COMMAND', 'error:', 'arguments'],
                              err.getvalue())

     def test_command_help_lists_commands(self):
@@ -150,7 +149,7 @@ def __init__(self, responses=None):
         super(SubprocessMock, self).__init__(responses)

         def _check_output(args, stdin=None, stdout=None, stderr=None,
-                          shell=False):
+                          shell=False, universal_newlines=False):
             return self.record_and_respond(args, stdin, stdout, stderr, shell)
         self.check_output = _check_output

@@ -387,7 +386,7 @@ def test_log_results(self):
         def assert_log_written(out, log_file, content):
             self.assertEqual(out.getvalue(),
                              'Logging results to: ' + log_file + '\n')
-            with open(log_file, 'rU') as f:
+            with open(log_file, 'r') as f:
                 text = f.read()
             self.assertEqual(text, "formatted output")

benchmark/scripts/test_compare_perf_tests.py

Lines changed: 14 additions & 15 deletions
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 # -*- coding: utf-8 -*-

 # ===--- test_compare_perf_tests.py --------------------------------------===//
@@ -377,8 +377,7 @@ def test_merge(self):
 1,AngryPhonebook,1,12325,12325,12325,0,12325,10510336
 1,AngryPhonebook,1,11616,11616,11616,0,11616,10502144
 1,AngryPhonebook,1,12270,12270,12270,0,12270,10498048""".split('\n')[1:]
-        results = map(PerformanceTestResult,
-                      [line.split(',') for line in tests])
+        results = [PerformanceTestResult(line.split(',')) for line in tests]

         def as_tuple(r):
             return (r.num_samples, r.min, r.max, round(r.mean, 2),
@@ -524,17 +523,13 @@ class OldAndNewLog(unittest.TestCase):

     old_results = dict([(r.name, r)
                         for r in
-                        map(PerformanceTestResult,
-                            [line.split(',')
-                             for line in
-                             old_log_content.splitlines()])])
+                        [PerformanceTestResult(line.split(','))
+                         for line in old_log_content.splitlines()]])

     new_results = dict([(r.name, r)
                         for r in
-                        map(PerformanceTestResult,
-                            [line.split(',')
-                             for line in
-                             new_log_content.splitlines()])])
+                        [PerformanceTestResult(line.split(','))
+                         for line in new_log_content.splitlines()]])

     old_results['D'] = PerformanceTestResult(
         '184,D,200,648,4,1,5,9,5,3,45,40,3,1,,,,1,1,,4,4,4,268'.split(','),
@@ -721,7 +716,7 @@ def test_results_from_merge(self):
         concatenated_logs = """4,ArrayAppend,20,23641,29000,24990,0,24990
 4,ArrayAppend,1,20000,20000,20000,0,20000"""
         results = LogParser.results_from_string(concatenated_logs)
-        self.assertEqual(results.keys(), ['ArrayAppend'])
+        self.assertEqual(list(results.keys()), ['ArrayAppend'])
         result = results['ArrayAppend']
         self.assertTrue(isinstance(result, PerformanceTestResult))
         self.assertEqual(result.min, 20000)
@@ -743,7 +738,7 @@ def test_results_from_merge_verbose(self):
 Sample 3,364245
 3,Array2D,4,363094,376131,368159,5931,369169"""
         results = LogParser.results_from_string(concatenated_logs)
-        self.assertEqual(results.keys(), ['Array2D'])
+        self.assertEqual(list(results.keys()), ['Array2D'])
         result = results['Array2D']
         self.assertTrue(isinstance(result, PerformanceTestResult))
         self.assertEqual(result.min, 350815)
@@ -1135,8 +1130,12 @@ def execute_main_with_format(self, report_format, test_output=False):
         report_out = out.getvalue()

         if test_output:
-            with open(report_file, 'r') as f:
-                report = f.read()
+            if sys.version_info < (3, 0):
+                with open(report_file, 'r') as f:
+                    report = f.read()
+            else:
+                with open(report_file, 'r', encoding='utf-8') as f:
+                    report = f.read()
             # because print adds newline, add one here, too:
             report_file = str(report + '\n')
         else:
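
The `list()` wrappers around `results.keys()` are needed because Python 3 returns view objects, which never compare equal to lists; a bare `map()` result is similarly a one-shot iterator. In miniature (illustrative data):

results = {'ArrayAppend': 'result'}
print(results.keys() == ['ArrayAppend'])        # Python 3: False (a view)
print(list(results.keys()) == ['ArrayAppend'])  # True on both versions

numbers = map(int, ['1', '2'])
print(list(numbers))  # [1, 2]
print(list(numbers))  # [] - the iterator is already exhausted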

benchmark/scripts/test_utils.py

Lines changed: 14 additions & 2 deletions
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 # -*- coding: utf-8 -*-

 # ===--- test_utils.py ---------------------------------------------------===//
@@ -24,9 +24,21 @@

 import logging
 import sys
-from StringIO import StringIO
+
+# Cross-version compatibility layer
+try:
+    from StringIO import StringIO  # for Python 2
+except ImportError:
+    from io import StringIO  # for Python 3
 from contextlib import contextmanager

+if sys.version_info < (3, 4):  # imp.load_source is deprecated in Python 3.4
+    from imp import load_source
+else:
+    def load_source(name, path):
+        from importlib.machinery import SourceFileLoader
+        return SourceFileLoader(name, path).load_module()
+

 @contextmanager
 def captured_output():
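
This shim centralizes both compatibility imports that the test modules now take from `test_utils`. Note that `SourceFileLoader.load_module()` is itself deprecated in later Python 3 releases; a non-deprecated variant (an assumption on my part, not what this commit ships) goes through `importlib.util`:

import importlib.util
from importlib.machinery import SourceFileLoader

def load_source(name, path):
    # Build a module spec from an explicit file path, even without '.py'.
    spec = importlib.util.spec_from_loader(name, SourceFileLoader(name, path))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# Usage mirrors the tests: load the extensionless driver script as a module.
# Benchmark_Driver = load_source('Benchmark_Driver', 'scripts/Benchmark_Driver')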
