Skip to content

Commit 748dae7

Browse files
committed
Merge remote-tracking branch 'upstream/master' into swift5-version
# Conflicts:
#	validation-test/stdlib/HashedCollectionFilter3.swift
#	validation-test/stdlib/HashingPrototype.swift
2 parents 13dea85 + 17e5fa3 commit 748dae7

File tree

447 files changed

+11678
-4075
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

447 files changed

+11678
-4075
lines changed

.flake8

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
[flake8]
2-
ignore = W291
2+
ignore = W291 W504
33
filename = *.py,
44
./utils/80+-check,
55
./utils/backtrace-check,

benchmark/scripts/Benchmark_Driver

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -343,7 +343,7 @@ class BenchmarkDoctor(object):
343343
setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
344344
setup = 0 if ratio < 0.05 else setup
345345
runtime = min(
346-
[(result.min - correction) for i_series in
346+
[(result.samples.min - correction) for i_series in
347347
[BenchmarkDoctor._select(measurements, num_iters=i)
348348
for correction in [(setup / i) for i in [1, 2]]
349349
] for result in i_series])
@@ -367,7 +367,8 @@ class BenchmarkDoctor(object):
367367
def _setup_overhead(measurements):
368368
select = BenchmarkDoctor._select
369369
ti1, ti2 = [float(min(mins)) for mins in
370-
[[result.min for result in i_series] for i_series in
370+
[[result.samples.min for result in i_series]
371+
for i_series in
371372
[select(measurements, num_iters=i) for i in [1, 2]]]]
372373
setup = int(round(2.0 * (ti1 - ti2)))
373374
ratio = (setup / ti1) if ti1 > 0 else 0
@@ -439,8 +440,9 @@ class BenchmarkDoctor(object):
439440
Returns a dictionary with benchmark name and `PerformanceTestResult`s.
440441
"""
441442
self.log.debug('Calibrating num-samples for {0}:'.format(benchmark))
442-
r = self.driver.run(benchmark, num_samples=3, num_iters=1) # calibrate
443-
num_samples = self._adjusted_1s_samples(r.min)
443+
r = self.driver.run(benchmark, num_samples=3, num_iters=1,
444+
verbose=True) # calibrate
445+
num_samples = self._adjusted_1s_samples(r.samples.min)
444446

445447
def capped(s):
446448
return min(s, 2048)
@@ -449,7 +451,7 @@ class BenchmarkDoctor(object):
449451
opts = opts if isinstance(opts, list) else [opts]
450452
self.log.debug(
451453
'Runtime {0} μs yields {1} adjusted samples per second.'.format(
452-
r.min, num_samples))
454+
r.samples.min, num_samples))
453455
self.log.debug(
454456
'Measuring {0}, 5 x i1 ({1} samples), 5 x i2 ({2} samples)'.format(
455457
benchmark, run_args[0][0], run_args[1][0]))

benchmark/scripts/Benchmark_RuntimeLeaksRunner.in

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -151,9 +151,9 @@ def parse_args():
151151

152152
if __name__ == "__main__":
153153
args = parse_args()
154-
l = LeaksRunnerBenchmarkDriver(
154+
driver = LeaksRunnerBenchmarkDriver(
155155
SWIFT_BIN_DIR, XFAIL_LIST, args.num_samples, args.num_iters)
156-
if l.run(args.filter):
156+
if driver.run(args.filter):
157157
sys.exit(0)
158158
else:
159159
sys.exit(-1)

benchmark/scripts/test_Benchmark_Driver.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -423,7 +423,7 @@ def test_no_prefix_for_base_logging(self):
423423

424424
def _PTR(min=700, mem_pages=1000, setup=None):
425425
"""Create PerformanceTestResult Stub."""
426-
return Stub(min=min, mem_pages=mem_pages, setup=setup)
426+
return Stub(samples=Stub(min=min), mem_pages=mem_pages, setup=setup)
427427

428428

429429
def _run(test, num_samples=None, num_iters=None, verbose=None,
@@ -483,7 +483,8 @@ def test_measure_10_independent_1s_benchmark_series(self):
483483
"""
484484
driver = BenchmarkDriverMock(tests=['B1'], responses=([
485485
# calibration run, returns a stand-in for PerformanceTestResult
486-
(_run('B1', num_samples=3, num_iters=1), _PTR(min=300))] +
486+
(_run('B1', num_samples=3, num_iters=1,
487+
verbose=True), _PTR(min=300))] +
487488
# 5x i1 series, with 300 μs runtime its possible to take 4098
488489
# samples/s, but it should be capped at 2k
489490
([(_run('B1', num_samples=2048, num_iters=1,

benchmark/scripts/test_compare_perf_tests.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -591,7 +591,7 @@ def test_results_from_merge(self):
591591
def test_results_from_merge_verbose(self):
592592
"""Parsing verbose log merges all PerformanceTestSamples.
593593
...this should technically be on TestPerformanceTestResult, but it's
594-
easier to write here. ¯\_(ツ)_/¯"""
594+
easier to write here. ¯\\_(ツ)_/¯"""
595595
concatenated_logs = """
596596
Sample 0,355883
597597
Sample 1,358817

benchmark/single-source/AnyHashableWithAClass.swift

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,12 @@ import TestsUtils
2525
public var AnyHashableWithAClass = BenchmarkInfo(
2626
name: "AnyHashableWithAClass",
2727
runFunction: run_AnyHashableWithAClass,
28-
tags: [.abstraction, .runtime, .cpubench]
28+
tags: [.abstraction, .runtime, .cpubench],
29+
legacyFactor: lf
2930
)
3031

32+
let lf = 500
33+
3134
class TestHashableBase : Hashable {
3235
var value: Int
3336
init(_ value: Int) {
@@ -55,8 +58,7 @@ class TestHashableDerived5 : TestHashableDerived4 {}
5558
@inline(never)
5659
public func run_AnyHashableWithAClass(_ N: Int) {
5760
let c = TestHashableDerived5(10)
58-
for _ in 0...(N*500000) {
61+
for _ in 0...(N*500000/lf) {
5962
_ = AnyHashable(c)
6063
}
6164
}
62-

benchmark/single-source/ArrayOfGenericRef.swift

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,15 @@
1313
// This benchmark tests creation and destruction of an array of enum
1414
// and generic type bound to nontrivial types.
1515
//
16-
// For comparison, we always create three arrays of 10,000 words.
16+
// For comparison, we always create three arrays of 1,000 words.
1717

1818
import TestsUtils
1919

2020
public let ArrayOfGenericRef = BenchmarkInfo(
2121
name: "ArrayOfGenericRef",
2222
runFunction: run_ArrayOfGenericRef,
23-
tags: [.validation, .api, .Array])
23+
tags: [.validation, .api, .Array],
24+
legacyFactor: 10)
2425

2526
protocol Constructible {
2627
associatedtype Element
@@ -31,8 +32,8 @@ class ConstructibleArray<T:Constructible> {
3132

3233
init(_ e:T.Element) {
3334
array = [T]()
34-
array.reserveCapacity(10_000)
35-
for _ in 0...10_000 {
35+
array.reserveCapacity(1_000)
36+
for _ in 0...1_000 {
3637
array.append(T(e:e) as T)
3738
}
3839
}
@@ -65,7 +66,7 @@ func genCommonRefArray() {
6566
class RefArray<T> {
6667
var array: [T]
6768

68-
init(_ i:T, count:Int = 10_000) {
69+
init(_ i:T, count:Int = 1_000) {
6970
array = [T](repeating: i, count: count)
7071
}
7172
}

benchmark/single-source/ArrayOfRef.swift

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,14 +14,15 @@
1414
// references. It is meant to be a baseline for comparison against
1515
// ArrayOfGenericRef.
1616
//
17-
// For comparison, we always create four arrays of 10,000 words.
17+
// For comparison, we always create four arrays of 1,000 words.
1818

1919
import TestsUtils
2020

2121
public let ArrayOfRef = BenchmarkInfo(
2222
name: "ArrayOfRef",
2323
runFunction: run_ArrayOfRef,
24-
tags: [.validation, .api, .Array])
24+
tags: [.validation, .api, .Array],
25+
legacyFactor: 10)
2526

2627
protocol Constructible {
2728
associatedtype Element
@@ -32,8 +33,8 @@ class ConstructibleArray<T:Constructible> {
3233

3334
init(_ e:T.Element) {
3435
array = [T]()
35-
array.reserveCapacity(10_000)
36-
for _ in 0...10_000 {
36+
array.reserveCapacity(1_000)
37+
for _ in 0...1_000 {
3738
array.append(T(e:e) as T)
3839
}
3940
}
@@ -77,7 +78,7 @@ enum RefEnum {
7778
class RefArray<T> {
7879
var array : [T]
7980

80-
init(_ i:T, count:Int = 10_000) {
81+
init(_ i:T, count:Int = 1_000) {
8182
array = [T](repeating: i, count: count)
8283
}
8384
}

benchmark/single-source/ArraySetElement.swift

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@ import TestsUtils
1818
public var ArraySetElement = BenchmarkInfo(
1919
name: "ArraySetElement",
2020
runFunction: run_ArraySetElement,
21-
tags: [.runtime, .cpubench, .unstable]
21+
tags: [.runtime, .cpubench, .unstable],
22+
legacyFactor: 10
2223
)
2324

2425
// This is an effort to defeat isUniquelyReferenced optimization. Ideally
@@ -29,9 +30,8 @@ func storeArrayElement(_ array: inout [Int], _ i: Int) {
2930
}
3031

3132
public func run_ArraySetElement(_ N: Int) {
32-
let scale = 10
3333
var array = [Int](repeating: 0, count: 10000)
34-
for _ in 0..<N*scale {
34+
for _ in 0..<N {
3535
for i in 0..<array.count {
3636
storeArrayElement(&array, i)
3737
}

0 commit comments

Comments (0)