Skip to content

[benchmark] Legacy Factor #20212

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Nov 6, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 7 additions & 5 deletions benchmark/scripts/Benchmark_Driver
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ class BenchmarkDoctor(object):
setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
setup = 0 if ratio < 0.05 else setup
runtime = min(
[(result.min - correction) for i_series in
[(result.samples.min - correction) for i_series in
[BenchmarkDoctor._select(measurements, num_iters=i)
for correction in [(setup / i) for i in [1, 2]]
] for result in i_series])
Expand All @@ -367,7 +367,8 @@ class BenchmarkDoctor(object):
def _setup_overhead(measurements):
select = BenchmarkDoctor._select
ti1, ti2 = [float(min(mins)) for mins in
[[result.min for result in i_series] for i_series in
[[result.samples.min for result in i_series]
for i_series in
[select(measurements, num_iters=i) for i in [1, 2]]]]
setup = int(round(2.0 * (ti1 - ti2)))
ratio = (setup / ti1) if ti1 > 0 else 0
Expand Down Expand Up @@ -439,8 +440,9 @@ class BenchmarkDoctor(object):
Returns a dictionary with benchmark name and `PerformanceTestResult`s.
"""
self.log.debug('Calibrating num-samples for {0}:'.format(benchmark))
r = self.driver.run(benchmark, num_samples=3, num_iters=1) # calibrate
num_samples = self._adjusted_1s_samples(r.min)
r = self.driver.run(benchmark, num_samples=3, num_iters=1,
verbose=True) # calibrate
num_samples = self._adjusted_1s_samples(r.samples.min)

def capped(s):
return min(s, 2048)
Expand All @@ -449,7 +451,7 @@ class BenchmarkDoctor(object):
opts = opts if isinstance(opts, list) else [opts]
self.log.debug(
'Runtime {0} μs yields {1} adjusted samples per second.'.format(
r.min, num_samples))
r.samples.min, num_samples))
self.log.debug(
'Measuring {0}, 5 x i1 ({1} samples), 5 x i2 ({2} samples)'.format(
benchmark, run_args[0][0], run_args[1][0]))
Expand Down
5 changes: 3 additions & 2 deletions benchmark/scripts/test_Benchmark_Driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -423,7 +423,7 @@ def test_no_prefix_for_base_logging(self):

def _PTR(min=700, mem_pages=1000, setup=None):
    """Create PerformanceTestResult Stub.

    The nested Stub mirrors the real result's structure: callers read the
    minimum runtime via `r.samples.min`, not `r.min`.
    """
    return Stub(samples=Stub(min=min), mem_pages=mem_pages, setup=setup)


def _run(test, num_samples=None, num_iters=None, verbose=None,
Expand Down Expand Up @@ -483,7 +483,8 @@ def test_measure_10_independent_1s_benchmark_series(self):
"""
driver = BenchmarkDriverMock(tests=['B1'], responses=([
# calibration run, returns a stand-in for PerformanceTestResult
(_run('B1', num_samples=3, num_iters=1), _PTR(min=300))] +
(_run('B1', num_samples=3, num_iters=1,
verbose=True), _PTR(min=300))] +
# 5x i1 series, with 300 μs runtime it's possible to take 4098
# samples/s, but it should be capped at 2k
([(_run('B1', num_samples=2048, num_iters=1,
Expand Down
8 changes: 5 additions & 3 deletions benchmark/single-source/AnyHashableWithAClass.swift
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,12 @@ import TestsUtils
// Benchmark registration. `legacyFactor` tells the driver to scale the
// measured samples back up, keeping results comparable with runs taken
// before the workload was divided by `lf`.
public var AnyHashableWithAClass = BenchmarkInfo(
  name: "AnyHashableWithAClass",
  runFunction: run_AnyHashableWithAClass,
  tags: [.abstraction, .runtime, .cpubench],
  legacyFactor: lf
)

let lf = 500

class TestHashableBase : Hashable {
var value: Int
init(_ value: Int) {
Expand Down Expand Up @@ -55,8 +58,7 @@ class TestHashableDerived5 : TestHashableDerived4 {}
@inline(never)
public func run_AnyHashableWithAClass(_ N: Int) {
  let c = TestHashableDerived5(10)
  // Iteration count divided by the legacy factor `lf`; the driver multiplies
  // the measured samples by `lf` to preserve historical comparability.
  for _ in 0...(N*500000/lf) {
    _ = AnyHashable(c)
  }
}

11 changes: 6 additions & 5 deletions benchmark/single-source/ArrayOfGenericRef.swift
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,15 @@
// This benchmark tests creation and destruction of an array of enum
// and generic type bound to nontrivial types.
//
// For comparison, we always create three arrays of 10,000 words.
// For comparison, we always create three arrays of 1,000 words.

import TestsUtils

// Workload reduced 10x (arrays of 1,000 instead of 10,000 words);
// `legacyFactor` rescales reported samples so old results stay comparable.
public let ArrayOfGenericRef = BenchmarkInfo(
  name: "ArrayOfGenericRef",
  runFunction: run_ArrayOfGenericRef,
  tags: [.validation, .api, .Array],
  legacyFactor: 10)

protocol Constructible {
associatedtype Element
Expand All @@ -31,8 +32,8 @@ class ConstructibleArray<T:Constructible> {

// Constructs 1,001 elements (`0...1_000` is an inclusive range).
// Capacity is reserved up front so the loop measures element
// construction rather than array reallocation.
init(_ e:T.Element) {
  array = [T]()
  array.reserveCapacity(1_000)
  for _ in 0...1_000 {
    array.append(T(e:e) as T)
  }
}
Expand Down Expand Up @@ -65,7 +66,7 @@ func genCommonRefArray() {
class RefArray<T> {
var array: [T]

// Default count reduced 10x (was 10_000) to match the benchmark's
// legacyFactor of 10.
init(_ i:T, count:Int = 1_000) {
  array = [T](repeating: i, count: count)
}
}
Expand Down
11 changes: 6 additions & 5 deletions benchmark/single-source/ArrayOfRef.swift
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,15 @@
// references. It is meant to be a baseline for comparison against
// ArrayOfGenericRef.
//
// For comparison, we always create four arrays of 10,000 words.
// For comparison, we always create four arrays of 1,000 words.

import TestsUtils

// Workload reduced 10x (arrays of 1,000 instead of 10,000 words);
// `legacyFactor` rescales reported samples so old results stay comparable.
public let ArrayOfRef = BenchmarkInfo(
  name: "ArrayOfRef",
  runFunction: run_ArrayOfRef,
  tags: [.validation, .api, .Array],
  legacyFactor: 10)

protocol Constructible {
associatedtype Element
Expand All @@ -32,8 +33,8 @@ class ConstructibleArray<T:Constructible> {

// Constructs 1,001 elements (`0...1_000` is an inclusive range).
// Capacity is reserved up front so the loop measures element
// construction rather than array reallocation.
init(_ e:T.Element) {
  array = [T]()
  array.reserveCapacity(1_000)
  for _ in 0...1_000 {
    array.append(T(e:e) as T)
  }
}
Expand Down Expand Up @@ -77,7 +78,7 @@ enum RefEnum {
class RefArray<T> {
var array : [T]

// Default count reduced 10x (was 10_000) to match the benchmark's
// legacyFactor of 10.
init(_ i:T, count:Int = 1_000) {
  array = [T](repeating: i, count: count)
}
}
Expand Down
6 changes: 3 additions & 3 deletions benchmark/single-source/ArraySetElement.swift
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@ import TestsUtils
// The run function's inner `scale = 10` multiplier was dropped, so
// `legacyFactor: 10` has the driver scale measured samples back up,
// keeping results comparable with historical runs.
public var ArraySetElement = BenchmarkInfo(
  name: "ArraySetElement",
  runFunction: run_ArraySetElement,
  tags: [.runtime, .cpubench, .unstable],
  legacyFactor: 10
)

// This is an effort to defeat isUniquelyReferenced optimization. Ideally
Expand All @@ -29,9 +30,8 @@ func storeArrayElement(_ array: inout [Int], _ i: Int) {
}

public func run_ArraySetElement(_ N: Int) {
let scale = 10
var array = [Int](repeating: 0, count: 10000)
for _ in 0..<N*scale {
for _ in 0..<N {
for i in 0..<array.count {
storeArrayElement(&array, i)
}
Expand Down
4 changes: 4 additions & 0 deletions benchmark/utils/DriverUtils.swift
Original file line number Diff line number Diff line change
Expand Up @@ -524,6 +524,10 @@ final class TestRunner {
}

test.tearDownFunction?()
if let lf = test.legacyFactor {
logVerbose(" Applying legacy factor: \(lf)")
samples = samples.map { $0 * lf }
}

return BenchResults(samples, maxRSS: measureMemoryUsage())
}
Expand Down
17 changes: 10 additions & 7 deletions benchmark/utils/TestsUtils.swift
Original file line number Diff line number Diff line change
Expand Up @@ -26,18 +26,18 @@ public enum BenchmarkCategory : String {
case runtime, refcount, metadata
// Other general areas of compiled code validation.
case abstraction, safetychecks, exceptions, bridging, concurrency

// Algorithms are "micro" that test some well-known algorithm in isolation:
// sorting, searching, hashing, fibonaci, crypto, etc.
case algorithm

// Miniapplications are contrived to mimic some subset of application behavior
// in a way that can be easily measured. They are larger than micro-benchmarks,
// combining multiple APIs, data structures, or algorithms. This includes small
// standardized benchmarks, pieces of real applications that have been extracted
// into a benchmark, important functionality like JSON parsing, etc.
case miniapplication

// Regression benchmarks is a catch-all for less important "micro"
// benchmarks. This could be a random piece of code that was attached to a bug
// report. We want to make sure the optimizer as a whole continues to handle
Expand All @@ -46,12 +46,12 @@ public enum BenchmarkCategory : String {
// as highly as "validation" benchmarks and likely won't be the subject of
// future investigation unless they significantly regress.
case regression

// Most benchmarks are assumed to be "stable" and will be regularly tracked at
// each commit. A handful may be marked unstable if continually tracking them is
// counterproductive.
case unstable

// CPU benchmarks represent instrinsic Swift performance. They are useful for
// measuring a fully baked Swift implementation across different platforms and
// hardware. The benchmark should also be reasonably applicable to real Swift
Expand Down Expand Up @@ -151,16 +151,20 @@ public struct BenchmarkInfo {
return _tearDownFunction
}

/// Optional multiplier applied by the driver to measured samples, so that
/// benchmarks whose workload was reduced remain comparable with results
/// recorded before the reduction. `nil` means no scaling.
public var legacyFactor: Int?

/// Creates benchmark metadata.
///
/// - Parameters:
///   - name: Unique benchmark name.
///   - runFunction: The measured workload; receives the iteration count N.
///   - tags: Categories used to filter benchmark runs.
///   - setUpFunction: Run once before measurement; not timed.
///   - tearDownFunction: Run once after measurement; not timed.
///   - unsupportedPlatforms: Platforms on which this benchmark is skipped.
///   - legacyFactor: See the `legacyFactor` property; defaults to `nil`.
public init(name: String, runFunction: @escaping (Int) -> (), tags: [BenchmarkCategory],
            setUpFunction: (() -> ())? = nil,
            tearDownFunction: (() -> ())? = nil,
            unsupportedPlatforms: BenchmarkPlatformSet = [],
            legacyFactor: Int? = nil) {
  self.name = name
  self._runFunction = runFunction
  self.tags = Set(tags)
  self._setUpFunction = setUpFunction
  self._tearDownFunction = tearDownFunction
  self.unsupportedPlatforms = unsupportedPlatforms
  self.legacyFactor = legacyFactor
}

/// Returns true if this benchmark should be run on the current platform.
Expand Down Expand Up @@ -266,4 +270,3 @@ public func getString(_ s: String) -> String { return s }
// The same for Substring.
@inline(never)
public func getSubstring(_ s: Substring) -> Substring { return s }