Skip to content

[benchmark] Add the ability for a benchmark to specify that it does not support certain platforms #16872

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 19 additions & 5 deletions benchmark/utils/DriverUtils.swift
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ struct Test {
}

/// The "main routine" of the benchmark.
var runFunction: (Int) -> () {
var runFunction: ((Int) -> ())? {
return benchInfo.runFunction
}

Expand Down Expand Up @@ -358,9 +358,19 @@ class SampleRunner {
}

/// Invoke the benchmark entry point and return the run time in milliseconds.
func runBench(_ test: Test, _ c: TestConfig) -> BenchResults {
func runBench(_ test: Test, _ c: TestConfig) -> BenchResults? {
var samples = [UInt64](repeating: 0, count: c.numSamples)

// Before we do anything, check that we actually have a function to
// run. If we don't it is because the benchmark is not supported on
// the platform and we should skip it.
guard let testFn = test.runFunction else {
if c.verbose {
print("Skipping unsupported benchmark \(test.name)!")
}
return nil
}

if c.verbose {
print("Running \(test.name) for \(c.numSamples) samples.")
}
Expand All @@ -373,7 +383,7 @@ func runBench(_ test: Test, _ c: TestConfig) -> BenchResults {
var elapsed_time : UInt64 = 0
if c.fixedNumIters == 0 {
test.setUpFunction?()
elapsed_time = sampler.run(test.name, fn: test.runFunction, num_iters: 1)
elapsed_time = sampler.run(test.name, fn: testFn, num_iters: 1)
test.tearDownFunction?()

if elapsed_time > 0 {
Expand All @@ -395,7 +405,7 @@ func runBench(_ test: Test, _ c: TestConfig) -> BenchResults {
print(" Measuring with scale \(scale).")
}
test.setUpFunction?()
elapsed_time = sampler.run(test.name, fn: test.runFunction, num_iters: scale)
elapsed_time = sampler.run(test.name, fn: testFn, num_iters: scale)
test.tearDownFunction?()
} else {
scale = 1
Expand Down Expand Up @@ -442,7 +452,11 @@ func runBenchmarks(_ c: TestConfig) {
sumBenchResults.sampleCount = 0

for t in c.tests {
let results = runBench(t, c)
guard let results = runBench(t, c) else {
print("\(t.index)\(c.delim)\(t.name)\(c.delim)Unsupported")
fflush(stdout)
continue
}
print("\(t.index)\(c.delim)\(t.name)\(c.delim)\(results.description)")
fflush(stdout)

Expand Down
71 changes: 64 additions & 7 deletions benchmark/utils/TestsUtils.swift
Original file line number Diff line number Diff line change
Expand Up @@ -70,34 +70,91 @@ public enum BenchmarkCategory : String {
case skip
}

/// The set of platforms that a benchmark can be associated with.
///
/// Each platform occupies one bit, so a benchmark may name several platforms
/// at once (e.g. `[.darwin, .linux]`).
public struct BenchmarkPlatformSet : OptionSet {
  public let rawValue: Int

  public init(rawValue: Int) {
    self.rawValue = rawValue
  }

  public static let darwin = BenchmarkPlatformSet(rawValue: 1 << 0)
  public static let linux = BenchmarkPlatformSet(rawValue: 1 << 1)

  /// The single platform that the harness is currently executing on.
  public static var currentPlatform: BenchmarkPlatformSet {
    #if os(Linux)
      return .linux
    #else
      return .darwin
    #endif
  }

  /// The union of every platform the harness knows about.
  public static var allPlatforms: BenchmarkPlatformSet {
    return darwin.union(linux)
  }
}

public struct BenchmarkInfo {
  /// The name of the benchmark that should be displayed by the harness.
  public var name: String

  /// Shadow variable backing the computed property `runFunction`.
  private var _runFunction: (Int) -> ()

  /// A function that invokes the specific benchmark routine, or nil if the
  /// benchmark does not support the current platform.
  public var runFunction: ((Int) -> ())? {
    if !shouldRun {
      return nil
    }
    return _runFunction
  }

  /// A set of category tags that describe this benchmark. This is used by the
  /// harness to allow for easy slicing of the set of benchmarks along tag
  /// boundaries, e.g.: run all string benchmarks or ref count benchmarks, etc.
  public var tags: [BenchmarkCategory]

  /// The platforms on which this benchmark is *not* supported. This is an
  /// OptionSet; an empty set means the benchmark runs everywhere.
  private var unsupportedPlatforms: BenchmarkPlatformSet

  /// Shadow variable backing the computed property `setUpFunction`.
  private var _setUpFunction: (() -> ())?

  /// An optional function that if non-null is run before benchmark samples
  /// are timed. Nil if the benchmark does not support the current platform.
  public var setUpFunction: (() -> ())? {
    if !shouldRun {
      return nil
    }
    return _setUpFunction
  }

  /// Shadow variable backing the computed property `tearDownFunction`.
  private var _tearDownFunction: (() -> ())?

  /// An optional function that if non-null is run immediately after a sample
  /// is taken. Nil if the benchmark does not support the current platform.
  public var tearDownFunction: (() -> ())? {
    if !shouldRun {
      return nil
    }
    return _tearDownFunction
  }

  public init(name: String, runFunction: @escaping (Int) -> (), tags: [BenchmarkCategory],
              setUpFunction: (() -> ())? = nil,
              tearDownFunction: (() -> ())? = nil,
              unsupportedPlatforms: BenchmarkPlatformSet = []) {
    self.name = name
    self._runFunction = runFunction
    self.tags = tags
    self._setUpFunction = setUpFunction
    self._tearDownFunction = tearDownFunction
    self.unsupportedPlatforms = unsupportedPlatforms
  }

  /// Returns true if this benchmark should be run on the current platform.
  var shouldRun: Bool {
    return !unsupportedPlatforms.contains(.currentPlatform)
  }
}

Expand Down