
Commit df0aa84

Merge pull request #2456 from EuphoricThinking/benchmark_job6_final_pr
[benchmarks] add umf suite
2 parents 75d5b8d + 0b7d7ae commit df0aa84

6 files changed: +195, -5 lines changed

.github/workflows/benchmarks-reusable.yml

Lines changed: 21 additions & 0 deletions
@@ -156,6 +156,26 @@ jobs:
       - name: Install UR
         run: cmake --install ${{github.workspace}}/ur_build
 
+      - name: Checkout UMF
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        with:
+          repository: oneapi-src/unified-memory-framework
+          ref: main
+          path: umf-repo
+          fetch-depth: 1
+          fetch-tags: false
+
+      - name: Configure UMF
+        run: >
+          cmake -DCMAKE_BUILD_TYPE=Release
+          -S${{github.workspace}}/umf-repo
+          -B${{github.workspace}}/umf_build
+          -DUMF_BUILD_BENCHMARKS=ON
+          -DUMF_TESTS_FAIL_ON_SKIP=ON
+
+      - name: Build UMF
+        run: cmake --build ${{github.workspace}}/umf_build -j $(nproc)
+
       - name: Run benchmarks
         working-directory: ${{ github.workspace }}/ur-repo/
         id: benchmarks
@@ -164,6 +184,7 @@ jobs:
           ~/bench_workdir
           --sycl ${{ github.workspace }}/sycl_build
           --ur ${{ github.workspace }}/ur_install
+          --umf ${{ github.workspace }}/umf_build
           --adapter ${{ matrix.adapter.str_name }}
           ${{ inputs.upload_report && '--output-html' || '' }}
           ${{ inputs.bench_script_params }}

scripts/benchmarks/benches/base.py

Lines changed: 5 additions & 2 deletions
@@ -26,7 +26,7 @@ def get_adapter_full_path():
         assert False, \
             f"could not find adapter file {adapter_path} (and in similar lib paths)"
 
-    def run_bench(self, command, env_vars, ld_library=[]):
+    def run_bench(self, command, env_vars, ld_library=[], add_sycl=True):
         env_vars_with_forced_adapter = env_vars.copy()
         if options.ur is not None:
             env_vars_with_forced_adapter.update(
@@ -35,7 +35,7 @@ def run_bench(self, command, env_vars, ld_library=[]):
         return run(
             command=command,
             env_vars=env_vars_with_forced_adapter,
-            add_sycl=True,
+            add_sycl=add_sycl,
             cwd=options.benchmark_cwd,
             ld_library=ld_library
         ).stdout.decode()
@@ -71,6 +71,9 @@ def run(self, env_vars) -> list[Result]:
     def teardown(self):
         raise NotImplementedError()
 
+    def stddev_threshold(self):
+        return None
+
 class Suite:
     def benchmarks(self) -> list[Benchmark]:
         raise NotImplementedError()
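
The new add_sycl flag and the stddev_threshold() hook are intended for benchmark subclasses that do not need the SYCL environment or that need a looser stability bound. A minimal sketch of how such a subclass might use them (the class name, binary path, and threshold value below are illustrative, not part of this change):

    class MyNativeBenchmark(Benchmark):  # hypothetical subclass, for illustration only
        def run(self, env_vars) -> list[Result]:
            # add_sycl=False keeps the SYCL environment out of this particular run
            stdout = self.run_bench(["./my-native-bench"], env_vars, add_sycl=False)
            return []  # parsing of stdout is omitted in this sketch

        def stddev_threshold(self):
            return 0.1  # 10%; the base implementation returns None, i.e. use the global default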

scripts/benchmarks/benches/options.py

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@ class Options:
     sycl: str = None
     ur: str = None
     ur_adapter: str = None
+    umf: str = None
     rebuild: bool = True
     benchmark_cwd: str = "INVALID"
     timeout: float = 600

scripts/benchmarks/benches/umf.py

Lines changed: 158 additions & 0 deletions
@@ -0,0 +1,158 @@
+# Copyright (C) 2024 Intel Corporation
+# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
+# See LICENSE.TXT
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+import random
+from utils.utils import git_clone
+from .base import Benchmark, Suite
+from .result import Result
+from utils.utils import run, create_build_path
+from .options import options
+from .oneapi import get_oneapi
+import os
+import csv
+import io
+
+def isUMFAvailable():
+    return options.umf is not None
+
+class UMFSuite(Suite):
+    def __init__(self, directory):
+        self.directory = directory
+        if not isUMFAvailable():
+            print("UMF not provided. Related benchmarks will not run")
+
+    def setup(self):
+        if not isUMFAvailable():
+            return []
+        self.built = True
+
+    def benchmarks(self) -> list[Benchmark]:
+        if not isUMFAvailable():
+            return
+
+        benches = [
+            GBench(self),
+        ]
+
+        return benches
+
+class ComputeUMFBenchmark(Benchmark):
+    def __init__(self, bench, name):
+        self.bench = bench
+        self.bench_name = name
+        self.oneapi = get_oneapi()
+
+        self.col_name = None
+        self.col_iterations = None
+        self.col_real_time = None
+        self.col_cpu_time = None
+        self.col_time_unit = None
+
+        self.col_statistics_time = None
+
+        super().__init__(bench.directory)
+
+    def bin_args(self) -> list[str]:
+        return []
+
+    def extra_env_vars(self) -> dict:
+        return {}
+
+    def setup(self):
+        if not isUMFAvailable():
+            print("UMF prefix path not provided")
+            return
+
+        self.benchmark_bin = os.path.join(options.umf, 'benchmark', self.bench_name)
+
+    def run(self, env_vars) -> list[Result]:
+        command = [
+            f"{self.benchmark_bin}",
+        ]
+
+        command += self.bin_args()
+        env_vars.update(self.extra_env_vars())
+
+        result = self.run_bench(command, env_vars, add_sycl=False, ld_library=[self.oneapi.tbb_lib()])
+        parsed = self.parse_output(result)
+        results = []
+        for r in parsed:
+            (config, pool, mean) = r
+            label = f"{config} {pool}"
+            results.append(Result(label=label, value=mean, command=command, env=env_vars, stdout=result, unit="ns", explicit_group=config))
+        return results
+
+    # An implementation based on the self.col_* indices here could lead to division by None
+    def get_mean(self, datarow):
+        raise NotImplementedError()
+
+    def teardown(self):
+        return
+
+class GBench(ComputeUMFBenchmark):
+    def __init__(self, bench):
+        super().__init__(bench, "umf-benchmark")
+
+        self.col_name = 0
+        self.col_iterations = 1
+        self.col_real_time = 2
+        self.col_cpu_time = 3
+        self.col_time_unit = 4
+
+        self.idx_pool = 0
+        self.idx_config = 1
+        self.name_separator = '/'
+
+        self.col_statistics_time = self.col_real_time
+
+    def name(self):
+        return self.bench_name
+
+    # --benchmark_format describes stdout output
+    # --benchmark_out=<file> and --benchmark_out_format=<format>
+    # describe output to a file
+    def bin_args(self):
+        return ["--benchmark_format=csv"]
+
+    # the default unit
+    # might be changed globally with --benchmark_time_unit={ns|us|ms|s}
+    # the change affects only benchmarks where the time unit has not been set
+    # explicitly
+    def unit(self):
+        return "ns"
+
+    # these benchmarks are not stable, so set this to a large value
+    def stddev_threshold(self) -> float:
+        return 0.2 # 20%
+
+    def get_pool_and_config(self, full_name):
+        list_split = full_name.split(self.name_separator, 1)
+        if len(list_split) != 2:
+            raise ValueError("Incorrect benchmark name format: ", full_name)
+
+        return list_split[self.idx_pool], list_split[self.idx_config]
+
+    def get_mean(self, datarow):
+        return float(datarow[self.col_statistics_time])
+
+    def parse_output(self, output):
+        csv_file = io.StringIO(output)
+        reader = csv.reader(csv_file)
+
+        data_row = next(reader, None)
+        if data_row is None:
+            raise ValueError("Benchmark output does not contain data.")
+
+        results = []
+        for row in reader:
+            try:
+                full_name = row[self.col_name]
+                pool, config = self.get_pool_and_config(full_name)
+                mean = self.get_mean(row)
+                results.append((config, pool, mean))
+            except KeyError as e:
+                raise ValueError(f"Error parsing output: {e}")
+
+        return results
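
For reference, a small standalone sketch of the CSV shape that GBench.parse_output expects from --benchmark_format=csv; the benchmark name and the numbers below are invented, and a real Google Benchmark run prints further columns that the parser ignores:

    import csv, io

    sample = (
        "name,iterations,real_time,cpu_time,time_unit\n"
        "alloc/provider_os_memory,1000,120.5,118.2,ns\n"  # hypothetical data row
    )

    reader = csv.reader(io.StringIO(sample))
    next(reader)                          # first row is consumed up front, as in parse_output
    row = next(reader)

    pool, config = row[0].split('/', 1)   # idx_pool=0, idx_config=1, name_separator='/'
    mean = float(row[2])                  # col_statistics_time == col_real_time
    print(f"{config} {pool}", mean)       # "provider_os_memory alloc 120.5", the label format used in run()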

scripts/benchmarks/main.py

Lines changed: 7 additions & 3 deletions
@@ -9,6 +9,7 @@
 from benches.velocity import VelocityBench
 from benches.syclbench import *
 from benches.llamacpp import *
+from benches.umf import *
 from benches.test import TestSuite
 from benches.options import Compare, options
 from output_markdown import generate_markdown
@@ -74,7 +75,7 @@ def remove_outliers(results: dict[str, list[Result]], threshold: float = 3.5) ->
 
     return new_results
 
-def process_results(results: dict[str, list[Result]]) -> tuple[bool, list[Result]]:
+def process_results(results: dict[str, list[Result]], stddev_threshold_override) -> tuple[bool, list[Result]]:
     processed: list[Result] = []
     # technically, we can detect whether result is below or above threshold per
     # individual result. However, we can't repeat benchmark runs with that
@@ -94,7 +95,7 @@ def process_results(results: dict[str, list[Result]
         mean_value = statistics.mean(values)
         stddev = statistics.stdev(values)
 
-        threshold = options.stddev_threshold * mean_value
+        threshold = (stddev_threshold_override if stddev_threshold_override is not None else options.stddev_threshold) * mean_value
 
         if stddev > threshold:
             print(f"stddev {stddev} above the threshold {threshold} for {label}")
@@ -120,6 +121,7 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
         VelocityBench(directory),
         SyclBench(directory),
         LlamaCppBench(directory),
+        UMFSuite(directory),
         #TestSuite()
     ] if not options.dry_run else []
 
@@ -159,7 +161,7 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
         processed: list[Result] = []
         for _ in range(5):
             run_iterations(benchmark, merged_env_vars, options.iterations, intermediate_results)
-            valid, processed = process_results(intermediate_results)
+            valid, processed = process_results(intermediate_results, benchmark.stddev_threshold())
             if valid:
                 break
         results += processed
@@ -231,6 +233,7 @@ def validate_and_parse_env_args(env_args):
     parser.add_argument('benchmark_directory', type=str, help='Working directory to setup benchmarks.')
     parser.add_argument('--sycl', type=str, help='Root directory of the SYCL compiler.', default=None)
     parser.add_argument('--ur', type=str, help='UR install prefix path', default=None)
+    parser.add_argument('--umf', type=str, help='UMF install prefix path', default=None)
    parser.add_argument('--adapter', type=str, help='Options to build the Unified Runtime as part of the benchmark', default="level_zero")
    parser.add_argument("--no-rebuild", help='Rebuild the benchmarks from scratch.', action="store_true")
    parser.add_argument("--env", type=str, help='Use env variable for a benchmark run.', action="append", default=[])
@@ -267,6 +270,7 @@ def validate_and_parse_env_args(env_args):
     options.output_html = args.output_html
     options.output_markdown = args.output_markdown
     options.dry_run = args.dry_run
+    options.umf = args.umf
 
     benchmark_filter = re.compile(args.filter) if args.filter else None
 
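
The per-benchmark override only swaps the multiplier used in the stability check inside process_results. A minimal sketch of the arithmetic, with invented sample values and a stand-in for options.stddev_threshold:

    import statistics

    values = [100.0, 118.0, 96.0, 105.0, 102.0]   # hypothetical results of one benchmark
    mean_value = statistics.mean(values)          # 104.2
    stddev = statistics.stdev(values)             # ~8.38

    default_threshold = 0.02                      # stand-in value, not the real default
    override = 0.2                                # what GBench.stddev_threshold() returns

    threshold = (override if override is not None else default_threshold) * mean_value
    print(stddev > threshold)                     # False: 8.38 < 20.84, so the run counts as stable
    # with the stand-in 2% default the same data would be flagged (8.38 > 2.084)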

scripts/benchmarks/output_html.py

Lines changed: 3 additions & 0 deletions
@@ -157,6 +157,9 @@ def create_grouped_bar_charts(groups: list[ExplicitGroup]) -> list[BenchmarkChar
         ax.bar_label(rects, fmt='')
 
         for rect, run, res in zip(rects, run_results.keys(), run_results.values()):
+            if res is None:
+                continue
+
             height = rect.get_height()
             if height > max_height:
                 max_height = height
