#!/usr/bin/env python3

# Copyright (C) 2024 Intel Corporation
# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
# See LICENSE.TXT
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
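
"""Run api_overhead_benchmark_sycl's SubmitKernel test under several queue and
command-list variants, chart the mean execution times with Mermaid, and write
the report to benchmark_results.md.

Example invocation (the script name and paths are illustrative):

    python3 run_benchmarks.py ./build/bin --env MY_VAR=1 --save my-run --compare baseline
"""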

import os
import subprocess  # nosec B404
import csv
import argparse
import io
import json
from pathlib import Path

# Run the benchmark binary with the given in-order-queue setting and
# environment variable overrides; returns the command and its CSV output
def run_benchmark(directory, ioq, env_vars):
    env = os.environ.copy()
    env.update(env_vars)
    command = [
        f"{directory}/api_overhead_benchmark_sycl",
        "--test=SubmitKernel",
        f"--Ioq={ioq}",
        "--DiscardEvents=0",
        "--MeasureCompletion=0",
        "--iterations=10000",
        "--Profiling=0",
        "--NumKernels=10",
        "--KernelExecTime=1",
        "--csv",
        "--noHeaders",
    ]
    result = subprocess.run(command, capture_output=True, text=True, env=env)  # nosec B603
    if result.returncode != 0:
        raise RuntimeError(f"Benchmark failed (exit code {result.returncode}): {result.stderr.strip()}")
    return command, result.stdout

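# For example (paths and values are illustrative):
#   command, output = run_benchmark("./build/bin", ioq=1,
#                                   env_vars={"UR_L0_USE_IMMEDIATE_COMMANDLISTS": "1"})
# 'output' holds the raw CSV text that parse_output() consumes below.
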
# Parse the CSV output and extract the benchmark name and mean execution time
def parse_output(output):
    # Use StringIO to turn the string output into a file-like object for the csv reader
    csv_file = io.StringIO(output)
    reader = csv.reader(csv_file)

    # Skip the first row; the data row follows it
    next(reader, None)
    data_row = next(reader, None)
    if data_row is None:
        raise ValueError("Benchmark output does not contain data.")
    try:
        name = data_row[0]  # Name of the benchmark is the first value
        mean = float(data_row[1])  # Mean is the second value
        return (name, mean)
    except ValueError:
        raise ValueError(f"Could not convert mean execution time to float: '{data_row[1]}'")
    except IndexError:
        raise ValueError("Data row does not contain enough values.")

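# A sketch of the expected shape (values illustrative): a first row that is
# skipped, then "<name>,<mean>,...". For instance:
#   parse_output("skipped\nSubmitKernel,12.5\n") == ("SubmitKernel", 12.5)
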
# Generate the Mermaid bar chart script (a gantt chart is used to draw
# horizontal bars, one section per benchmark label)
def generate_mermaid_script(labels, chart_data):
    mermaid_script = """
---
config:
  gantt:
    rightPadding: 10
    leftPadding: 120
    sectionFontSize: 10
    numberSectionStyles: 2
---
gantt
    title api_overhead_benchmark_sycl, mean execution time per 10 kernels (μs)
    todayMarker off
    dateFormat X
    axisFormat %s
"""
    for label in labels:
        nbars = 0
        print_label = label.replace(" ", "<br>")
        mermaid_script += f"""
    section {print_label}
"""
        for name, data in chart_data:
            if data is not None and label in data:
                nbars += 1
                mean = data[label]
                crit = "crit," if name == "This PR" else ""
                mermaid_script += f"""
    {name} ({mean} us) : {crit} 0, {int(mean)}
"""
        # Pad every section to four rows so all sections render at uniform height
        padding = 4 - nbars
        for _ in range(padding):
            mermaid_script += """
    - : 0, 0
"""

    return mermaid_script

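# The generated script looks roughly like this (values illustrative):
#   gantt
#       section SubmitKernel<br>Imm-CmdLists-OFF
#       This PR (12.5 us) : crit, 0, 12
#       baseline (13.1 us) :  0, 13
#       - : 0, 0
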
# Generate a collapsible markdown section for each benchmark variant
def generate_markdown_details(variant_details):
    markdown_sections = []
    for label, command, env_vars, output in variant_details:
        env_vars_str = '\n'.join(f"{key}={value}" for key, value in env_vars.items())
        markdown_sections.append(f"""
<details>
<summary>{label}</summary>

#### Environment Variables:
{env_vars_str}

#### Command:
{' '.join(command)}

#### Output:
{output}

</details>
""")
    return "\n".join(markdown_sections)

# Generate the full markdown report: chart first, then per-variant details
def generate_markdown_with_mermaid_chart(mermaid_script, variant_details):
    return f"""
# Benchmark Results
```mermaid
{mermaid_script}
```
## Details
{generate_markdown_details(variant_details)}
"""

def save_benchmark_results(save_name, benchmark_data):
    benchmarks_dir = Path.home() / 'benchmarks'
    benchmarks_dir.mkdir(exist_ok=True)
    file_path = benchmarks_dir / f"{save_name}.json"
    with file_path.open('w') as file:
        json.dump(benchmark_data, file, indent=4)
    print(f"Benchmark results saved to {file_path}")

def load_benchmark_results(compare_name):
    benchmarks_dir = Path.home() / 'benchmarks'
    file_path = benchmarks_dir / f"{compare_name}.json"
    if file_path.exists():
        with file_path.open('r') as file:
            return json.load(file)
    else:
        return None

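# Saved results are a flat label-to-mean mapping, e.g. (values illustrative):
#   ~/benchmarks/baseline.json
#   {
#       "<benchmark name> Imm-CmdLists-OFF": 12.5,
#       ...
#   }
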
def main(directory, additional_env_vars, save_name, compare_names):
    # (in-order queue flag, environment overrides, extra label) per variant
    variants = [
        (1, {'UR_L0_USE_IMMEDIATE_COMMANDLISTS': '0'}, "Imm-CmdLists-OFF"),
        (0, {'UR_L0_USE_IMMEDIATE_COMMANDLISTS': '0'}, "Imm-CmdLists-OFF"),
        (1, {'UR_L0_USE_IMMEDIATE_COMMANDLISTS': '1'}, ""),
        (0, {'UR_L0_USE_IMMEDIATE_COMMANDLISTS': '1'}, ""),
    ]

    # Run benchmarks and collect means, labels, and variant details
    means = []
    labels = []
    variant_details = []
    for ioq, env_vars, extra_label in variants:
        merged_env_vars = {**env_vars, **additional_env_vars}
        command, output = run_benchmark(directory, ioq, merged_env_vars)
        label, mean = parse_output(output)
        if extra_label:
            label += f" {extra_label}"
        means.append(mean)
        labels.append(label)
        variant_details.append((label, command, merged_env_vars, output))

    benchmark_data = {label: mean for label, mean in zip(labels, means)}

    # Chart the current run first, followed by any previously saved runs
    chart_data = [("This PR", benchmark_data)]
    for name in compare_names:
        chart_data.append((name, load_benchmark_results(name)))

    if save_name:
        save_benchmark_results(save_name, benchmark_data)

    mermaid_script = generate_mermaid_script(labels, chart_data)
    markdown_content = generate_markdown_with_mermaid_chart(mermaid_script, variant_details)

    with open('benchmark_results.md', 'w') as file:
        file.write(markdown_content)

    print("Markdown with benchmark results has been written to benchmark_results.md")

def validate_and_parse_env_args(env_args):
    env_vars = {}
    for arg in env_args:
        if '=' not in arg:
            raise ValueError(f"Environment variable argument '{arg}' is not in the form Variable=Value.")
        key, value = arg.split('=', 1)
        env_vars[key] = value
    return env_vars

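# For example, validate_and_parse_env_args(["FOO=1", "BAR=a=b"]) returns
# {"FOO": "1", "BAR": "a=b"}; everything after the first '=' is kept verbatim.
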
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run benchmarks and generate a Mermaid bar chart script.')
    parser.add_argument('benchmark_directory', type=str, help='The directory where the benchmarks are located.')
    parser.add_argument("--env", type=str, help='Environment variable to set for the benchmark runs, in the form Variable=Value.', action="append", default=[])
    parser.add_argument("--save", type=str, help='Save the results for comparison under the specified name.')
    parser.add_argument("--compare", type=str, help='Compare against previously saved results; "baseline" is always included.', action="append", default=["baseline"])

    args = parser.parse_args()

    additional_env_vars = validate_and_parse_env_args(args.env)

    main(args.benchmark_directory, additional_env_vars, args.save, args.compare)