Commit dbef422

[benchmarks] HTML output improvements
This patch makes a few minor changes to the bar charts to improve their legibility.
1 parent 85476e1 commit dbef422

2 files changed: 52 additions, 14 deletions


scripts/benchmarks/main.py

Lines changed: 3 additions & 3 deletions
@@ -114,7 +114,6 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
     history.load(1000)

     for name in compare_names:
-        print(f"compare name: {name}")
         compare_result = history.get_compare(name)
         if compare_result:
             chart_data[name] = compare_result.results
@@ -125,14 +124,15 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
     with open('benchmark_results.md', 'w') as file:
         file.write(markdown_content)

+    print(f"Markdown with benchmark results has been written to {os.getcwd()}/benchmark_results.md")
+
     saved_name = save_name if save_name is not None else this_name

     # It's important we don't save the current results into history before
     # we calculate historical averages or get latest results for compare.
     # Otherwise we might be comparing the results to themselves.
     if not options.dry_run:
         history.save(saved_name, results, save_name is not None)
-        print(f"Markdown with benchmark results has been written to {os.getcwd()}/benchmark_results.md")
         compare_names.append(saved_name)

     if options.output_html:
@@ -141,7 +141,7 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
         with open('benchmark_results.html', 'w') as file:
             file.write(html_content)

-            print(f"HTML with benchmark results has been written to {os.getcwd()}/benchmark_results.html")
+        print(f"HTML with benchmark results has been written to {os.getcwd()}/benchmark_results.html")

 def validate_and_parse_env_args(env_args):
     env_vars = {}

scripts/benchmarks/output_html.py

Lines changed: 49 additions & 11 deletions
@@ -3,6 +3,7 @@
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

+import re
 import matplotlib.pyplot as plt
 import mpld3
 from collections import defaultdict
@@ -67,12 +68,22 @@ def prepare_normalized_data(latest_results: dict[str, LatestResults],
     return normalized_data

 def format_benchmark_label(label: str) -> list[str]:
-    words = label.split()
-    if len(words) <= 2:
-        return [label]
+    words = re.split(' |_', label)
+    lines = []
+    current_line = []

-    mid = len(words) // 2
-    return [' '.join(words[:mid]), ' '.join(words[mid:])]
+    # max line length 30
+    for word in words:
+        if len(' '.join(current_line + [word])) > 30:
+            lines.append(' '.join(current_line))
+            current_line = [word]
+        else:
+            current_line.append(word)
+
+    if current_line:
+        lines.append(' '.join(current_line))
+
+    return lines

 def create_bar_plot(ax: plt.Axes,
                     normalized_data: list[list[float]],
@@ -109,9 +120,8 @@ def create_bar_plot(ax: plt.Axes,

         tooltip_labels = [
             f"Run: {run_name}\n"
-            f"Benchmark: {benchmark_label}\n"
             f"Value: {current_value:.2f} {unit}\n"
-            f"Baseline ({baseline_name}): {baseline_value:.2f} {unit}\n"
+            f"Normalized to ({baseline_name}): {baseline_value:.2f} {unit}\n"
             f"Normalized: {value:.1f}%"
         ]
         tooltip = mpld3.plugins.LineHTMLTooltip(rect, tooltip_labels, css='.mpld3-tooltip{background:white;padding:8px;border:1px solid #ddd;border-radius:4px;font-family:monospace;white-space:pre;}')
@@ -141,6 +151,37 @@ def add_chart_elements(ax: plt.Axes,
     ax.grid(True, axis='y', alpha=0.2)
     ax.legend(bbox_to_anchor=(1, 1), loc='upper left')

+def split_large_groups(benchmark_groups):
+    miscellaneous = []
+    new_groups = defaultdict(list)
+
+    split_happened = False
+    for group, labels in benchmark_groups.items():
+        if len(labels) == 1:
+            miscellaneous.extend(labels)
+        elif len(labels) > 5:
+            split_happened = True
+            mid = len(labels) // 2
+            new_groups[group] = labels[:mid]
+            new_groups[group + '_'] = labels[mid:]
+        else:
+            new_groups[group] = labels
+
+    if miscellaneous:
+        new_groups['Miscellaneous'] = miscellaneous
+
+    if split_happened:
+        return split_large_groups(new_groups)
+    else:
+        return new_groups
+
+def group_benchmark_labels(benchmark_labels):
+    benchmark_groups = defaultdict(list)
+    for label in benchmark_labels:
+        group = re.match(r'^[^_\s]+', label)[0]
+        benchmark_groups[group].append(label)
+    return split_large_groups(benchmark_groups)
+
 def create_normalized_bar_chart(benchmarks: list[BenchmarkSeries], baseline_name: str) -> list[str]:
     latest_results = get_latest_results(benchmarks)

@@ -154,10 +195,7 @@ def create_normalized_bar_chart(benchmarks: list[BenchmarkSeries], baseline_name
 
     benchmark_labels = [b.label for b in benchmarks]

-    benchmark_groups = defaultdict(list)
-    for label in benchmark_labels:
-        group_name = label.split()[0]
-        benchmark_groups[group_name].append(label)
+    benchmark_groups = group_benchmark_labels(benchmark_labels)

     html_charts = []
