
Commit d371bc7

Merge branch 'pytorch:main' into Arm-backend-Test-TOSA,-Ethos-U55-and-Ethos-U85-on-github
2 parents: 1ad6338 + 241cd0c

209 files changed: +5193 −1630 lines


.github/scripts/extract_benchmark_results.py

Lines changed: 76 additions & 13 deletions
@@ -5,6 +5,7 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+import glob
 import json
 import logging
 import os
@@ -22,6 +23,7 @@
 
 BENCHMARK_RESULTS_FILENAME = "benchmark_results.json"
 ARTIFACTS_FILENAME_REGEX = re.compile(r"(android|ios)-artifacts-(?P<job_id>\d+).json")
+BENCHMARK_CONFIG_REGEX = re.compile(r"The benchmark config is (?P<benchmark_config>.+)")
 
 # iOS-related regexes and variables
 IOS_TEST_SPEC_REGEX = re.compile(
@@ -51,7 +53,7 @@ def __call__(
         parser.error(f"{values} is not a valid JSON file (*.json)")
 
 
-class ValidateOutputDir(Action):
+class ValidateDir(Action):
     def __call__(
         self,
         parser: ArgumentParser,
@@ -81,7 +83,7 @@ def parse_args() -> Any:
         "--output-dir",
         type=str,
         required=True,
-        action=ValidateOutputDir,
+        action=ValidateDir,
         help="the directory to keep the benchmark results",
     )
     parser.add_argument(
@@ -114,6 +116,13 @@ def parse_args() -> Any:
         required=True,
         help="which retry of the workflow this is",
     )
+    parser.add_argument(
+        "--benchmark-configs",
+        type=str,
+        required=True,
+        action=ValidateDir,
+        help="the directory to keep the benchmark configs",
+    )
 
     return parser.parse_args()
@@ -300,9 +309,60 @@ def extract_job_id(artifacts_filename: str) -> int:
     return int(m.group("job_id"))
 
 
+def read_all_benchmark_configs(benchmark_configs_dir: str) -> Dict[str, Dict[str, str]]:
+    """
+    Read all the benchmark configs that we can find
+    """
+    benchmark_configs = {}
+
+    for file in glob.glob(f"{benchmark_configs_dir}/*.json"):
+        filename = os.path.basename(file)
+        with open(file) as f:
+            try:
+                benchmark_configs[filename] = json.load(f)
+            except json.JSONDecodeError as e:
+                warning(f"Fail to load benchmark config {file}: {e}")
+
+    return benchmark_configs
+
+
+def read_benchmark_config(
+    artifact_s3_url: str, benchmark_configs_dir: str
+) -> Dict[str, str]:
+    """
+    Get the correct benchmark config for this benchmark run
+    """
+    try:
+        with request.urlopen(artifact_s3_url) as data:
+            for line in data.read().decode("utf8").splitlines():
+                m = BENCHMARK_CONFIG_REGEX.match(line)
+                if not m:
+                    continue
+
+                benchmark_config = m.group("benchmark_config")
+                filename = os.path.join(
+                    benchmark_configs_dir, f"{benchmark_config}.json"
+                )
+
+                if not os.path.exists(filename):
+                    warning(f"There is no benchmark config {filename}")
+                    continue
+
+                with open(filename) as f:
+                    try:
+                        return json.load(f)
+                    except json.JSONDecodeError as e:
+                        warning(f"Fail to load benchmark config {filename}: {e}")
+    except error.HTTPError:
+        warning(f"Fail to read the test spec output at {artifact_s3_url}")
+
+    return {}
+
+
 def transform(
     app_type: str,
     benchmark_results: List,
+    benchmark_config: Dict[str, str],
     repo: str,
     head_branch: str,
     workflow_name: str,
@@ -352,29 +412,25 @@ def transform(
             for r in benchmark_results
         ]
     elif schema_version == "v3":
-        quantization = (
-            r["benchmarkModel"]["quantization"]
-            if r["benchmarkModel"]["quantization"]
-            else "unknown"
-        )
+        v3_benchmark_results = []
         # From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
         return [
             {
                 "benchmark": {
                     "name": "ExecuTorch",
                     "mode": "inference",
-                    "dtype": quantization,
                     "extra_info": {
                         "app_type": app_type,
+                        # Just keep a copy of the benchmark config here
+                        "benchmark_config": json.dumps(benchmark_config),
                     },
                 },
                 "model": {
-                    "name": r["benchmarkModel"]["name"],
+                    "name": benchmark_config.get("model", r["benchmarkModel"]["name"]),
                     "type": "OSS model",
-                    "backend": r["benchmarkModel"].get("backend", ""),
-                    "extra_info": {
-                        "quantization": quantization,
-                    },
+                    "backend": benchmark_config.get(
+                        "config", r["benchmarkModel"].get("backend", "")
+                    ),
                 },
                 "metric": {
                     "name": r["metric"],
@@ -405,6 +461,7 @@ def main() -> None:
         "v2": [],
         "v3": [],
     }
+    benchmark_config = {}
 
     with open(args.artifacts) as f:
        for artifact in json.load(f):
@@ -420,6 +477,11 @@
             artifact_type = artifact["type"]
             artifact_s3_url = artifact["s3_url"]
 
+            if artifact_type == "TESTSPEC_OUTPUT":
+                benchmark_config = read_benchmark_config(
+                    artifact_s3_url, args.benchmark_configs
+                )
+
             if app_type == "ANDROID_APP":
                 benchmark_results = extract_android_benchmark_results(
                     job_name, artifact_type, artifact_s3_url
@@ -435,6 +497,7 @@
                 results = transform(
                     app_type,
                     benchmark_results,
+                    benchmark_config,
                     args.repo,
                     args.head_branch,
                     args.workflow_name,
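
The three new pieces in this file work together: the perf workflows echo a marker line into the test spec output, BENCHMARK_CONFIG_REGEX finds that line, and read_benchmark_config() loads the matching JSON file synced down from S3. A minimal standalone sketch of that lookup (the log text, config ID, and directory here are hypothetical; the real script streams the log from the artifact's S3 URL):

import json
import os
import re

BENCHMARK_CONFIG_REGEX = re.compile(r"The benchmark config is (?P<benchmark_config>.+)")


def lookup_benchmark_config(log_text: str, configs_dir: str) -> dict:
    # Scan the test spec output for the marker line the workflow echoed
    for line in log_text.splitlines():
        m = BENCHMARK_CONFIG_REGEX.match(line)
        if not m:
            continue
        # The captured ID names a JSON file uploaded by the perf workflow
        filename = os.path.join(configs_dir, f"{m.group('benchmark_config')}.json")
        if os.path.exists(filename):
            with open(filename) as f:
                return json.load(f)
    # An empty config keeps transform() on its old fallback behavior
    return {}


print(lookup_benchmark_config("The benchmark config is llama2_xnnpack_q8", "benchmark-configs"))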

.github/workflows/android-perf.yml

Lines changed: 39 additions & 2 deletions
@@ -98,7 +98,10 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Prepare the spec
+        id: prepare
         shell: bash
+        env:
+          BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
         working-directory: extension/benchmark/android/benchmark
         run: |
           set -eux
@@ -108,11 +111,20 @@
           # We could write a script to properly use jinja here, but there is only one variable,
           # so let's just sed it
           sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' android-llm-device-farm-test-spec.yml.j2
-          cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
 
+          BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
+          # The config for this benchmark run; save its ID in the test spec so that it can be fetched
+          # later by the upload script
+          sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' android-llm-device-farm-test-spec.yml.j2
+
+          cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
           # Just print the test spec for debugging
           cat android-llm-device-farm-test-spec.yml
 
+          # Save the benchmark configs so that we can use them later in the dashboard
+          echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
+          echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
+
       - name: Upload the spec
         uses: seemethere/upload-artifact-s3@v5
         with:
@@ -123,6 +135,16 @@
           if-no-files-found: error
           path: extension/benchmark/android/benchmark/android-llm-device-farm-test-spec.yml
 
+      - name: Update the benchmark configs
+        uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: gha-artifacts
+          s3-prefix: |
+            ${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
+          retention-days: 1
+          if-no-files-found: error
+          path: extension/benchmark/android/benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
+
   export-models:
     name: export-models
     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
@@ -397,6 +419,20 @@
 
           ls -lah artifacts
 
+      - name: Download the list of benchmark configs from S3
+        env:
+          BENCHMARK_CONFIGS_DIR: s3://gha-artifacts/${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
+        shell: bash
+        run: |
+          set -eux
+
+          mkdir -p benchmark-configs
+          pushd benchmark-configs
+          ${CONDA_RUN} aws s3 sync "${BENCHMARK_CONFIGS_DIR}" .
+          popd
+
+          ls -lah benchmark-configs
+
       - name: Extract the benchmark results JSON
         shell: bash
         run: |
@@ -414,7 +450,8 @@
             --head-branch ${{ github.head_ref || github.ref_name }} \
             --workflow-name "${{ github.workflow }}" \
             --workflow-run-id ${{ github.run_id }} \
-            --workflow-run-attempt ${{ github.run_attempt }}
+            --workflow-run-attempt ${{ github.run_attempt }} \
+            --benchmark-configs benchmark-configs
           done
 
           for SCHEMA in v2 v3; do
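
The Prepare step above derives a filename-safe BENCHMARK_CONFIG_ID with sed and writes the matrix JSON next to the test spec. The same sanitization, sketched in Python for clarity (the matrix entry is made up):

import json
import re

# Hypothetical matrix entry; the workflow receives it as ${{ toJSON(matrix) }}
matrix = {"model": "meta-llama/Llama-2-7b", "config": "xnnpack+custom+qe"}

# Mirrors the shell step: sed -e 's/[^A-Za-z0-9._-]/_/g'
benchmark_config_id = re.sub(
    r"[^A-Za-z0-9._-]", "_", f"{matrix['model']}_{matrix['config']}"
)
print(benchmark_config_id)  # meta-llama_Llama-2-7b_xnnpack_custom_qe

# Written next to the spec; the "Update the benchmark configs" step uploads it to S3
with open(f"{benchmark_config_id}.json", "w") as f:
    json.dump(matrix, f)

Because the same ID is substituted into the test spec template, the extraction script can later pair a device-farm run with its uploaded config file.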

.github/workflows/apple-perf.yml

Lines changed: 37 additions & 2 deletions
@@ -100,21 +100,33 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Prepare the spec
+        id: prepare
         shell: bash
+        env:
+          BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
         working-directory: extension/benchmark/apple/Benchmark
         run: |
           set -eux
 
-          echo "DEBUG: ${{ matrix.model }}"
           # The model will be exported in the next step to this S3 path
           MODEL_PATH="https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}/model.zip"
           # We could write a script to properly use jinja here, but there is only one variable,
           # so let's just sed it
           sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' default-ios-device-farm-appium-test-spec.yml.j2
+
+          BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
+          # The config for this benchmark run; save its ID in the test spec so that it can be fetched
+          # later by the upload script
+          sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' default-ios-device-farm-appium-test-spec.yml.j2
+
           cp default-ios-device-farm-appium-test-spec.yml.j2 default-ios-device-farm-appium-test-spec.yml
           # Just print the test spec for debugging
           cat default-ios-device-farm-appium-test-spec.yml
 
+          # Save the benchmark configs so that we can use them later in the dashboard
+          echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
+          echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
+
       - name: Upload the spec
         uses: seemethere/upload-artifact-s3@v5
         with:
@@ -125,6 +137,16 @@
           if-no-files-found: error
           path: extension/benchmark/apple/Benchmark/default-ios-device-farm-appium-test-spec.yml
 
+      - name: Update the benchmark configs
+        uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: gha-artifacts
+          s3-prefix: |
+            ${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
+          retention-days: 1
+          if-no-files-found: error
+          path: extension/benchmark/apple/Benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
+
   export-models:
     name: export-models
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
@@ -481,6 +503,18 @@
 
           ls -lah artifacts
 
+      - name: Download the list of benchmark configs from S3
+        env:
+          BENCHMARK_CONFIGS_DIR: s3://gha-artifacts/${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
+        shell: bash
+        run: |
+          set -eux
+          mkdir -p benchmark-configs
+          pushd benchmark-configs
+          ${CONDA_RUN} aws s3 sync "${BENCHMARK_CONFIGS_DIR}" .
+          popd
+          ls -lah benchmark-configs
+
       - name: Extract the benchmark results JSON
         shell: bash
         run: |
@@ -498,7 +532,8 @@
             --head-branch ${{ github.head_ref || github.ref_name }} \
             --workflow-name "${{ github.workflow }}" \
             --workflow-run-id ${{ github.run_id }} \
-            --workflow-run-attempt ${{ github.run_attempt }}
+            --workflow-run-attempt ${{ github.run_attempt }} \
+            --benchmark-configs benchmark-configs
           done
 
           for SCHEMA in v2 v3; do
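
On the consumer side, transform() now prefers the recovered config over what the app reported, and it degrades gracefully when read_benchmark_config() returns an empty dict. A small illustration of that fallback (the app result and configs are made up):

import json

# Made-up app-reported result
r = {"benchmarkModel": {"name": "llama2", "backend": "xnnpack"}}

# Once with a recovered config, once with the empty fallback
for benchmark_config in ({"model": "meta-llama_Llama-2-7b", "config": "xnnpack_q8"}, {}):
    model = {
        # Prefer the config's names; fall back to what the app reported
        "name": benchmark_config.get("model", r["benchmarkModel"]["name"]),
        "backend": benchmark_config.get("config", r["benchmarkModel"].get("backend", "")),
    }
    print(json.dumps(model))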

.github/workflows/lint.yml

Lines changed: 6 additions & 0 deletions
@@ -20,13 +20,19 @@ jobs:
     with:
       runner: linux.2xlarge
       docker-image: executorch-ubuntu-22.04-linter
+      submodules: 'true'
       fetch-depth: 0
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       timeout: 90
       script: |
         # The generic Linux job chooses to use base env, not the one setup by the image
         CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
         conda activate "${CONDA_ENV}"
+
+        # For mypy linting, we need to install executorch first so that
+        # it builds the python package information.
+        BUILD_TOOL="cmake"
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}"
 
         CACHE_DIRECTORY="/tmp/.lintbin"
         # Try to recover the cached binaries

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1,5 +1,6 @@
 .hypothesis
 buck-out/
+.mypy_cache/
 buck2-bin/
 cmake-out*
 .DS_Store
