Commit 4ae5bff

Merge branch 'main' into setup_debug_empty_fix2

2 parents 2379dea + 13b5605

593 files changed: 17728 additions, 6194 deletions


.buckconfig

Lines changed: 5 additions & 4 deletions
@@ -8,14 +8,15 @@
   root = .
   prelude = third-party/prelude
   shim = shim
+  shim_et = shim_et

 [repository_aliases]
   config = prelude
   ovr_config = prelude
-  toolchains = shim
-  fbcode = shim
-  fbcode_macros = shim
-  fbsource = shim
+  toolchains = shim_et
+  fbcode = shim_et
+  fbcode_macros = shim_et
+  fbsource = shim_et
   buck = shim

 [cxx]

.ci/scripts/build_llama_android.sh

Lines changed: 5 additions & 0 deletions
@@ -10,6 +10,11 @@ set -exu
 # shellcheck source=/dev/null
 source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

+if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
+  PYTHON_EXECUTABLE=python3
+fi
+which "${PYTHON_EXECUTABLE}"
+
 install_executorch_and_backend_lib() {
   echo "Installing executorch and xnnpack backend"
   clean_executorch_install_folders
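The new guard means the script works both with and without a caller-provided interpreter. A quick usage sketch (the conda path below is illustrative, not from this commit):

```bash
# Let the script fall back to python3 on its own.
bash .ci/scripts/build_llama_android.sh

# Or point it at a specific interpreter; the guard leaves this value untouched.
PYTHON_EXECUTABLE=/opt/conda/bin/python bash .ci/scripts/build_llama_android.sh
```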

.ci/scripts/gather_test_models.py

Lines changed: 5 additions & 1 deletion
@@ -104,8 +104,12 @@ def model_should_run_on_target_os(model: str, target_os: str) -> bool:
     For example, a big model can be disabled in macos due to the limited macos resources.
     """
     if target_os == "macos":
+        # Disabled in macos due to limited resources, and should stay that way even if
+        # we otherwise re-enable.
         return model not in ["llava"]
-    return True
+    # Disabled globally because we have test-llava-runner-linux that does a more
+    # comprehensive E2E test of llava.
+    return model not in ["llava"]


 def export_models_for_ci() -> dict[str, dict]:

.ci/scripts/setup-linux.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ fi
 # have already been installed, so we use PyTorch build from source here instead
 # of nightly. This allows CI to test against latest commits from PyTorch
 install_executorch "use-pt-pinned-commit"
-build_executorch_runner "${BUILD_TOOL}"
+build_executorch_runner "${BUILD_TOOL}" "${2:-Release}"

 if [[ "${GITHUB_BASE_REF:-}" == *main* || "${GITHUB_BASE_REF:-}" == *gh* ]]; then
   do_not_use_nightly_on_ci

.ci/scripts/setup-macos.sh

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ install_pytorch_and_domains
 # We build PyTorch from source here instead of using nightly. This allows CI to test against
 # the pinned commit from PyTorch
 install_executorch "use-pt-pinned-commit"
-build_executorch_runner "${BUILD_TOOL}"
+build_executorch_runner "${BUILD_TOOL}" "${2:-Release}"

 if [[ "${GITHUB_BASE_REF:-}" == *main* || "${GITHUB_BASE_REF:-}" == *gh* ]]; then
   do_not_use_nightly_on_ci

.ci/scripts/test_llava.sh

Lines changed: 3 additions & 2 deletions
@@ -30,9 +30,10 @@ fi
 NPROC=8
 if hash nproc &> /dev/null; then NPROC=$(nproc); fi

+python_lib=$($PYTHON_EXECUTABLE -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')
 EXECUTORCH_COMMON_CMAKE_ARGS=" \
   -DCMAKE_INSTALL_PREFIX=${BUILD_DIR} \
-  -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
+  -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
   -DEXECUTORCH_ENABLE_LOGGING=ON \
   -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
   -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
@@ -92,7 +93,7 @@ cmake_build_llava_runner_for_android() {
   -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
   -DANDROID_ABI=arm64-v8a \
   ${LLAVA_COMMON_CMAKE_ARGS} \
-  -DCMAKE_PREFIX_PATH="$python_lib" \
+  -DCMAKE_PREFIX_PATH="$python_lib" \
   -DLLAVA_RUNNER_NO_TORCH_DUMMY_IMAGE=ON \
   -B${BUILD_DIR}/${dir} \
   ${dir}

.ci/scripts/test_model.sh

Lines changed: 11 additions & 4 deletions
@@ -91,22 +91,29 @@ test_model() {
     # Install requirements for llama vision.
     bash examples/models/llama3_2_vision/install_requirements.sh
   fi
-  # python3 -m examples.portable.scripts.export --model_name="llama2" should works too
+  if [[ "${MODEL_NAME}" == "qwen2_5" ]]; then
+    # Install requirements for export_llama
+    bash examples/models/llama/install_requirements.sh
+    # Test export_llama script: python3 -m examples.models.llama.export_llama.
+    # Use Llama random checkpoint with Qwen 2.5 1.5b model configuration.
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/qwen2_5/1_5b_config.json
+    rm "./${MODEL_NAME}.pte"
+    return # Skip running with portable executor runner since portable doesn't support Qwen's biased linears.
+  fi
+
+  # Export a basic .pte and run the model.
   "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}" "${STRICT}"
   run_portable_executor_runner
 }

 build_cmake_xnn_executor_runner() {
   echo "Building xnn_executor_runner"
-  SITE_PACKAGES="$(${PYTHON_EXECUTABLE} -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
-  CMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch"

   (rm -rf ${CMAKE_OUTPUT_DIR} \
     && mkdir ${CMAKE_OUTPUT_DIR} \
     && cd ${CMAKE_OUTPUT_DIR} \
     && retry cmake -DCMAKE_BUILD_TYPE=Release \
       -DEXECUTORCH_BUILD_XNNPACK=ON \
-      -DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH" \
       -DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" ..)

   cmake --build ${CMAKE_OUTPUT_DIR} -j4
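Outside CI, the new qwen2_5 branch reduces to this export-and-cleanup sequence (commands taken from the diff above; run from the repo root with the llama example requirements installed):

```bash
# Install export_llama dependencies, as the CI branch does.
bash examples/models/llama/install_requirements.sh

# Export Qwen 2.5 1.5B using a random Llama demo checkpoint, then remove the artifact.
python3 -m examples.models.llama.export_llama \
  --model qwen2_5 \
  -c examples/models/llama/params/demo_rand_params.pth \
  -p examples/models/qwen2_5/1_5b_config.json
rm ./qwen2_5.pte
```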

.ci/scripts/test_qnn_static_llama.sh

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Copyright (c) Qualcomm Innovation Center, Inc.
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -exu
+
+source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+export EXECUTORCH_ROOT="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)"
+export QNN_SDK_ROOT=/tmp/qnn/2.28.0.241029
+export LD_LIBRARY_PATH="${QNN_SDK_ROOT}/lib/x86_64-linux-clang"
+export PYTHONPATH=".."
+cp schema/program.fbs exir/_serialize/program.fbs
+cp schema/scalar_type.fbs exir/_serialize/scalar_type.fbs
+cp -f build-x86/backends/qualcomm/PyQnnManagerAdaptor.cpython-310-x86_64-linux-gnu.so backends/qualcomm/python
+cp -f build-x86/backends/qualcomm/PyQnnWrapperAdaptor.cpython-310-x86_64-linux-gnu.so backends/qualcomm/python
+
+if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
+  PYTHON_EXECUTABLE=python3
+fi
+
+which "${PYTHON_EXECUTABLE}"
+
+# Although static llama CI does not require graphviz, it is required by test_qnn_delegate.py
+pip install graphviz
+
+# Download stories llama 110m artifacts
+download_stories_model_artifacts
+echo "Creating tokenizer.bin"
+$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+
+set +e
+# Compile only, as weight sharing is not applicable on x86
+$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir . --llama_artifacts . --compile_only
+exit_code1=$?
+
+# Check accuracy with weight sharing disabled, since x86 does not support weight sharing.
+$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir . --llama_artifacts . --enable_x86_64
+exit_code2=$?
+
+# Check the exit codes and print messages
+if [ $exit_code1 -ne 0 ]; then
+  echo "Static Llama compile-only test (with weight sharing) failed. $exit_code1."
+fi
+
+if [ $exit_code2 -ne 0 ]; then
+  echo "Static Llama accuracy test failed. $exit_code2."
+fi
+
+# Return failure if either program failed
+if [ $exit_code1 -ne 0 ] || [ $exit_code2 -ne 0 ]; then
+  exit 1
+else
+  exit 0
+fi
+set -e
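The set +e bookkeeping above is the standard way to let several must-pass commands all run before failing the job. A minimal, self-contained sketch of the pattern (step_one.sh and step_two.sh are placeholders):

```bash
#!/bin/bash
set -e

# Temporarily disable exit-on-error so both steps get a chance to run.
set +e
./step_one.sh   # placeholder command
rc1=$?
./step_two.sh   # placeholder command
rc2=$?
set -e

# Report every failure individually, then surface a single result to CI.
if [ $rc1 -ne 0 ]; then echo "step_one failed with exit code $rc1"; fi
if [ $rc2 -ne 0 ]; then echo "step_two failed with exit code $rc2"; fi
if [ $rc1 -ne 0 ] || [ $rc2 -ne 0 ]; then exit 1; fi
```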

.ci/scripts/test_quantized_aot_lib.sh

Lines changed: 0 additions & 3 deletions
@@ -16,13 +16,10 @@ CMAKE_OUTPUT_DIR=cmake-out

 build_cmake_quantized_aot_lib() {
   echo "Building quantized aot lib"
-  SITE_PACKAGES="$(${PYTHON_EXECUTABLE} -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
-  CMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch"
   (rm -rf ${CMAKE_OUTPUT_DIR} \
     && mkdir ${CMAKE_OUTPUT_DIR} \
     && cd ${CMAKE_OUTPUT_DIR} \
     && retry cmake -DCMAKE_BUILD_TYPE=Release \
-      -DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH" \
       -DEXECUTORCH_BUILD_KERNELS_QUANTIZED_AOT=ON \
       -DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" ..)

.ci/scripts/unittest-buck2.sh

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+# TODO: expand this to //...
+buck2 query //runtime/...
+
+# TODO: expand the covered scope of Buck targets.
+buck2 build //runtime/core/portable_type/...
+buck2 test //runtime/core/portable_type/...

.ci/scripts/unittest-linux-cmake.sh

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+# Run pytest with coverage
+pytest -n auto --cov=./ --cov-report=xml
+# Run gtest
+LLVM_PROFDATA=llvm-profdata-12 LLVM_COV=llvm-cov-12 \
+  test/run_oss_cpp_tests.sh

.ci/scripts/unittest-linux.sh

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+BUILD_TOOL=$1
+if [[ $BUILD_TOOL =~ ^(cmake|buck2)$ ]]; then
+  echo "Running unittests for ${BUILD_TOOL} ..."
+else
+  echo "Missing build tool (require buck2 or cmake), exiting..."
+  exit 1
+fi
+
+BUILD_MODE=$2
+if [[ "${BUILD_MODE:-}" =~ ^(Debug|Release)$ ]]; then
+  echo "Running tests in build mode ${BUILD_MODE} ..."
+else
+  echo "Unsupported build mode ${BUILD_MODE}, options are Debug or Release."
+  exit 1
+fi
+
+# The generic Linux job chooses to use base env, not the one setup by the image
+eval "$(conda shell.bash hook)"
+CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+conda activate "${CONDA_ENV}"
+
+# Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
+source .ci/scripts/setup-vulkan-linux-deps.sh
+
+PYTHON_EXECUTABLE=python \
+EXECUTORCH_BUILD_PYBIND=ON \
+CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+  .ci/scripts/setup-linux.sh "$BUILD_TOOL" "$BUILD_MODE"
+
+# Install llama3_2_vision dependencies.
+PYTHON_EXECUTABLE=python ./examples/models/llama3_2_vision/install_requirements.sh
+
+if [[ "$BUILD_TOOL" == "cmake" ]]; then
+  .ci/scripts/unittest-linux-cmake.sh
+elif [[ "$BUILD_TOOL" == "buck2" ]]; then
+  .ci/scripts/unittest-buck2.sh
+else
+  echo "Unknown build tool $BUILD_TOOL"
+  exit 1
+fi
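Given the argument checks above, a workflow would invoke the new entry point along these lines (a usage sketch; the calling workflow file is not part of this excerpt):

```bash
# CMake-built tests in Debug mode.
.ci/scripts/unittest-linux.sh cmake Debug

# Buck2 subset in Release mode.
.ci/scripts/unittest-linux.sh buck2 Release
```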

.ci/scripts/unittest-macos-cmake.sh

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+# Run pytest with coverage
+${CONDA_RUN} pytest -n auto --cov=./ --cov-report=xml
+# Run gtest
+LLVM_PROFDATA="xcrun llvm-profdata" LLVM_COV="xcrun llvm-cov" \
+  ${CONDA_RUN} test/run_oss_cpp_tests.sh

.ci/scripts/unittest-macos.sh

Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+BUILD_TOOL=$1
+if [[ $BUILD_TOOL =~ ^(cmake|buck2)$ ]]; then
+  echo "Running unittests for ${BUILD_TOOL} ..."
+else
+  echo "Missing build tool (require buck2 or cmake), exiting..."
+  exit 1
+fi
+
+BUILD_MODE=$2
+if [[ $BUILD_MODE =~ ^(Debug|Release)$ ]]; then
+  echo "Running tests in build mode ${BUILD_MODE} ..."
+else
+  echo "Unsupported build mode ${BUILD_MODE}, options are Debug or Release."
+  exit 1
+fi
+
+bash .ci/scripts/setup-conda.sh
+eval "$(conda shell.bash hook)"
+
+# Create temp directory for sccache shims
+export TMP_DIR=$(mktemp -d)
+export PATH="${TMP_DIR}:$PATH"
+trap 'rm -rfv ${TMP_DIR}' EXIT
+
+# Setup MacOS dependencies as there is no Docker support on MacOS atm
+PYTHON_EXECUTABLE=python \
+EXECUTORCH_BUILD_PYBIND=ON \
+CMAKE_ARGS="-DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+${CONDA_RUN} --no-capture-output \
+  .ci/scripts/setup-macos.sh "${BUILD_TOOL}" "${BUILD_MODE}"
+
+# Install llama3_2_vision dependencies.
+PYTHON_EXECUTABLE=python \
+${CONDA_RUN} --no-capture-output \
+  ./examples/models/llama3_2_vision/install_requirements.sh
+
+if [[ "$BUILD_TOOL" == "cmake" ]]; then
+  .ci/scripts/unittest-macos-cmake.sh
+elif [[ "$BUILD_TOOL" == "buck2" ]]; then
+  .ci/scripts/unittest-buck2.sh
+else
+  echo "Unknown build tool $BUILD_TOOL"
+  exit 1
+fi

.ci/scripts/utils.sh

Lines changed: 2 additions & 2 deletions
@@ -109,7 +109,7 @@ build_executorch_runner_cmake() {
   pushd "${CMAKE_OUTPUT_DIR}" || return
   # This command uses buck2 to gather source files and buck2 could crash flakily
   # on MacOS
-  retry cmake -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" -DCMAKE_BUILD_TYPE=Release ..
+  retry cmake -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" -DCMAKE_BUILD_TYPE="${1:-Release}" ..
   popd || return

   if [ "$(uname)" == "Darwin" ]; then
@@ -124,7 +124,7 @@ build_executorch_runner() {
   if [[ $1 == "buck2" ]]; then
     build_executorch_runner_buck2
   elif [[ $1 == "cmake" ]]; then
-    build_executorch_runner_cmake
+    build_executorch_runner_cmake "$2"
   else
     echo "Invalid build tool $1. Only buck2 and cmake are supported atm"
     exit 1
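Together with the setup-linux.sh and setup-macos.sh edits, the build mode now travels from the unittest entry points through the setup scripts into build_executorch_runner_cmake, falling back to Release at each hop via the ${N:-Release} expansion. A self-contained sketch of that idiom (the build function here is illustrative, not from the repo):

```bash
#!/usr/bin/env bash
set -eu

# ${1:-Release} expands to the first argument when given, else to "Release".
build() {
  local mode="${1:-Release}"
  echo "would run: cmake -DCMAKE_BUILD_TYPE=${mode} .."
}

build          # -> Release (caller omitted the mode)
build Debug    # -> Debug
```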

.github/ISSUE_TEMPLATE/bug-report.yml

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ body:
       A clear and concise description of what the bug is.

       ```python
-      # Sample code to reproduce the problem
+      # Sample code to reproduce the problem. If applicable, also include your model export command.
       ```

       ```

.github/scripts/extract_benchmark_results.py

Lines changed: 9 additions & 8 deletions
@@ -229,21 +229,22 @@ def extract_ios_metric(

         elif method == "forward":
             if metric_name == "Clock Monotonic Time, s":
-                benchmark_result["metric"] = (
-                    "generate_time(ms)"
-                    if "llama" in test_name
-                    else "avg_inference_latency(ms)"
-                )
+                benchmark_result["metric"] = "avg_inference_latency(ms)"
                 benchmark_result["actualValue"] = metric_value * 1000

             elif metric_name == "Memory Peak Physical, kB":
                 # NB: Showing the value in mB is friendlier IMO
                 benchmark_result["metric"] = "peak_inference_mem_usage(mb)"
                 benchmark_result["actualValue"] = metric_value / 1024

-        elif method == "generate" and metric_name == "Tokens Per Second, t/s":
-            benchmark_result["metric"] = "token_per_sec"
-            benchmark_result["actualValue"] = metric_value
+        elif method == "generate":
+            if metric_name == "Clock Monotonic Time, s":
+                benchmark_result["metric"] = "generate_time(ms)"
+                benchmark_result["actualValue"] = metric_value * 1000
+
+            elif metric_name == "Tokens Per Second, t/s":
+                benchmark_result["metric"] = "token_per_sec"
+                benchmark_result["actualValue"] = metric_value

     return benchmark_result
