Skip to content

Commit 40d66b8

Browse files
committed
Update on "Add CMakeLists.txt to run GTEST for runtime/kernel/test"
Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags: [ghstack-poisoned]
2 parents 9f9fe4f + d4e221e commit 40d66b8

File tree

399 files changed

+14308
-2194
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

399 files changed

+14308
-2194
lines changed

.ci/docker/ci_commit_pins/buck2.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
2023-08-01
1+
2024-02-15

.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
930b60f5aa7e17402491900a545d96822796b638
1+
f5b99976adcbb01fd71bd0a39ea15bdac6c9e48a

.ci/scripts/setup-macos.sh

Lines changed: 15 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -27,34 +27,25 @@ install_buck() {
2727
brew install wget
2828
fi
2929

30-
BUCK2_NOT_AVAILABLE=false
31-
if ! command -v buck2 &> /dev/null; then
32-
BUCK2_NOT_AVAILABLE=true
33-
else
34-
BUCK2_BINARY=$(which buck2)
35-
BUCK2_ARCH=$(file -b "${BUCK2_BINARY}")
36-
37-
if [[ "${BUCK2_ARCH}" != "Mach-O 64-bit executable arm64" ]]; then
38-
echo "Reinstall buck2 because ${BUCK2_BINARY} is ${BUCK2_ARCH}, not 64-bit arm64"
39-
BUCK2_NOT_AVAILABLE=true
40-
fi
41-
fi
42-
43-
if [[ "${BUCK2_NOT_AVAILABLE}" == true ]]; then
44-
pushd .ci/docker
30+
pushd .ci/docker
4531

46-
BUCK2=buck2-aarch64-apple-darwin.zst
47-
BUCK2_VERSION=$(cat ci_commit_pins/buck2.txt)
32+
# TODO(huydo): This is a one-off copy of buck2 2024-02-15 to unblock Jon and
33+
# re-enable ShipIt. It’s not ideal that upgrading buck2 will require a manual
34+
# update of the cached binary on the S3 bucket too. Let me figure out if there is a
35+
# way to correctly implement the previous setup of installing a new version of
36+
# buck2 only when it’s needed. AFAIK, the complicated part was that buck2
37+
# --version doesn't say anything w.r.t its release version, i.e. 2024-02-15.
38+
# See D53878006 for more details.
39+
BUCK2=buck2-aarch64-apple-darwin.zst
40+
wget -q "https://ossci-macos.s3.amazonaws.com/${BUCK2}"
4841

49-
wget -q "https://github.com/facebook/buck2/releases/download/${BUCK2_VERSION}/${BUCK2}"
50-
zstd -d "${BUCK2}" -o buck2
42+
zstd -d "${BUCK2}" -o buck2
5143

52-
chmod +x buck2
53-
mv buck2 /opt/homebrew/bin
44+
chmod +x buck2
45+
mv buck2 /opt/homebrew/bin
5446

55-
rm "${BUCK2}"
56-
popd
57-
fi
47+
rm "${BUCK2}"
48+
popd
5849
}
5950

6051
function write_sccache_stub() {

.ci/scripts/test_llama.sh

Lines changed: 64 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,11 @@
66
# LICENSE file in the root directory of this source tree.
77

88
set -exu
9+
# shellcheck source=/dev/null
10+
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
911

1012
MODEL_NAME=$1 # stories110M.pt
11-
BUILD_TOOL=$2 # buck2
13+
BUILD_TOOL=$2 # buck2 or cmake
1214
DTYPE=$3 # fp16 or fp32
1315

1416
if [[ -z "${MODEL_NAME:-}" ]]; then
@@ -26,33 +28,58 @@ if [[ -z "${DTYPE:-}" ]]; then
2628
exit 1
2729
fi
2830

29-
which "${PYTHON_EXECUTABLE}"
31+
if [[ -z "${BUCK:-}" ]]; then
32+
BUCK=buck2
33+
fi
3034

31-
# Check build tool.
32-
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
33-
:
34-
else
35-
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 is supported atm"
36-
exit 1
35+
if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
36+
PYTHON_EXECUTABLE=python3
3737
fi
3838

39+
which "${PYTHON_EXECUTABLE}"
40+
41+
42+
cmake_install_executorch_libraries() {
43+
echo "Installing libexecutorch.a, libextension_module.so, libportable_ops_lib.a"
44+
rm -rf cmake-out
45+
retry cmake -DBUCK2="$BUCK" \
46+
-DCMAKE_INSTALL_PREFIX=cmake-out \
47+
-DCMAKE_BUILD_TYPE=Release \
48+
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
49+
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
50+
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
51+
-Bcmake-out .
52+
cmake --build cmake-out -j9 --target install --config Release
53+
}
54+
55+
cmake_build_llama_runner() {
56+
echo "Building llama runner"
57+
dir="examples/models/llama2"
58+
retry cmake -DBUCK2="$BUCK" \
59+
-DCMAKE_INSTALL_PREFIX=cmake-out \
60+
-DCMAKE_BUILD_TYPE=Release \
61+
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
62+
-Bcmake-out/${dir} \
63+
${dir}
64+
cmake --build cmake-out/${dir} -j9 --config Release
65+
66+
}
67+
3968
cleanup_files() {
4069
echo "Deleting downloaded and generated files"
4170
rm "${MODEL_NAME}"
4271
rm tokenizer.model
4372
rm tokenizer.bin
4473
rm "${EXPORTED_MODEL_NAME}"
74+
rm result.txt
75+
rm params.json
4576
}
4677

4778
# Download and create artifacts.
4879
PARAMS="params.json"
4980
touch "${PARAMS}"
5081
if [[ "${MODEL_NAME}" == "stories110M.pt" ]]; then
51-
# Download stories110M.pt and tokenizer from Github
52-
wget "https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.pt"
53-
wget "https://raw.githubusercontent.com/karpathy/llama2.c/master/tokenizer.model"
54-
# Create params.json file
55-
echo '{"dim": 768, "multiple_of": 32, "n_heads": 12, "n_layers": 12, "norm_eps": 1e-05, "vocab_size": 32000}' > "${PARAMS}"
82+
download_stories_model_artifacts
5683
else
5784
echo "Unsupported model name ${MODEL_NAME}"
5885
exit 1
@@ -72,16 +99,35 @@ fi
7299
# Export model.
73100
EXPORTED_MODEL_NAME="${EXPORTED_MODEL_NAME}.pte"
74101
echo "Exporting ${EXPORTED_MODEL_NAME}"
75-
python3 -m examples.models.llama2.export_llama -c stories110M.pt -p "${PARAMS}" -d "${DTYPE}"
102+
$PYTHON_EXECUTABLE -m examples.models.llama2.export_llama -c stories110M.pt -p "${PARAMS}" -d "${DTYPE}"
76103

77104
# Create tokenizer.bin.
78105
echo "Creating tokenizer.bin"
79-
buck2 run examples/models/llama2/tokenizer:tokenizer_py -- -t tokenizer.model -o tokenizer.bin
106+
$PYTHON_EXECUTABLE -m examples.models.llama2.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
80107

81-
# Run model.
82-
echo "Running ${EXPORTED_MODEL_NAME} in portable mode"
83-
RESULT=$(timeout 500s buck2 run examples/models/llama2:main -- --model_path="${EXPORTED_MODEL_NAME}" --tokenizer_path=tokenizer.bin --prompt="Once" --temperature=0) || true
84108

109+
RUNTIME_ARGS="--model_path=${EXPORTED_MODEL_NAME} --tokenizer_path=tokenizer.bin --prompt=Once --temperature=0 --seq_len=10"
110+
# Check build tool.
111+
echo "Running ${EXPORTED_MODEL_NAME} in portable mode"
112+
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
113+
# Run model.
114+
# shellcheck source=/dev/null
115+
$BUCK run examples/models/llama2:main -- ${RUNTIME_ARGS} > result.txt
116+
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
117+
cmake_install_executorch_libraries
118+
cmake_build_llama_runner
119+
# Run llama runner
120+
NOW=$(date +"%H:%M:%S")
121+
echo "Starting to run llama runner at ${NOW}"
122+
# shellcheck source=/dev/null
123+
cmake-out/examples/models/llama2/llama_main ${RUNTIME_ARGS} > result.txt
124+
NOW=$(date +"%H:%M:%S")
125+
echo "Finished at ${NOW}"
126+
else
127+
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
128+
exit 1
129+
fi
130+
RESULT=$(cat result.txt)
85131
# Check results.
86132
EXPECTED_PREFIX="Once upon a time,"
87133
# Expected result - may take too long to generate:

.ci/scripts/utils.sh

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -131,3 +131,12 @@ cmake_install_executorch_lib() {
131131
-Bcmake-out .
132132
cmake --build cmake-out -j9 --target install --config Release
133133
}
134+
135+
download_stories_model_artifacts() {
136+
# Download stories110M.pt and tokenizer from Github
137+
wget "https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.pt"
138+
wget "https://raw.githubusercontent.com/karpathy/llama2.c/master/tokenizer.model"
139+
# Create params.json file
140+
touch params.json
141+
echo '{"dim": 768, "multiple_of": 32, "n_heads": 12, "n_layers": 12, "norm_eps": 1e-05, "vocab_size": 32000}' > params.json
142+
}

.github/merge_rules.yaml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
- name: superuser
2+
patterns:
3+
- '*'
4+
approved_by:
5+
- pytorch/metamates
6+
mandatory_checks_name:
7+
- Facebook CLA Check
8+
- Lint
9+
- pull

.github/workflows/app-build.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ on:
1414
- build/build_apple_frameworks.sh
1515
- build/test_ios_ci.sh
1616
- examples/demo-apps/**
17+
- extension/module/**
1718
workflow_dispatch:
1819

1920
concurrency:

.github/workflows/pull.yml

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,13 @@ jobs:
8383
# Build and test ExecuTorch
8484
PYTHON_EXECUTABLE=python bash .ci/scripts/test.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" "${DEMO_BACKEND_DELEGATION}"
8585
86-
test-llama-linux:
87-
name: test-llama-linux
86+
test-llama-runner-linux:
87+
name: test-llama-runner-linux
8888
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
8989
strategy:
9090
matrix:
9191
dtype: [fp16, fp32]
92+
build-tool: [buck2, cmake]
9293
fail-fast: false
9394
with:
9495
runner: linux.2xlarge
@@ -102,13 +103,14 @@ jobs:
102103
conda activate "${CONDA_ENV}"
103104
104105
DTYPE=${{ matrix.dtype }}
106+
BUILD_TOOL=${{ matrix.build-tool }}
105107
106108
# Setup executorch
107109
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh buck2
108110
# Install requirements for export_llama
109111
PYTHON_EXECUTABLE=python bash examples/models/llama2/install_requirements.sh
110112
# Test llama2
111-
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M.pt buck2 "${DTYPE}"
113+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M.pt "${BUILD_TOOL}" "${DTYPE}"
112114
113115
test-custom-ops-linux:
114116
name: test-custom-ops-linux

.github/workflows/trunk.yml

Lines changed: 29 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -137,27 +137,31 @@ jobs:
137137
# Test selective build
138138
PYTHON_EXECUTABLE=python bash examples/portable/scripts/test_demo_backend_delegation.sh "${BUILD_TOOL}"
139139
140-
# TODO(Jerry-Ge): Enable this back https://github.com/pytorch/executorch/issues/1947
141-
# test-arm-backend-delegation:
142-
# name: test-arm-backend-delegation
143-
# uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
144-
# with:
145-
# runner: linux.2xlarge
146-
# docker-image: executorch-ubuntu-22.04-arm-sdk
147-
# submodules: 'true'
148-
# ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
149-
# script: |
150-
# # The generic Linux job chooses to use base env, not the one setup by the image
151-
# CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
152-
# conda activate "${CONDA_ENV}"
153-
154-
# source .ci/scripts/utils.sh
155-
# install_flatc_from_source
156-
# install_executorch
157-
158-
# # Test selective build
159-
# source /opt/arm-sdk/setup_path.sh
160-
# PYTHON_EXECUTABLE=python bash examples/arm/run.sh /opt/arm-sdk buck2
140+
test-arm-backend-delegation:
141+
name: test-arm-backend-delegation
142+
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
143+
with:
144+
runner: linux.2xlarge
145+
docker-image: executorch-ubuntu-22.04-arm-sdk
146+
submodules: 'true'
147+
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
148+
script: |
149+
# The generic Linux job chooses to use base env, not the one setup by the image
150+
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
151+
conda activate "${CONDA_ENV}"
152+
153+
source .ci/scripts/utils.sh
154+
install_flatc_from_source
155+
install_executorch
156+
157+
# Setup arm example environment (including TOSA tools)
158+
git config --global user.email "[email protected]"
159+
git config --global user.name "Github Executorch"
160+
bash examples/arm/setup.sh --i-agree-to-the-contained-eula
161+
162+
# Test ethos-u delegate examples with run.sh
163+
source examples/arm/ethos-u-scratch/setup_path.sh
164+
PYTHON_EXECUTABLE=python bash examples/arm/run.sh examples/arm/ethos-u-scratch/ buck2
161165
162166
test-arm-reference-delegation:
163167
name: test-arm-reference-delegation
@@ -176,13 +180,16 @@ jobs:
176180
install_flatc_from_source
177181
install_executorch
178182
183+
# Setup arm example environment (including TOSA tools)
179184
git config --global user.email "[email protected]"
180185
git config --global user.name "Github Executorch"
181186
bash examples/arm/setup.sh --i-agree-to-the-contained-eula
182187
183-
source /opt/arm-sdk/setup_path.sh
188+
# Test tosa_reference flow
189+
source examples/arm/ethos-u-scratch/setup_path.sh
184190
PYTHON_EXECUTABLE=python bash backends/arm/test/run_tosa_reference.sh
185191
192+
# Run Arm specific unit-tests
186193
# Run pytest only on specified folders. These test should migrate into
187194
# the _unittest.yml once that test env is fixed
188195
pytest -c /dev/null -v -n auto --cov=./ --cov-report=xml backends/arm/test/ops/ backends/arm/test/models

.gitmodules

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,3 +46,12 @@
4646
[submodule "backends/arm/third-party/ethos-u-core-driver"]
4747
path = backends/arm/third-party/ethos-u-core-driver
4848
url = https://review.mlplatform.org/ml/ethos-u/ethos-u-core-driver
49+
[submodule "backends/vulkan/third-party/VulkanMemoryAllocator"]
50+
path = backends/vulkan/third-party/VulkanMemoryAllocator
51+
url = https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git
52+
[submodule "backends/vulkan/third-party/volk"]
53+
path = backends/vulkan/third-party/volk
54+
url = https://github.com/zeux/volk
55+
[submodule "backends/vulkan/third-party/Vulkan-Headers"]
56+
path = backends/vulkan/third-party/Vulkan-Headers
57+
url = https://github.com/KhronosGroup/Vulkan-Headers

CMakeLists.txt

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -94,14 +94,24 @@ if(EXECUTORCH_ENABLE_EVENT_TRACER)
9494
add_definitions(-DET_EVENT_TRACER_ENABLED)
9595
endif()
9696

97-
# -O2: Moderate opt. -ffunction-sections -fdata-sections: breaks function and
97+
# -ffunction-sections -fdata-sections: breaks function and
9898
# data into sections so they can be properly gc'd. -s: strip symbol.
9999
# -fno-exceptions -fno-rtti: disables exceptions and runtime type.
100100
set(CMAKE_CXX_FLAGS_RELEASE
101101
"-O2 -ffunction-sections -fdata-sections -fno-exceptions -fno-rtti")
102102
if(NOT APPLE)
103103
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -s")
104104
endif()
105+
106+
option(OPTIMIZE_SIZE "Build executorch runtime optimizing for binary size" OFF)
107+
if(OPTIMIZE_SIZE)
108+
# -Os: Optimize for size
109+
set(CMAKE_CXX_FLAGS_RELEASE "-Os ${CMAKE_CXX_FLAGS_RELEASE}")
110+
else()
111+
# -O2: Moderate opt.
112+
set(CMAKE_CXX_FLAGS_RELEASE "-O2 ${CMAKE_CXX_FLAGS_RELEASE}")
113+
endif()
114+
105115
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g")
106116

107117
# Option to register quantized ops with quantized kernels. See
@@ -368,7 +378,10 @@ endif()
368378
# Build pybind
369379
option(EXECUTORCH_BUILD_PYBIND "Build pybindings" OFF)
370380
if(EXECUTORCH_BUILD_PYBIND)
371-
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
381+
if(NOT EXECUTORCH_BUILD_EXTENSION_DATA_LOADER)
382+
# This has already been added if above flag is on
383+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
384+
endif()
372385
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/sdk)
373386
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/third-party/pybind11)
374387

@@ -382,8 +395,8 @@ if(EXECUTORCH_BUILD_PYBIND)
382395
set(PYBIND_LINK_MPS "mpsdelegate")
383396
endif()
384397

385-
if(PYBIND_LINK_XNNPACK)
386-
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/xnnpack)
398+
if(EXECUTORCH_BUILD_XNNPACK)
399+
# set PYBIND_LINK_XNNPACK variable to link with portable lib library
387400
set(PYBIND_LINK_XNNPACK "xnnpack_backend")
388401
endif()
389402

@@ -424,6 +437,7 @@ if(EXECUTORCH_BUILD_PYBIND)
424437
flatcc
425438
portable_ops_lib
426439
util
440+
torch
427441
${PYBIND_LINK_COREML}
428442
${PYBIND_LINK_MPS}
429443
${PYBIND_LINK_XNNPACK}

0 commit comments

Comments
 (0)