Skip to content

Commit 93fa61f

Browse files
author
Guang Yang
committed
Add the example non-genai qnn model to ci and benchinfra
1 parent 9a98abb commit 93fa61f

File tree

4 files changed

+135
-7
lines changed

4 files changed

+135
-7
lines changed

.ci/scripts/build-qnn-sdk.sh

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
# This source code is licensed under the BSD-style license found in the
66
# LICENSE file in the root directory of this source tree.
77

8-
set -ex
8+
set -eux
99

1010
build_qnn_backend() {
1111
echo "Start building qnn backend."
@@ -16,4 +16,30 @@ build_qnn_backend() {
1616
bash backends/qualcomm/scripts/build.sh --skip_aarch64 --job_number 2 --release
1717
}
1818

19+
set_up_aot() {
20+
cd $EXECUTORCH_ROOT
21+
mkdir cmake-out
22+
pushd cmake-out
23+
cmake .. \
24+
-DCMAKE_INSTALL_PREFIX=$PWD \
25+
-DEXECUTORCH_BUILD_QNN=ON \
26+
-DQNN_SDK_ROOT=${QNN_SDK_ROOT} \
27+
-DEXECUTORCH_BUILD_SDK=ON \
28+
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
29+
-DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
30+
-DPYTHON_EXECUTABLE=python3 \
31+
-DEXECUTORCH_SEPARATE_FLATCC_HOST_PROJECT=OFF
32+
cmake --build $PWD --target "PyQnnManagerAdaptor" "PyQnnWrapperAdaptor" -j$(nproc)
33+
# install Python APIs to correct import path
34+
# The filename might vary depending on your Python and host version.
35+
cp -f backends/qualcomm/PyQnnManagerAdaptor.cpython-310-x86_64-linux-gnu.so $EXECUTORCH_ROOT/backends/qualcomm/python
36+
cp -f backends/qualcomm/PyQnnWrapperAdaptor.cpython-310-x86_64-linux-gnu.so $EXECUTORCH_ROOT/backends/qualcomm/python
37+
popd
38+
39+
# Workaround for fbs files in exir/_serialize
40+
cp schema/program.fbs exir/_serialize/program.fbs
41+
cp schema/scalar_type.fbs exir/_serialize/scalar_type.fbs
42+
}
43+
1944
build_qnn_backend
45+
set_up_aot

.ci/scripts/test_qnn_model.sh

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
#!/bin/bash
2+
# Copyright (c) Meta Platforms, Inc. and affiliates.
3+
# All rights reserved.
4+
#
5+
# This source code is licensed under the BSD-style license found in the
6+
# LICENSE file in the root directory of this source tree.
7+
8+
set -exu
9+
10+
# shellcheck source=/dev/null
11+
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
12+
13+
MODEL_NAME=$1
14+
if [[ -z "${MODEL_NAME:-}" ]]; then
15+
echo "Missing model name, exiting..."
16+
exit 1
17+
fi
18+
19+
which "${PYTHON_EXECUTABLE}"
20+
CMAKE_OUTPUT_DIR=cmake-out
21+
22+
test_qnn_model() {
23+
if [[ "${MODEL_NAME}" == "dl3" ]]; then
24+
"${PYTHON_EXECUTABLE}" -m examples.qualcomm.scripts.deeplab_v3 -b cmake-out -m SM8550 --compile_only --download
25+
fi
26+
}
27+
28+
test_qnn_model

.github/workflows/android-perf.yml

Lines changed: 57 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -135,24 +135,75 @@ jobs:
135135
fail-fast: false
136136
with:
137137
runner: linux.2xlarge
138-
docker-image: executorch-ubuntu-22.04-clang12
138+
docker-image: executorch-ubuntu-22.04-clang12-android
139139
submodules: 'true'
140140
timeout: 60
141141
upload-artifact: android-models
142142
script: |
143143
# The generic Linux job chooses to use base env, not the one setup by the image
144+
echo "::group::Setting up dev environment"
144145
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
145146
conda activate "${CONDA_ENV}"
146-
147+
if [[ ${{ matrix.delegate }} == "qnn" ]]; then
148+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
149+
PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
150+
fi
147151
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
148-
echo "Exporting model: ${{ matrix.model }}"
149-
export ARTIFACTS_DIR_NAME=artifacts-to-be-uploaded/${{ matrix.model }}_${{ matrix.delegate }}
152+
ARTIFACTS_DIR_NAME=artifacts-to-be-uploaded/${{ matrix.model }}_${{ matrix.delegate }}
153+
echo "::endgroup::"
154+
155+
echo "::group::Exporting ${{ matrix.delegate }} model: ${{ matrix.model }}"
156+
BUILD_MODE="cmake"
157+
DTYPE="fp32"
158+
if [[ ${{ matrix.delegate }} == "qnn" ]]; then
159+
DELEGATE_CONFIG="qnn"
160+
elif [[ ${{ matrix.delegate }} == "xnnpack" ]]; then
161+
DELEGATE_CONFIG="xnnpack+custom+qe"
162+
fi
163+
164+
if [[ ${{ matrix.model }} == "llama*" ]]; then
165+
# Install requirements for export_llama
166+
PYTHON_EXECUTABLE=python bash examples/models/llama2/install_requirements.sh
167+
# Test llama2
168+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh "${{ matrix.model }}.pt" "${BUILD_MODE}" "${DTYPE}" "${DELEGATE_CONFIG}" "${ARTIFACTS_DIR_NAME}"
169+
else
170+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_model.sh "${{ matrix.model }}"
171+
fi
172+
echo "::endgroup::"
173+
174+
test-llama-runner-qnn-linux:
175+
name: test-llama-runner-qnn-linux
176+
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
177+
strategy:
178+
matrix:
179+
dtype: [fp32]
180+
build-tool: [cmake]
181+
mode: [qnn]
182+
fail-fast: false
183+
with:
184+
runner: linux.2xlarge
185+
docker-image: executorch-ubuntu-22.04-clang12-android
186+
submodules: 'true'
187+
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
188+
timeout: 900
189+
script: |
190+
# The generic Linux job chooses to use base env, not the one setup by the image
191+
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
192+
conda activate "${CONDA_ENV}"
193+
194+
DTYPE=${{ matrix.dtype }}
195+
BUILD_TOOL=${{ matrix.build-tool }}
196+
MODE=${{ matrix.mode }}
197+
198+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
199+
PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
150200
151-
# TODO(T197546696): Note that the following scripts/steps only work for llama. It's expected to fail for other models+delegates.
201+
# Setup executorch
202+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh buck2
152203
# Install requirements for export_llama
153204
PYTHON_EXECUTABLE=python bash examples/models/llama2/install_requirements.sh
154205
# Test llama2
155-
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh "${{ matrix.model }}.pt" "cmake" "fp32" "xnnpack+custom+qe" "${ARTIFACTS_DIR_NAME}"\
206+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M.pt "${BUILD_TOOL}" "${DTYPE}" "${MODE}"
156207
157208
# Upload models to S3. The artifacts are needed not only by the device farm but also TorchChat
158209
upload-models:

.github/workflows/trunk.yml

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -270,3 +270,26 @@ jobs:
270270
PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/models/llama2/install_requirements.sh
271271
# Test llama2
272272
PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh stories110M.pt "${BUILD_TOOL}" "${DTYPE}" "${MODE}"
273+
274+
test-qnn-model:
275+
name: test-qnn-model
276+
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
277+
strategy:
278+
matrix:
279+
dtype: [fp32]
280+
model: [dl3]
281+
fail-fast: false
282+
with:
283+
runner: linux.2xlarge
284+
docker-image: executorch-ubuntu-22.04-clang12-android
285+
submodules: 'true'
286+
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
287+
timeout: 900
288+
script: |
289+
# The generic Linux job chooses to use base env, not the one setup by the image
290+
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
291+
conda activate "${CONDA_ENV}"
292+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh cmake
293+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
294+
PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
295+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_model.sh ${{ matrix.model }}

0 commit comments

Comments (0)