Skip to content

Commit 5ef38d3

Browse files
authored
Consolidating executor runners in CMake - xnnpack (#11239)
### Summary Continuing the process of consolidating the executor runners to top level CMakeLists.txt file. Completing this process for the xnnpack backend in the cmake build flow. `Partially Fixes #<10819>` : #10819. ### Test plan Successfully ran the commands in the [building from source](https://github.com/BujSet/executorch/blob/main/docs/source/using-executorch-building-from-source.md) guide. Updated the github actions to not use the generic `executor_runner` and remove the invocation for the backend-specific `xnn_executor_runner`. Also ran the following formatting and lint checks: ```cmake-format -i backends/xnnpack/CMakeLists.txt``` ```cmake-lint backends/xnnpack/CMakeLists.txt``` Includes updates to documentation and tutorials (flows from examples and tutorials were verified to ensure build functionality). Note: the buck flow side of things was unmodified, and support for that flow remains for a future change.
1 parent 4348319 commit 5ef38d3

File tree

6 files changed

+39
-77
lines changed

6 files changed

+39
-77
lines changed

.ci/scripts/test_model.sh

Lines changed: 19 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -49,14 +49,24 @@ prepare_artifacts_upload() {
4949
}
5050

5151
build_cmake_executor_runner() {
52+
local backend_string_select="${1:-}"
5253
echo "Building executor_runner"
5354
rm -rf ${CMAKE_OUTPUT_DIR}
54-
cmake -DCMAKE_BUILD_TYPE=Debug \
55-
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
56-
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
57-
-B${CMAKE_OUTPUT_DIR} .
58-
59-
cmake --build ${CMAKE_OUTPUT_DIR} -j4 --config Debug
55+
mkdir ${CMAKE_OUTPUT_DIR}
56+
if [[ "$backend_string_select" == "XNNPACK" ]]; then
57+
echo "Backend $backend_string_select selected"
58+
(cd ${CMAKE_OUTPUT_DIR} \
59+
&& cmake -DCMAKE_BUILD_TYPE=Release \
60+
-DEXECUTORCH_BUILD_XNNPACK=ON \
61+
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" ..)
62+
cmake --build ${CMAKE_OUTPUT_DIR} -j4
63+
else
64+
cmake -DCMAKE_BUILD_TYPE=Debug \
65+
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
66+
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
67+
-B${CMAKE_OUTPUT_DIR} .
68+
cmake --build ${CMAKE_OUTPUT_DIR} -j4 --config Debug
69+
fi
6070
}
6171

6272
run_portable_executor_runner() {
@@ -111,19 +121,6 @@ test_model() {
111121
run_portable_executor_runner
112122
}
113123

114-
build_cmake_xnn_executor_runner() {
115-
echo "Building xnn_executor_runner"
116-
117-
(rm -rf ${CMAKE_OUTPUT_DIR} \
118-
&& mkdir ${CMAKE_OUTPUT_DIR} \
119-
&& cd ${CMAKE_OUTPUT_DIR} \
120-
&& retry cmake -DCMAKE_BUILD_TYPE=Release \
121-
-DEXECUTORCH_BUILD_XNNPACK=ON \
122-
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" ..)
123-
124-
cmake --build ${CMAKE_OUTPUT_DIR} -j4
125-
}
126-
127124
test_model_with_xnnpack() {
128125
WITH_QUANTIZATION=$1
129126
WITH_DELEGATION=$2
@@ -148,12 +145,11 @@ test_model_with_xnnpack() {
148145

149146
# Run test model
150147
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
148+
# TODO eventually buck should also use consolidated executor runners
151149
buck2 run //examples/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
152150
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
153-
if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
154-
build_cmake_xnn_executor_runner
155-
fi
156-
./${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner --model_path "${OUTPUT_MODEL_PATH}"
151+
build_cmake_executor_runner "XNNPACK"
152+
./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "${OUTPUT_MODEL_PATH}"
157153
else
158154
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
159155
exit 1

backends/xnnpack/CMakeLists.txt

Lines changed: 6 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,10 @@ foreach(fbs_file ${_xnnpack_schema__srcs})
6161
endforeach()
6262

6363
if(WIN32)
64-
set(MV_COMMAND powershell -Command "Move-Item -Path ${_xnnpack_flatbuffer__outputs} -Destination ${_xnnpack_schema__outputs}")
64+
set(MV_COMMAND
65+
powershell -Command
66+
"Move-Item -Path ${_xnnpack_flatbuffer__outputs} -Destination ${_xnnpack_schema__outputs}"
67+
)
6568
else()
6669
set(MV_COMMAND mv ${_xnnpack_flatbuffer__outputs} ${_xnnpack_schema__outputs})
6770
endif()
@@ -96,7 +99,8 @@ include(cmake/Dependencies.cmake)
9699
list(TRANSFORM _xnnpack_backend__srcs PREPEND "${EXECUTORCH_ROOT}/")
97100
add_library(xnnpack_backend ${_xnnpack_backend__srcs})
98101
target_link_libraries(
99-
xnnpack_backend PUBLIC ${xnnpack_third_party} executorch_core xnnpack_schema extension_threadpool
102+
xnnpack_backend PUBLIC ${xnnpack_third_party} executorch_core xnnpack_schema
103+
extension_threadpool
100104
)
101105

102106
target_include_directories(
@@ -114,46 +118,8 @@ target_include_directories(
114118
target_compile_options(xnnpack_backend PUBLIC ${_common_compile_options})
115119
target_link_options_shared_lib(xnnpack_backend)
116120

117-
if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
118-
list(APPEND xnn_executor_runner_libs optimized_native_cpu_ops_lib)
119-
else()
120-
list(APPEND xnn_executor_runner_libs portable_ops_lib)
121-
endif()
122-
123-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
124-
list(APPEND xnn_executor_runner_libs $<LINK_LIBRARY:WHOLE_ARCHIVE,custom_ops>)
125-
endif()
126-
127-
if(EXECUTORCH_BUILD_KERNELS_QUANTIZED)
128-
list(APPEND xnn_executor_runner_libs quantized_ops_lib)
129-
endif()
130-
131-
list(APPEND xnn_executor_runner_libs xnnpack_backend executorch)
132-
133-
# ios can only build library but not binary
134-
if(NOT CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$")
135-
#
136-
# xnn_executor_runner: Like executor_runner but with XNNPACK, the binary will
137-
# be at ${CMAKE_BINARY_DIR}/backends/xnnpack
138-
#
139-
list(TRANSFORM _xnn_executor_runner__srcs PREPEND "${EXECUTORCH_ROOT}/")
140-
add_executable(xnn_executor_runner ${_xnn_executor_runner__srcs})
141-
142-
if(EXECUTORCH_ENABLE_EVENT_TRACER)
143-
list(APPEND xnn_executor_runner_libs etdump)
144-
endif()
145-
146-
target_link_libraries(xnn_executor_runner gflags ${xnn_executor_runner_libs})
147-
target_compile_options(xnn_executor_runner PUBLIC ${_common_compile_options})
148-
if(EXECUTORCH_BUILD_PTHREADPOOL)
149-
target_link_libraries(xnn_executor_runner extension_threadpool pthreadpool)
150-
target_compile_definitions(xnn_executor_runner PRIVATE ET_USE_THREADPOOL)
151-
endif()
152-
endif()
153-
154121
install(
155122
TARGETS xnnpack_backend
156-
DESTINATION lib
157123
INCLUDES
158124
DESTINATION ${_common_include_directories}
159125
)

backends/xnnpack/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ After lowering to the XNNPACK Program, we can then prepare it for executorch and
9292

9393

9494
### Running the XNNPACK Model with CMake
95-
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
95+
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the executor_runner, which is a sample wrapper for the ExecuTorch Runtime. The XNNPACK Backend is enabled via the compilation flag `-DEXECUTORCH_BUILD_XNNPACK=ON`. We first begin by configuring the CMake build like such:
9696
```bash
9797
# cd to the root of executorch repo
9898
cd executorch
@@ -119,9 +119,9 @@ Then you can build the runtime components with
119119
cmake --build cmake-out -j9 --target install --config Release
120120
```
121121

122-
Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
122+
Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
123123
```bash
124-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
124+
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
125125
```
126126

127127
## Help & Improvements

docs/source/backend-delegates-xnnpack-reference.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ Since weight packing creates an extra copy of the weights inside XNNPACK, We fre
7070
When executing the XNNPACK subgraphs, we prepare the tensor inputs and outputs and feed them to the XNNPACK runtime graph. After executing the runtime graph, the output pointers are filled with the computed tensors.
7171

7272
#### **Profiling**
73-
We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](https://pytorch.org/executorch/main/tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `xnn_executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
73+
We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](https://pytorch.org/executorch/main/tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
7474

7575

7676
[comment]: <> (TODO: Refactor quantizer to a more official quantization doc)

docs/source/tutorial-xnnpack-delegate-lowering.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ Note in the example above,
141141
The generated model file will be named `[model_name]_xnnpack_[qs8/fp32].pte` depending on the arguments supplied.
142142

143143
## Running the XNNPACK Model with CMake
144-
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
144+
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the executor_runner, which is a sample wrapper for the ExecuTorch Runtime. The XNNPACK Backend is enabled via the compilation flag `-DEXECUTORCH_BUILD_XNNPACK=ON`. We first begin by configuring the CMake build like such:
145145
```bash
146146
# cd to the root of executorch repo
147147
cd executorch
@@ -168,15 +168,15 @@ Then you can build the runtime components with
168168
cmake --build cmake-out -j9 --target install --config Release
169169
```
170170

171-
Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
171+
Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
172172
```bash
173-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
173+
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
174174
# or to run the quantized variant
175-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_q8.pte
175+
./cmake-out/executor_runner --model_path=./mv2_xnnpack_q8.pte
176176
```
177177

178178
## Building and Linking with the XNNPACK Backend
179179
You can build the XNNPACK backend [CMake target](https://github.com/pytorch/executorch/blob/main/backends/xnnpack/CMakeLists.txt#L83), and link it with your application binary such as an Android or iOS application. For more information on this you may take a look at this [resource](using-executorch-android.md) next.
180180

181181
## Profiling
182-
To enable profiling in the `xnn_executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `xnn_executor_runner --help` for details).
182+
To enable profiling in the `executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `executor_runner --help` for details).

examples/xnnpack/README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ The following command will produce a floating-point XNNPACK delegated model `mv2
2424
python3 -m examples.xnnpack.aot_compiler --model_name="mv2" --delegate
2525
```
2626

27-
Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `xnn_executor_runner`. With cmake, you first configure your cmake with the following:
27+
Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `executor_runner`. With cmake, you first configure your cmake with the following:
2828

2929
```bash
3030
# cd to the root of executorch repo
@@ -56,7 +56,7 @@ cmake --build cmake-out -j9 --target install --config Release
5656
Now finally you should be able to run this model with the following command
5757

5858
```bash
59-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path ./mv2_xnnpack_fp32.pte
59+
./cmake-out/executor_runner --model_path ./mv2_xnnpack_fp32.pte
6060
```
6161

6262
## Quantization
@@ -80,7 +80,7 @@ python3 -m examples.xnnpack.quantization.example --help
8080
```
8181

8282
## Running the XNNPACK Model with CMake
83-
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
83+
After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the executor_runner, which is a sample wrapper for the ExecuTorch Runtime. The XNNPACK Backend is enabled via the compilation flag `-DEXECUTORCH_BUILD_XNNPACK=ON`. We first begin by configuring the CMake build like such:
8484
```bash
8585
# cd to the root of executorch repo
8686
cd executorch
@@ -107,9 +107,9 @@ Then you can build the runtime components with
107107
cmake --build cmake-out -j9 --target install --config Release
108108
```
109109

110-
Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
110+
Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
111111
```bash
112-
./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_quantized.pte
112+
./cmake-out/executor_runner --model_path=./mv2_quantized.pte
113113
```
114114

115115
## Delegating a Quantized Model

0 commit comments

Comments
 (0)