Skip to content

Commit 24e02e4

Browse files
authored
Merge branch 'main' into gh/trivedivivek/66/orig
2 parents 6b58d46 + 6fb42ef commit 24e02e4

File tree

68 files changed

+607
-3102
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

68 files changed

+607
-3102
lines changed

.ci/docker/build.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ case "${IMAGE_NAME}" in
4848
executorch-ubuntu-22.04-mediatek-sdk)
4949
MEDIATEK_SDK=yes
5050
CLANG_VERSION=12
51+
ANDROID_NDK_VERSION=r27b
5152
;;
5253
executorch-ubuntu-22.04-clang12-android)
5354
LINTRUNNER=""

.ci/scripts/build_llama_android.sh

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,5 @@ build_llama_runner() {
6060

6161
cmake --build cmake-android-out/examples/models/llama -j4 --config Release
6262
}
63-
install_flatc_from_source
6463
install_executorch_and_backend_lib
6564
build_llama_runner
File renamed without changes.

.ci/scripts/test_model.sh

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -209,9 +209,14 @@ test_model_with_qnn() {
209209
EXPORTED_MODEL=$(find "./${EXPORT_SCRIPT}" -type f -name "${MODEL_NAME}*.pte" -print -quit)
210210
}
211211

212+
# Run CoreML tests.
213+
#
214+
# @param should_test If true, build and test the model using the coreml_executor_runner.
212215
test_model_with_coreml() {
213-
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
214-
echo "coreml doesn't support buck2."
216+
local should_test="$1"
217+
218+
if [[ "${BUILD_TOOL}" != "cmake" ]]; then
219+
echo "coreml only supports cmake."
215220
exit 1
216221
fi
217222

@@ -229,6 +234,14 @@ test_model_with_coreml() {
229234
echo "No .pte file found"
230235
exit 1
231236
fi
237+
238+
# Run the model
239+
if [ "${should_test}" = true ]; then
240+
echo "Testing exported model with coreml_executor_runner..."
241+
local out_dir=$(mktemp -d)
242+
COREML_EXECUTOR_RUNNER_OUT_DIR="${out_dir}" examples/apple/coreml/scripts/build_executor_runner.sh
243+
"${out_dir}/coreml_executor_runner" --model_path "${EXPORTED_MODEL}"
244+
fi
232245
}
233246

234247
test_model_with_mps() {
@@ -247,7 +260,11 @@ elif [[ "${BACKEND}" == *"qnn"* ]]; then
247260
fi
248261
elif [[ "${BACKEND}" == *"coreml"* ]]; then
249262
echo "Testing ${MODEL_NAME} with coreml..."
250-
test_model_with_coreml
263+
should_test_coreml=false
264+
if [[ "${BACKEND}" == *"test"* ]]; then
265+
should_test_coreml=true
266+
fi
267+
test_model_with_coreml "${should_test_coreml}"
251268
if [[ $? -eq 0 ]]; then
252269
prepare_artifacts_upload
253270
fi

.ci/scripts/utils.sh

Lines changed: 6 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -80,25 +80,6 @@ install_pytorch_and_domains() {
8080
sccache --show-stats || true
8181
}
8282

83-
install_flatc_from_source() {
84-
# NB: This function could be used to install flatbuffer from source
85-
pushd third-party/flatbuffers || return
86-
87-
cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release
88-
if [ "$(uname)" == "Darwin" ]; then
89-
CMAKE_JOBS=$(( $(sysctl -n hw.ncpu) - 1 ))
90-
else
91-
CMAKE_JOBS=$(( $(nproc) - 1 ))
92-
fi
93-
cmake --build . -j "${CMAKE_JOBS}"
94-
95-
# Copy the flatc binary to conda path
96-
EXEC_PATH=$(dirname "$(which python)")
97-
cp flatc "${EXEC_PATH}"
98-
99-
popd || return
100-
}
101-
10283
build_executorch_runner_buck2() {
10384
# Build executorch runtime with retry as this step is flaky on macos CI
10485
retry buck2 build //examples/portable/executor_runner:executor_runner
@@ -111,9 +92,14 @@ build_executorch_runner_cmake() {
11192
mkdir "${CMAKE_OUTPUT_DIR}"
11293

11394
pushd "${CMAKE_OUTPUT_DIR}" || return
95+
if [[ $1 == "Debug" ]]; then
96+
CXXFLAGS="-fsanitize=address,undefined"
97+
else
98+
CXXFLAGS=""
99+
fi
114100
# This command uses buck2 to gather source files and buck2 could crash flakily
115101
# on MacOS
116-
retry cmake -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" -DCMAKE_BUILD_TYPE="${1:-Release}" ..
102+
CXXFLAGS="$CXXFLAGS" retry cmake -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" -DCMAKE_BUILD_TYPE="${1:-Release}" ..
117103
popd || return
118104

119105
if [ "$(uname)" == "Darwin" ]; then

.ci/scripts/wheel/test_macos.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,12 @@
1414
test_base.ModelTest(
1515
model=Model.Mv3,
1616
backend=Backend.XnnpackQuantizationDelegation,
17-
)
17+
),
18+
# Enable this once CoreML is supported out-of-the-box
19+
# https://github.com/pytorch/executorch/issues/9019
20+
# test_base.ModelTest(
21+
# model=Model.Mv3,
22+
# backend=Backend.CoreMlTest,
23+
# )
1824
]
1925
)

.github/workflows/apple.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ on:
1414
- build/build_apple_frameworks.sh
1515
- build/build_apple_llm_demo.sh
1616
- build/create_frameworks.sh
17-
- build/test_ios_ci.sh
17+
- .ci/scripts/test_ios_ci.sh
1818
- examples/demo-apps/apple_ios/**
1919
- extension/apple/**
2020
- extension/benchmark/apple/**
@@ -75,7 +75,7 @@ jobs:
7575
7676
# Build and test iOS Demo App
7777
PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
78-
build/test_ios_ci.sh "${ARTIFACTS_DIR_NAME}"
78+
.ci/scripts/test_ios_ci.sh "${ARTIFACTS_DIR_NAME}"
7979
8080
# Upload the test demo app to S3
8181
upload-demo-ios:

.github/workflows/trunk.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ jobs:
176176
id-token: write
177177
contents: read
178178
with:
179-
runner: linux.2xlarge
179+
runner: linux.2xlarge.memory
180180
docker-image: executorch-ubuntu-22.04-arm-sdk
181181
submodules: 'true'
182182
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -206,7 +206,7 @@ jobs:
206206
id-token: write
207207
contents: read
208208
with:
209-
runner: linux.2xlarge
209+
runner: linux.2xlarge.memory
210210
docker-image: executorch-ubuntu-22.04-arm-sdk
211211
submodules: 'true'
212212
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}

CMakeLists.txt

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -460,7 +460,7 @@ endif()
460460
# tools like `flatc`, along with example executables like `executor_runner` and
461461
# libraries that it uses, like `gflags`. Disabling this can be helpful when
462462
# cross-compiling, but some required tools that would have been built need to be
463-
# provided directly (via, for example, FLATC_EXECUTABLE).
463+
# provided directly.
464464
cmake_dependent_option(
465465
EXECUTORCH_BUILD_HOST_TARGETS "Build host-only targets." ON
466466
"NOT CMAKE_TOOLCHAIN_IOS" OFF
@@ -471,10 +471,9 @@ cmake_dependent_option(
471471
#
472472
cmake_dependent_option(
473473
EXECUTORCH_BUILD_FLATC "Build the flatc executable." ON
474-
"NOT FLATC_EXECUTABLE;EXECUTORCH_BUILD_HOST_TARGETS" OFF
474+
"NOT FLATC_EXECUTABLE" OFF
475475
)
476476

477-
478477
set(FLATBUFFERS_BUILD_FLATC OFF CACHE BOOL "")
479478
set(FLATBUFFERS_BUILD_FLATHASH OFF CACHE BOOL "")
480479
set(FLATBUFFERS_BUILD_FLATLIB OFF CACHE BOOL "")
@@ -507,6 +506,8 @@ if(EXECUTORCH_BUILD_FLATC)
507506
-DFLATBUFFERS_BUILD_TESTS=${FLATBUFFERS_BUILD_TESTS}
508507
-DFLATBUFFERS_INSTALL=${FLATBUFFERS_INSTALL}
509508
-DCMAKE_CXX_FLAGS="-DFLATBUFFERS_MAX_ALIGNMENT=${FLATBUFFERS_MAX_ALIGNMENT}"
509+
# If building for iOS, "unset" these variables to rely on the host (macOS) defaults.
510+
$<$<AND:$<BOOL:${CMAKE_TOOLCHAIN_IOS}>,$<BOOL:$<FILTER:${PLATFORM},EXCLUDE,^MAC>>>:-DCMAKE_OSX_SYSROOT=>
510511
INSTALL_COMMAND ""
511512
BUILD_BYPRODUCTS <BINARY_DIR>/flatc
512513
)
@@ -515,6 +516,8 @@ if(EXECUTORCH_BUILD_FLATC)
515516
# flatbuffers does not use CMAKE_BUILD_TYPE. Internally, the build forces Release
516517
# config, but from CMake's perspective the build type is always Debug.
517518
set(FLATC_EXECUTABLE ${BINARY_DIR}/$<CONFIG>/flatc.exe)
519+
elseif(CMAKE_GENERATOR STREQUAL "Xcode")
520+
set(FLATC_EXECUTABLE ${BINARY_DIR}/$<CONFIG>/flatc)
518521
else()
519522
set(FLATC_EXECUTABLE ${BINARY_DIR}/flatc)
520523
endif()
@@ -528,12 +531,7 @@ if(NOT FLATC_EXECUTABLE)
528531
find_program(FLATC_EXECUTABLE flatc)
529532

530533
if(NOT FLATC_EXECUTABLE)
531-
message(
532-
FATAL_ERROR
533-
"FLATC_EXECUTABLE must be set when EXECUTORCH_BUILD_FLATC is disabled. "
534-
"Note that EXECUTORCH_BUILD_FLATC may be disabled implicitly when "
535-
"cross-compiling or when EXECUTORCH_BUILD_HOST_TARGETS is disabled."
536-
)
534+
message(FATAL_ERROR "FLATC_EXECUTABLE must be set when EXECUTORCH_BUILD_FLATC is disabled.")
537535
endif()
538536
endif()
539537

backends/apple/coreml/scripts/build_tests.sh

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ cmake "$EXECUTORCH_ROOT_PATH" -B"$CMAKE_EXECUTORCH_BUILD_DIR_PATH" \
3232
-DCMAKE_TOOLCHAIN_FILE="$IOS_TOOLCHAIN_PATH" \
3333
-DPLATFORM=MAC_UNIVERSAL \
3434
-DDEPLOYMENT_TARGET=13.0 \
35-
-DFLATC_EXECUTABLE="$(which flatc)" \
3635
-DEXECUTORCH_BUILD_EXECUTOR_RUNNER=OFF \
3736
-DEXECUTORCH_BUILD_XNNPACK=OFF \
3837
-DEXECUTORCH_BUILD_GFLAGS=OFF

backends/arm/operator_support/convolution_support.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,9 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification):
3434

3535
for pad in output_padding:
3636
if pad != 0:
37+
self.reporter.report_reject(
38+
node, "Convolutions with non-zero output padding not implemented."
39+
)
3740
return False
3841

3942
# Hardware specific constraints
@@ -56,19 +59,33 @@ def _is_node_supported_u55(self, node: fx.Node):
5659
# Depthwise convolution
5760
for dim in shape_in[1:]:
5861
if not 1 <= dim <= 65536:
62+
self.reporter.report_reject(
63+
node,
64+
f"Depthwise convolution must have CWH <= 65536, got {dim})",
65+
)
5966
return False
6067
else:
6168
# Convolution
6269
if not 1 <= C_in <= 65536:
70+
self.reporter.report_reject(
71+
node, f"Convolution must have C <= 65536, got {C_in})"
72+
)
6373
return False
6474

6575
kernel_w = kernel[2]
6676
kernel_h = kernel[3] if len(kernel) > 3 else 1
6777
# Kernel condition misses constraint on sum of absolute weights
6878
if not 1 <= kernel_h <= 64 or not 1 <= kernel_w * kernel_h <= 4096:
79+
self.reporter.report_reject(
80+
node,
81+
f"Convolution needs to have kernel_y<=64, kernel_x*kernel_y<=4096, got kernel ({kernel_w}, {kernel_h})",
82+
)
6983
return False
7084

7185
if not self._stride_condition(node):
86+
self.reporter.report_reject(
87+
node, "Failed condition on stride, pad and dilation combination."
88+
)
7289
return False
7390

7491
return True

backends/arm/operator_support/pool_2d_support.py

Lines changed: 42 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,12 +54,35 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification):
5454
if len(node.args) > 3:
5555
# Padding case
5656
if not all(1 <= k <= 8 for k in kernel):
57+
self.reporter.report_reject(
58+
node, f"Avgpool2d with padding needs kernel dims < 8, got {kernel}"
59+
)
5760
return False
5861
else:
5962
if not kernel_check(kernel):
63+
self.reporter.report_reject(
64+
node,
65+
f"Avgpool2d needs kernel_y < 256, kernel_x*kernel_y<=65536, got {kernel}",
66+
)
6067
return False
6168

62-
return dim_check(shape) and shape[0] == 1 and stride_check(stride)
69+
if not dim_check(shape):
70+
self.reporter.report_reject(
71+
node,
72+
f"Avgpool2d needs N == 1, rest dims <= 65536, got shape {list(shape)}",
73+
)
74+
return False
75+
if not stride_check(stride):
76+
self.reporter.report_reject(
77+
node, f"Avgpool2d needs stride <= 3, got {stride}"
78+
)
79+
return False
80+
if not shape[0] == 1:
81+
self.reporter.report_reject(
82+
node, f"Avgpool2d needs N==1, got N=={shape[0]}"
83+
)
84+
return False
85+
return True
6386

6487

6588
@register_tosa_support_check
@@ -82,4 +105,21 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification):
82105
kernel = cast(tuple[int, int], node.args[1])
83106
stride = cast(tuple[int, int], node.args[2])
84107

85-
return kernel_check(kernel) and dim_check(shape) and stride_check(stride)
108+
if not kernel_check(kernel):
109+
self.reporter.report_reject(
110+
node,
111+
f"Maxpool2d needs kernel_y < 256, kernel_x*kernel_y<=65536, got {kernel}",
112+
)
113+
return False
114+
if not dim_check(shape):
115+
self.reporter.report_reject(
116+
node,
117+
f"Maxpool2d needs N == 1, rest dims <= 65536, got shape {list(shape)}",
118+
)
119+
return False
120+
if not stride_check(stride):
121+
self.reporter.report_reject(
122+
node, f"Maxpool2d needs stride <= 3, got {stride}"
123+
)
124+
return False
125+
return True

backends/arm/operator_support/reduce_sum_support.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,9 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification):
3434

3535
for dim in dim_list:
3636
if not 1 <= input_shape[dim] <= 65536:
37+
self.reporter.report_reject(
38+
node, f"sum needs dims < 65536, got shape {input_shape}"
39+
)
3740
return False
3841

3942
# We can't be certain of which dim is the last in memory yet,
@@ -45,7 +48,9 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification):
4548
for length in input_shape[dim + 1 :]:
4649
post_R_product *= length
4750
if not 1 <= pre_R_product <= 65536:
51+
self.reporter.report_reject(node, "Failed dim check")
4852
return False
4953
if not 1 <= post_R_product <= 65536:
54+
self.reporter.report_reject(node, "Failed dim check")
5055
return False
5156
return True

0 commit comments

Comments
 (0)