Skip to content

Commit 7f829bb

Browse files
authored
Merge branch 'main' into gh/trivedivivek/109/orig
2 parents b966fce + 0e35c30 commit 7f829bb

File tree

42 files changed

+1077
-224
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+1077
-224
lines changed

.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
01f1cc44cbbfdf6307aa01b803a4ee22f9ade946
1+
5616fa4a68718ead203314a3467f7dd9547153ae

.github/workflows/pull.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,7 @@ jobs:
371371
size=${arr[4]}
372372
# threshold=48120 on devserver with gcc11.4
373373
# todo(lfq): update once binary size is below 50kb.
374-
threshold="51408"
374+
threshold="55504"
375375
if [[ "$size" -le "$threshold" ]]; then
376376
echo "Success $size <= $threshold"
377377
else
@@ -406,7 +406,7 @@ jobs:
406406
output=$(ls -la cmake-out/test/size_test)
407407
arr=($output)
408408
size=${arr[4]}
409-
threshold="47560"
409+
threshold="51656"
410410
if [[ "$size" -le "$threshold" ]]; then
411411
echo "Success $size <= $threshold"
412412
else

backends/arm/quantizer/quantization_annotator.py

Lines changed: 35 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,10 @@ def _is_ok_for_quantization(
9595
continue
9696

9797
for n_arg in _as_list(node.args[quant_property.index]):
98-
assert isinstance(n_arg, Node)
98+
if not isinstance(n_arg, Node):
99+
raise TypeError(
100+
f"n_arg must be a Node instance, got {type(n_arg).__name__!r}"
101+
)
99102
if not is_ok_for_quantization(n_arg, gm): # type: ignore[attr-defined]
100103
logger.debug(
101104
f'could not quantize node due to input "{node}": '
@@ -108,7 +111,10 @@ def _is_ok_for_quantization(
108111

109112

110113
def _annotate_input(node: Node, quant_property: _QuantProperty):
111-
assert not is_annotated(node)
114+
if is_annotated(node):
115+
raise RuntimeError(
116+
f"Cannot annotate input: node '{node.name}' is already annotated"
117+
)
112118
if quant_property.optional and (
113119
quant_property.index >= len(node.args)
114120
or node.args[quant_property.index] is None
@@ -120,17 +126,28 @@ def _annotate_input(node: Node, quant_property: _QuantProperty):
120126
_as_list(quant_property.qspec),
121127
strict=True,
122128
):
123-
assert isinstance(n_arg, Node)
129+
if not isinstance(n_arg, Node):
130+
raise TypeError(
131+
f"n_arg must be a Node instance, got {type(n_arg).__name__!r}"
132+
)
124133
annotate_input_qspec_map(node, n_arg, qspec)
125134
if quant_property.mark_annotated:
126135
mark_node_as_annotated(n_arg) # type: ignore[attr-defined]
127136

128137

129138
def _annotate_output(node: Node, quant_property: _QuantProperty):
130-
assert not is_annotated(node)
131-
assert not quant_property.mark_annotated
132-
assert not quant_property.optional
133-
assert quant_property.index == 0, "Only one output annotation supported currently"
139+
if is_annotated(node):
140+
raise RuntimeError(
141+
f"Cannot annotate output: node '{node.name}' is already annotated"
142+
)
143+
if quant_property.mark_annotated:
144+
raise ValueError(
145+
"quant_property.mark_annotated must be False for output annotation"
146+
)
147+
if quant_property.optional:
148+
raise ValueError("quant_property.optional must be False for output annotation")
149+
if quant_property.index != 0:
150+
raise ValueError("Only one output annotation supported currently")
134151

135152
annotate_output_qspec(node, quant_property.qspec)
136153

@@ -145,7 +162,9 @@ def _match_pattern(
145162
146163
Each 'pattern' element is composed of a list of disjunctive nodes types.
147164
"""
148-
assert len(pattern) > 0, "No pattern provided"
165+
if len(pattern) < 1:
166+
raise ValueError("No pattern provided")
167+
149168
if filter_fn is not None:
150169
if not filter_fn(node):
151170
return False
@@ -417,8 +436,14 @@ def any_or_hardtanh_min_zero(n: Node):
417436
torch.ops.aten.concatenate.default,
418437
torch.ops.aten.stack.default,
419438
):
420-
assert isinstance(node.args[0], list)
421-
assert len(node.args[0]) != 0
439+
# first argument should be a non-empty list of nodes
440+
if not isinstance(node.args[0], list):
441+
raise TypeError(
442+
"Expected node.args[0] to be a list, got "
443+
f"{type(node.args[0]).__name__!r}"
444+
)
445+
if len(node.args[0]) == 0:
446+
raise ValueError("Expected non-empty list for node.args[0]")
422447

423448
shared_qspec = SharedQuantizationSpec((node.args[0][0], node))
424449
quant_properties.quant_inputs = [

backends/arm/scripts/build_executorch.sh

Lines changed: 1 addition & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -54,47 +54,9 @@ source ${setup_path_script}
5454

5555
et_build_dir="${et_build_root}/cmake-out"
5656

57-
# Used for flatcc host executable if Devtools is used
58-
et_build_host_dir=${et_build_root}/cmake-out-host-tools
59-
6057
set -x
6158
cd "${et_root_dir}"
6259

63-
if [ "$build_with_etdump" = true ] ; then
64-
( set +x ;
65-
echo "--------------------------------------------------------------------------------" ;
66-
echo "Build ExecuTorch Libraries host flatcc bin ${build_type} into ${et_build_host_dir}/bin/flatcc" ;
67-
echo "--------------------------------------------------------------------------------" )
68-
69-
# Build host flatcc bin
70-
# This is a way to work around that the flatcc executable gets built for target (e.g. Arm) later
71-
# and get replaced. flatcc is a tool used on the host for etdump and BundleIO handling.
72-
# The way to solve this is to generate it once for the host, then copy it to ${et_build_host_dir}/bin
73-
# and later point that out with -DFLATCC_EXECUTABLE=${et_build_host_dir}/bin/flatcc later.
74-
75-
cmake \
76-
-DCMAKE_INSTALL_PREFIX=${et_build_host_dir} \
77-
-DCMAKE_BUILD_TYPE=${build_type} \
78-
-DEXECUTORCH_BUILD_EXECUTOR_RUNNER=OFF \
79-
-DEXECUTORCH_ENABLE_LOGGING=ON \
80-
-DEXECUTORCH_BUILD_ARM_BAREMETAL=ON \
81-
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
82-
-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
83-
-DEXECUTORCH_BUILD_DEVTOOLS=ON \
84-
-DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
85-
-DFLATCC_ALLOW_WERROR=OFF \
86-
-B"${et_build_host_dir}" \
87-
"${et_root_dir}"
88-
89-
# third-party/flatcc/bin/flatcc gets built already in the cmake config step above
90-
# so there is no cmake building step done
91-
92-
# Copy host flatcc executable so it's saved when we build for target (Arm) later
93-
et_build_host_dir=$(realpath ${et_build_host_dir})
94-
mkdir -p ${et_build_host_dir}/bin
95-
cp third-party/flatcc/bin/flatcc ${et_build_host_dir}/bin
96-
fi
97-
9860
( set +x ;
9961
echo "--------------------------------------------------------------------------------" ;
10062
echo "Build ExecuTorch target libs ${build_type} into '${et_build_dir}'" ;
@@ -111,8 +73,7 @@ if [ "$build_with_etdump" = true ] ; then
11173
build_with_etdump_flags="-DEXECUTORCH_BUILD_DEVTOOLS=ON \
11274
-DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
11375
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=OFF \
114-
-DFLATCC_ALLOW_WERROR=OFF \
115-
-DFLATCC_EXECUTABLE=${et_build_host_dir}/bin/flatcc "
76+
-DFLATCC_ALLOW_WERROR=OFF "
11677
fi
11778

11879
echo "Building with Devtools: ${build_devtools_flags} ${build_with_etdump_flags}"

backends/arm/test/tester/analyze_output_utils.py

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -154,6 +154,13 @@ def print_error_diffs(
154154
output_str += f"BATCH {n}\n"
155155
result_batch = result[n, :, :, :]
156156
reference_batch = reference[n, :, :, :]
157+
158+
if reference_batch.dtype == torch.bool or result_batch.dtype == torch.bool:
159+
mismatches = (reference_batch != result_batch).sum().item()
160+
total = reference_batch.numel()
161+
output_str += f"(BOOLEAN tensor) {mismatches} / {total} elements differ ({mismatches / total:.2%})\n"
162+
continue
163+
157164
is_close = torch.allclose(result_batch, reference_batch, rtol, atol)
158165
if is_close:
159166
output_str += ".\n"
@@ -180,14 +187,15 @@ def print_error_diffs(
180187
output_str += _print_elements(
181188
result[n, :, :, :], reference[n, :, :, :], C, H, W, rtol, atol
182189
)
183-
184-
reference_range = torch.max(reference) - torch.min(reference)
185-
diff = torch.abs(reference - result).flatten()
186-
diff = diff[diff.nonzero()]
187-
if not len(diff) == 0:
188-
diff_percent = diff / reference_range
189-
output_str += "\nMEAN MEDIAN MAX MIN (error as % of reference output range)\n"
190-
output_str += f"{torch.mean(diff_percent):<8.2%} {torch.median(diff_percent):<8.2%} {torch.max(diff_percent):<8.2%} {torch.min(diff_percent):<8.2%}\n"
190+
# Only compute numeric error metrics if tensor is not boolean
191+
if reference.dtype != torch.bool and result.dtype != torch.bool:
192+
reference_range = torch.max(reference) - torch.min(reference)
193+
diff = torch.abs(reference - result).flatten()
194+
diff = diff[diff.nonzero()]
195+
if not len(diff) == 0:
196+
diff_percent = diff / reference_range
197+
output_str += "\nMEAN MEDIAN MAX MIN (error as % of reference output range)\n"
198+
output_str += f"{torch.mean(diff_percent):<8.2%} {torch.median(diff_percent):<8.2%} {torch.max(diff_percent):<8.2%} {torch.min(diff_percent):<8.2%}\n"
191199

192200
# Over-engineer separators to match output width
193201
lines = output_str.split("\n")

backends/mediatek/partitioner.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,7 @@ def ops_to_not_decompose(
8181
torch.ops.aten.upsample_bilinear2d.vec,
8282
torch.ops.aten.upsample_nearest2d.default,
8383
torch.ops.aten.upsample_nearest2d.vec,
84+
torch.ops.aten._safe_softmax.default,
8485
]
8586
return (ops_not_decompose, None)
8687

backends/mediatek/scripts/mtk_build.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ rm -rf cmake-android-out && mkdir cmake-android-out && cd cmake-android-out
3333
cmake -DBUCK2="$BUCK_PATH" \
3434
-DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake" \
3535
-DANDROID_ABI=arm64-v8a \
36+
-DANDROID_PLATFORM=android-26 \
3637
-DEXECUTORCH_BUILD_NEURON=ON \
3738
-DNEURON_BUFFER_ALLOCATOR_LIB="$NEURON_BUFFER_ALLOCATOR_LIB" \
3839
..

backends/xnnpack/test/tester/tester.py

Lines changed: 25 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Copyright (c) Meta Platforms, Inc. and affiliates.
2-
# Copyright 2024-2025 Arm Limited and/or its affiliates.
32
# All rights reserved.
3+
# Copyright 2024-2025 Arm Limited and/or its affiliates.
44
#
55
# This source code is licensed under the BSD-style license found in the
66
# LICENSE file in the root directory of this source tree.
@@ -714,23 +714,30 @@ def _assert_outputs_equal(model_output, ref_output, atol=1e-03, rtol=1e-03):
714714
assert (
715715
ref.shape == model.shape
716716
), f"Output {i} shape {model.shape} does not match reference output shape {ref.shape}"
717-
assert torch.allclose(
718-
model,
719-
ref,
720-
atol=atol,
721-
rtol=rtol,
722-
), (
723-
f"Output {i} does not match reference output.\n"
724-
f"\tGiven atol: {atol}, rtol: {rtol}.\n"
725-
f"\tOutput tensor shape: {model.shape}, dtype: {model.dtype}\n"
726-
f"\tDifference: max: {torch.max(model-ref)}, abs: {torch.max(torch.abs(model-ref))}, mean abs error: {torch.mean(torch.abs(model-ref))}.\n"
727-
f"\t-- Model vs. Reference --\n"
728-
f"\t Numel: {model.numel()}, {ref.numel()}\n"
729-
f"\tMedian: {model.median()}, {ref.median()}\n"
730-
f"\t Mean: {model.mean()}, {ref.mean()}\n"
731-
f"\t Max: {model.max()}, {ref.max()}\n"
732-
f"\t Min: {model.min()}, {ref.min()}\n"
733-
)
717+
if model.dtype == torch.bool:
718+
assert torch.equal(model, ref), (
719+
f"Output {i} (bool tensor) does not match reference output.\n"
720+
f"\tShape: {model.shape}\n"
721+
f"\tMismatched count: {(model != ref).sum().item()} / {model.numel()}\n"
722+
)
723+
else:
724+
assert torch.allclose(
725+
model,
726+
ref,
727+
atol=atol,
728+
rtol=rtol,
729+
), (
730+
f"Output {i} does not match reference output.\n"
731+
f"\tGiven atol: {atol}, rtol: {rtol}.\n"
732+
f"\tOutput tensor shape: {model.shape}, dtype: {model.dtype}\n"
733+
f"\tDifference: max: {torch.max(model-ref)}, abs: {torch.max(torch.abs(model-ref))}, mean abs error: {torch.mean(torch.abs(model-ref))}.\n"
734+
f"\t-- Model vs. Reference --\n"
735+
f"\t Numel: {model.numel()}, {ref.numel()}\n"
736+
f"\tMedian: {model.median()}, {ref.median()}\n"
737+
f"\t Mean: {model.mean()}, {ref.mean()}\n"
738+
f"\t Max: {model.max()}, {ref.max()}\n"
739+
f"\t Min: {model.min()}, {ref.min()}\n"
740+
)
734741

735742
@staticmethod
736743
def _compare_outputs(

0 commit comments

Comments
 (0)