
Commit a8dbac4

Merge remote-tracking branch 'origin/main' into android-metadata
2 parents: 8da7261 + 1b063ca

72 files changed: +720 / -131 lines


.lintrunner.toml

Lines changed: 45 additions & 0 deletions
@@ -378,3 +378,48 @@ command = [
   '--',
   '@{{PATHSFILE}}',
 ]
+
+[[linter]]
+code = "TORCH_AO_IMPORT"
+include_patterns = ["**/*.py"]
+exclude_patterns = [
+    "third-party/**",
+    # TODO: remove exceptions as we migrate
+    # backends
+    "backends/arm/quantizer/**",
+    "backends/arm/test/ops/**",
+    "backends/vulkan/quantizer/**",
+    "backends/vulkan/test/**",
+    "backends/cadence/aot/quantizer/**",
+    "backends/qualcomm/quantizer/**",
+    "examples/qualcomm/**",
+    "backends/xnnpack/quantizer/**",
+    "backends/xnnpack/test/**",
+    "exir/tests/test_passes.py",
+    "extension/llm/export/builder.py",
+    "extension/llm/export/quantizer_lib.py",
+    "exir/tests/test_memory_planning.py",
+    "backends/transforms/duplicate_dynamic_quant_chain.py",
+    "exir/backend/test/demos/test_xnnpack_qnnpack.py",
+]
+
+command = [
+    "python3",
+    "-m",
+    "lintrunner_adapters",
+    "run",
+    "grep_linter",
+    "--pattern=\\bfrom torch\\.ao\\.quantization\\.(?:quantizer|observer|quantize_pt2e|pt2e)(?:\\.[A-Za-z0-9_]+)*\\b",
+    "--linter-name=TorchAOImport",
+    "--error-name=Prohibited torch.ao.quantization import",
+    """--error-description=\
+Imports from torch.ao.quantization are not allowed. \
+Please import from torchao.quantization.pt2e instead.\n \
+* torchao.quantization.pt2e (includes all the utils, including observers, fake quants etc.) \n \
+* torchao.quantization.pt2e.quantizer (quantizer related objects and utils) \n \
+* torchao.quantization.pt2e.quantize_pt2e (prepare_pt2e, prepare_qat_pt2e, convert_pt2e) \n\n \
+If you need something from torch.ao.quantization, you can add your file to an exclude_patterns for TORCH_AO_IMPORT in .lintrunner.toml. \
+""",
+    "--",
+    "@{{PATHSFILE}}",
+]
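
Note: in practice the rewrite this linter asks for is a one-line import change. A minimal before/after sketch of the same migration this commit applies to the CoreML quantizer test further below (the exact torchao submodule depends on which symbols you need):

    # Before: flagged by the TORCH_AO_IMPORT linter
    # from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

    # After: import the PT2E entry points from torchao instead
    from torchao.quantization.pt2e.quantize_pt2e import (
        convert_pt2e,
        prepare_pt2e,
    )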

.mypy.ini

Lines changed: 3 additions & 0 deletions
@@ -97,3 +97,6 @@ ignore_missing_imports = True
 
 [mypy-zstd]
 ignore_missing_imports = True
+
+[mypy-torchao.*]
+follow_untyped_imports = True

CMakeLists.txt

Lines changed: 3 additions & 4 deletions
@@ -362,6 +362,9 @@ add_library(executorch_core ${_executorch_core__srcs})
 add_library(executorch_no_prim_ops ALIAS executorch_core)
 
 target_link_libraries(executorch_core PRIVATE program_schema)
+if(ANDROID)
+  target_link_libraries(executorch_core PUBLIC log)
+endif()
 if(EXECUTORCH_USE_DL)
   # Check if dl exists for this toolchain and only then link it.
   find_library(DL_LIBRARY_EXISTS NAMES dl)
@@ -653,10 +656,6 @@ if(EXECUTORCH_BUILD_PYBIND)
   install(TARGETS portable_lib
           LIBRARY DESTINATION executorch/extension/pybindings
   )
-
-  if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
-
-  endif()
 endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_TRAINING)

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -420,7 +420,7 @@ for basics.
 - Use [this](docs/source/backend-delegates-integration.md) guide when
   integrating your delegate with ExecuTorch.
 - Refer to [this](docs/source/backend-delegates-dependencies.md) set of
-  guidelines when including a third-party depenency for your delegate.
+  guidelines when including a third-party dependency for your delegate.
 
 
 

backends/apple/coreml/test/test_coreml_quantizer.py

Lines changed: 2 additions & 2 deletions
@@ -15,12 +15,12 @@
 )
 
 from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer
-from torch.ao.quantization.quantize_pt2e import (
+from torch.export import export_for_training
+from torchao.quantization.pt2e.quantize_pt2e import (
     convert_pt2e,
     prepare_pt2e,
     prepare_qat_pt2e,
 )
-from torch.export import export_for_training
 
 
 class TestCoreMLQuantizer:

backends/arm/_passes/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@
 from .convert_any_default_dim_dims_pass import ConvertAnyDefaultDimDimsPass  # noqa
 from .convert_expand_copy_to_repeat import ConvertExpandCopyToRepeatPass  # noqa
 from .convert_full_like_to_full_pass import ConvertFullLikeToFullPass  # noqa
+from .convert_int_pow_to_mul import ConvertIntPowToMuls  # noqa
 from .convert_minmax_pass import ConvertMinMaxPass  # noqa
 from .convert_split_to_slice import ConvertSplitToSlicePass  # noqa
 from .convert_squeezes_to_view import ConvertSqueezesToViewPass  # noqa

backends/arm/_passes/arm_pass_manager.py

Lines changed: 2 additions & 1 deletion
@@ -18,6 +18,7 @@
     ConvertAnyDefaultDimDimsPass,
     ConvertExpandCopyToRepeatPass,
     ConvertFullLikeToFullPass,
+    ConvertIntPowToMuls,
     ConvertMinMaxPass,
     ConvertMmToBmmPass,
     ConvertSplitToSlicePass,
@@ -131,14 +132,14 @@ def _tosa_080_BI_pipeline(self, exported_program: ExportedProgram) -> GraphModule
 
     def _tosa_080_MI_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
         self.add_pass(DecomposeSqrtPass())
+        self.add_pass(ConvertIntPowToMuls())
         self.add_pass(ReplaceScalarWithTensorArgPassTOSAMI())
         self.add_pass(FuseQuantizedActivationPass())
         self.add_pass(RemoveGetItemPass())
         self.add_pass(ConvertSplitToSlicePass())
         self.add_pass(FuseBatchnorm2DPass(exported_program))
         self.add_pass(ConvertMmToBmmPass())
         self.add_pass(DecomposeLinearPass())
-        self.add_pass(DecomposeLinearVectorNormPass())
         self.add_pass(DecomposeLeakyReLUPass())
         self.add_pass(DecomposeBatchNormPass())
         self.add_pass(DecomposeLayerNormPass())
backends/arm/_passes/convert_int_pow_to_mul.py

Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
+# Copyright 2025 Arm Limited and/or its affiliates.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from executorch.backends.arm._passes import ArmPass
+from executorch.exir.dialects._ops import ops as exir_ops
+
+
+class ConvertIntPowToMuls(ArmPass):
+    """
+    Replaces pow with integer exponent with a series of multiplications.
+    Only handles pow.Tensor_Scalar and not pow.Tensor_Tensor.
+    Needs to be run before doing scalar to tensor conversion.
+    """
+
+    def call_operator(self, op, args, kwargs, meta):
+        if op != exir_ops.edge.aten.pow.Tensor_Scalar:
+            return super().call_operator(op, args, kwargs, meta)
+
+        x = args[0]
+        exp = args[1]
+
+        # Handle zero first and return early
+        if exp == 0:
+            # return a tensor of ones with the same shape as x
+            return super().call_operator(
+                exir_ops.edge.aten.full_like.default, (x, 1), {}, meta, True
+            )
+
+        if not isinstance(exp, int):
+            return super().call_operator(op, args, kwargs, meta)
+
+        # Handle negative exponent
+        if exp < 0:
+            x = super().call_operator(
+                exir_ops.edge.aten.reciprocal.default, (x,), {}, meta, True
+            )
+            exp = -exp
+
+        res = x
+
+        # Consider exponentiation by squaring, if exp turns out to be large.
+        # Now we just roll out the multiplications.
+        for _ in range(exp - 1):
+            res = super().call_operator(
+                exir_ops.edge.aten.mul.Tensor, (res, x), {}, meta, True
+            )
+
+        return res
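
Note: as the in-code comment says, the pass currently rolls out exp - 1 multiplications; square-and-multiply would need only O(log exp) of them. A standalone sketch of that idea in plain Python (illustrative only, not tied to the ExecuTorch pass API):

    def pow_as_muls(x: float, exp: int) -> float:
        # Mirror the pass: exponent 0 -> 1, negative exponent -> one reciprocal,
        # then build the positive power out of multiplications only.
        if exp == 0:
            return 1.0
        if exp < 0:
            x, exp = 1.0 / x, -exp
        result, base = 1.0, x
        while exp:                 # square-and-multiply: O(log exp) multiplies
            if exp & 1:
                result *= base
            base *= base
            exp >>= 1
        return result

    assert pow_as_muls(2.0, 10) == 2.0 ** 10
    assert pow_as_muls(2.0, -3) == 2.0 ** -3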

backends/arm/test/ops/test_eye.py

Lines changed: 113 additions & 0 deletions
@@ -0,0 +1,113 @@
+# Copyright 2025 Arm Limited and/or its affiliates.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from executorch.backends.arm.test import common
+from executorch.backends.arm.test.tester.test_pipeline import (
+    EthosU55PipelineBI,
+    EthosU85PipelineBI,
+    OpNotSupportedPipeline,
+    TosaPipelineBI,
+    TosaPipelineMI,
+)
+
+input_t = tuple[torch.Tensor]
+test_data_t = tuple[int, torch.dtype]
+
+
+class EyeAdd(torch.nn.Module):
+    aten_op: str = "torch.ops.aten.eye.default"
+
+    def __init__(self, n: int, dtype: torch.dtype):
+        super().__init__()
+        self.args = (n,)
+        self.dtype = dtype
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return torch.eye(*self.args, dtype=self.dtype) + x
+
+    test_data: dict[str, test_data_t] = {
+        "10x3x3": (lambda: (torch.randn(10, 3, 3),), (3, torch.float32)),
+        "10x1": (lambda: (torch.randn(10, 1),), (10, torch.float32)),
+        "int32_int32": (
+            lambda: (torch.randint(0, 10, [10], dtype=torch.int32),),
+            (10, torch.int32),
+        ),
+    }
+
+    test_data_not_delegated: dict[str, test_data_t] = {
+        "fp32_int64": (lambda: (torch.randn(10),), (10, torch.int64)),
+        "fp32_int32": (lambda: (torch.randn(10),), (10, torch.int32)),
+        "int32_int64": (
+            lambda: (torch.randint(0, 10, [10], dtype=torch.int32),),
+            (10, torch.int64),
+        ),
+    }
+
+
+@common.parametrize("test_data", EyeAdd.test_data)
+def test_eye_tosa_MI(test_data: test_data_t):
+    input_data, init_data = test_data
+    pipeline = TosaPipelineMI[input_t](
+        EyeAdd(*init_data),
+        input_data(),
+        EyeAdd.aten_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", EyeAdd.test_data)
+def test_eye_tosa_BI(test_data: test_data_t):
+    input_data, init_data = test_data
+    pipeline = TosaPipelineBI[input_t](
+        EyeAdd(*init_data),
+        input_data(),
+        EyeAdd.aten_op,
+    )
+    pipeline.pop_stage("check.quant_nodes")
+    pipeline.run()
+
+
+@common.parametrize("test_data", EyeAdd.test_data)
+def test_eye_u55_BI(test_data: test_data_t):
+    input_data, init_data = test_data
+    pipeline = EthosU55PipelineBI[input_t](
+        EyeAdd(*init_data),
+        input_data(),
+        EyeAdd.aten_op,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.pop_stage("check.quant_nodes")
+    pipeline.run()
+
+
+@common.parametrize("test_data", EyeAdd.test_data)
+def test_eye_u85_BI(test_data: test_data_t):
+    input_data, init_data = test_data
+    pipeline = EthosU85PipelineBI[input_t](
+        EyeAdd(*init_data),
+        input_data(),
+        EyeAdd.aten_op,
+        use_to_edge_transform_and_lower=True,
+    ).dump_artifact("to_edge_transform_and_lower")
+    pipeline.pop_stage("check.quant_nodes")
+    pipeline.run()
+
+
+@common.parametrize(
+    "test_data",
+    EyeAdd.test_data_not_delegated,
+    xfails={
+        "fp32_int32": "MLETORCG-716: Do not delegate empty networks to vela",
+        "fp32_int64": "MLETORCG-716: Do not delegate empty networks to vela",
+        "int32_int64": "MLETORCG-716: Do not delegate empty networks to vela",
+    },
+)
+def test_eye_tosa_BI_not_delegated(test_data: test_data_t):
+    input_data, init_data = test_data
+    pipeline = OpNotSupportedPipeline[input_t](
+        EyeAdd(*init_data), input_data(), non_delegated_ops={}, quantize=True
+    )
+    pipeline.run()
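
Note on the test data shapes: torch.eye(n) returns an (n, n) identity, so the addition in EyeAdd.forward relies on ordinary broadcasting against the input. A quick standalone check of the two float cases above (illustrative only, not part of the commit):

    import torch

    assert (torch.eye(3) + torch.randn(10, 3, 3)).shape == (10, 3, 3)   # "10x3x3" case
    assert (torch.eye(10) + torch.randn(10, 1)).shape == (10, 10)       # "10x1" case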
