
Commit d5b1c04

Merge branch 'main' into gh/jackzhxng/14/orig
2 parents bc91c87 + 954f0e9 commit d5b1c04

164 files changed: +7469 −1673 lines

Note: large commits have some content hidden by default; only a subset of the 164 changed files is shown below.


.ci/scripts/unittest-buck2.sh

Lines changed: 6 additions & 4 deletions
@@ -15,7 +15,7 @@ buck2 query "//backends/apple/... + //backends/example/... + \
   //kernels/optimized/... + //kernels/portable/... + //kernels/quantized/... + \
   //kernels/test/... + //runtime/... + //schema/... + //test/... + //util/..."

-UNBUILDABLE_OPTIMIZED_OPS_REGEX="gelu|fft_r2c|log_softmax"
+UNBUILDABLE_OPTIMIZED_OPS_REGEX="_elu|gelu|fft|log_softmax"
 BUILDABLE_OPTIMIZED_OPS=$(buck2 query //kernels/optimized/cpu/... | grep -E -v $UNBUILDABLE_OPTIMIZED_OPS_REGEX)

 # TODO: build prim_ops_test_cpp again once supported_features works in
@@ -24,6 +24,8 @@ BUILDABLE_KERNELS_PRIM_OPS_TARGETS=$(buck2 query //kernels/prim_ops/... | grep -
 # TODO: expand the covered scope of Buck targets.
 # //runtime/kernel/... is failing because //third-party:torchgen_files's shell script can't find python on PATH.
 # //runtime/test/... requires Python torch, which we don't have in our OSS buck setup.
-buck2 test $BUILDABLE_OPTIMIZED_OPS //kernels/portable/... \
-  $BUILDABLE_KERNELS_PRIM_OPS_TARGETS //runtime/backend/... //runtime/core/... \
-  //runtime/executor: //runtime/kernel/... //runtime/platform/...
+for op in "build" "test"; do
+  buck2 $op $BUILDABLE_OPTIMIZED_OPS //kernels/portable/... \
+    $BUILDABLE_KERNELS_PRIM_OPS_TARGETS //runtime/backend/... //runtime/core/... \
+    //runtime/executor: //runtime/kernel/... //runtime/platform/...
+done

.lintrunner.toml

Lines changed: 4 additions & 0 deletions
@@ -271,6 +271,10 @@ exclude_patterns = [
     'examples/**',
     'exir/verification/bindings.cpp',
     'extension/**',
+    # Uses properly-gated (ET_USE_PYTORCH_HEADERS) ATen include.
+    'kernels/portable/cpu/util/elementwise_util.h',
+    'kernels/portable/cpu/util/math_util.h',
+    'kernels/portable/cpu/util/vectorized_math.h',
     'kernels/optimized/**',
     'runtime/core/exec_aten/**',
     # Want to be able to keep c10 in sync with PyTorch core.

CMakeLists.txt

Lines changed: 0 additions & 8 deletions
@@ -430,14 +430,6 @@ endif()

 add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/configurations)

-#
-# gflags: Commandline flag host library.
-#
-
-if(EXECUTORCH_BUILD_GFLAGS)
-  add_subdirectory(third-party/gflags)
-endif()
-
 # Install `executorch` library as well as `executorch-config.cmake` under
 # ${CMAKE_INSTALL_PREFIX}/
 install(

backends/apple/coreml/scripts/build_tests.sh

Lines changed: 1 addition & 2 deletions
@@ -33,8 +33,7 @@ cmake "$EXECUTORCH_ROOT_PATH" -B"$CMAKE_EXECUTORCH_BUILD_DIR_PATH" \
     -DPLATFORM=MAC_UNIVERSAL \
     -DDEPLOYMENT_TARGET=13.0 \
     -DEXECUTORCH_BUILD_EXECUTOR_RUNNER=OFF \
-    -DEXECUTORCH_BUILD_XNNPACK=OFF \
-    -DEXECUTORCH_BUILD_GFLAGS=OFF
+    -DEXECUTORCH_BUILD_XNNPACK=OFF

 cmake --build "$CMAKE_EXECUTORCH_BUILD_DIR_PATH" -j9 -t executorch

backends/arm/_passes/arm_pass_manager.py

Lines changed: 6 additions & 2 deletions
@@ -62,7 +62,10 @@
     UnsqueezeScalarPlaceholdersPass,
 )

-from executorch.backends.arm.tosa_specification import TosaSpecification
+from executorch.backends.arm.tosa_specification import (
+    TosaLoweringContext,
+    TosaSpecification,
+)
 from executorch.backends.transforms.decompose_sdpa import (
     DecomposeScaledDotProductAttention,
 )
@@ -80,7 +83,8 @@ def __init__(self, tosa_spec: TosaSpecification) -> None:
         super().__init__()

     def _transform(self, graph_module: GraphModule):
-        return self(graph_module).graph_module
+        with TosaLoweringContext(self.tosa_spec):
+            return self(graph_module).graph_module

     def _tosa_080_BI_pipeline(self, exported_program: ExportedProgram) -> GraphModule:
         self.add_pass(FuseQuantizedActivationPass())
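
Note: pass execution is now wrapped in TosaLoweringContext(self.tosa_spec), presumably so passes can look up the active TOSA specification while they run. Below is a minimal sketch of that scoping pattern; the names lowering_context and current_spec are illustrative only and the real TosaLoweringContext may carry additional state.

    # Minimal sketch of a "publish the spec for the duration of a block" context.
    # Hypothetical helpers; not the actual TosaLoweringContext implementation.
    import threading
    from contextlib import contextmanager

    _active = threading.local()

    @contextmanager
    def lowering_context(tosa_spec):
        # Make the spec visible to code running inside the with-block, then restore.
        previous = getattr(_active, "spec", None)
        _active.spec = tosa_spec
        try:
            yield tosa_spec
        finally:
            _active.spec = previous

    def current_spec():
        # Passes running inside the context can query the active spec.
        return getattr(_active, "spec", None)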

backends/arm/_passes/scalars_to_attribute_pass.py

Lines changed: 1 addition & 1 deletion
@@ -12,8 +12,8 @@
 from executorch.backends.arm._passes.arm_pass_utils import get_first_fake_tensor

 from executorch.exir.pass_base import ExportPass, PassResult
-from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix
 from torch.fx import GraphModule, Node
+from torchao.quantization.pt2e.utils import get_new_attr_name_with_prefix


 class ScalarsToAttributePass(ExportPass):
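
Note: the only change here is the import source; get_new_attr_name_with_prefix now comes from torchao.quantization.pt2e.utils instead of torch.ao.quantization.fx.utils. A hedged usage sketch, assuming the torchao copy keeps the same behavior as the old FX utility (a name generator that returns an unused attribute name on a module):

    # Sketch of the typical call pattern; prefix and tensor value are illustrative.
    import torch
    from torchao.quantization.pt2e.utils import get_new_attr_name_with_prefix

    gm = torch.fx.symbolic_trace(torch.nn.Linear(4, 4))

    # Build a name generator, then ask it for a fresh attribute name on the module.
    get_new_attr_name = get_new_attr_name_with_prefix("_tensor_constant_")
    attr_name = get_new_attr_name(gm)
    setattr(gm, attr_name, torch.tensor(1.0))  # register the new constant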

backends/arm/quantizer/arm_quantizer.py

Lines changed: 3 additions & 3 deletions
@@ -247,9 +247,9 @@ def set_module_name(
         quantizer.set_module_name("blocks.sub"), it will quantize all supported operator/operator
         patterns in the submodule with this module name with the given `quantization_config`
         """
-        assert (
-            quantization_config is not None
-        ), " quantization_config == None is not supported yet"
+        # Validate that quantization_config is provided
+        if quantization_config is None:
+            raise ValueError("quantization_config == None is not supported yet")
         self.module_name_config[module_name] = quantization_config
         return self

backends/arm/quantizer/quantization_annotator.py

Lines changed: 5 additions & 0 deletions
@@ -221,10 +221,12 @@ def _match_pattern(
     torch.ops.aten.squeeze_copy.dim,
     torch.ops.aten.squeeze.dim,
     torch.ops.aten.squeeze.dims,
+    torch.ops.aten.unbind.int,
     torch.ops.aten.unsqueeze.default,
     torch.ops.aten.unsqueeze_copy.default,
     torch.ops.aten.reshape.default,
     torch.ops.aten.repeat.default,
+    torch.ops.aten.repeat_interleave.self_int,
     torch.ops.aten.expand_copy.default,
     torch.ops.aten.expand.default,
     # Disabling these as there seems to be an issue with support for complex
@@ -256,6 +258,7 @@ def _match_pattern(
     torch.ops.aten.amin.default,
     torch.ops.aten.clamp.default,
     torch.ops.aten.clamp.Tensor,
+    torch.ops.aten.unflatten.int,
 ]

 _one_to_one_shared_input_or_input_act_qspec = [
@@ -271,6 +274,7 @@ def _match_pattern(
     torch.ops.aten.avg_pool2d.default,
     torch.ops.aten.max_pool2d.default,
     torch.ops.aten.full.default,
+    torch.ops.aten.full,
     torch.ops.aten.flatten.using_ints,
     torch.ops.aten.dropout.default,
     torch.ops.aten.dropout_.default,
@@ -539,6 +543,7 @@ def annotate_graph(  # type: ignore[return]
         if node.target in [
             torch.ops.aten.full_like.default,
             torch.ops.aten.full.default,
+            torch.ops.aten.full,
             torch.ops.aten.scalar_tensor.default,
         ]:
             node.kwargs = {}
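
Note: the lists now contain both torch.ops.aten.full.default and the bare torch.ops.aten.full. Reading between the lines of the diff (this is an inference, not stated in the commit), a node target can appear as either the overload packet or a concrete overload depending on how the graph was captured, so the membership checks list both forms. A quick illustration of the distinction:

    import torch

    packet = torch.ops.aten.full            # OpOverloadPacket: the generic "aten.full"
    overload = torch.ops.aten.full.default  # OpOverload: one concrete overload of it

    print(type(packet).__name__)    # -> OpOverloadPacket
    print(type(overload).__name__)  # -> OpOverload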

backends/arm/quantizer/quantization_config.py

Lines changed: 26 additions & 14 deletions
@@ -29,30 +29,40 @@ def get_input_act_qspec(self) -> QuantizationSpec | None:
         """Returns QuantizationSpec 'input_activation' after asserting that input_activation.qscheme is valid."""
         if self.input_activation is None:
             return None
-        assert self.input_activation.qscheme in [
+        # Validate that input_activation uses a supported qscheme
+        if self.input_activation.qscheme not in [
             torch.per_tensor_affine,
             torch.per_tensor_symmetric,
-        ], f"Unsupported quantization_spec {self.input_activation} for input_activation."
+        ]:
+            raise ValueError(
+                f"Unsupported quantization_spec {self.input_activation} for input_activation."
+            )
         return self.input_activation

     def get_output_act_qspec(self) -> QuantizationSpec | None:
         """Returns QuantizationSpec 'output_activation' after asserting that output_activation.qscheme is valid."""
         if self.output_activation is None:
             return None
-        assert self.output_activation.qscheme in [
+        # Validate that output_activation uses a supported qscheme
+        if self.output_activation.qscheme not in [
             torch.per_tensor_affine,
             torch.per_tensor_symmetric,
-        ], f"Unsupported quantization_spec {self.output_activation} for output_activation."
+        ]:
+            raise ValueError(
+                f"Unsupported quantization_spec {self.output_activation} for output_activation."
+            )
         return self.output_activation

     def get_weight_qspec(self) -> QuantizationSpec | None:
         """Returns QuantizationSpec 'weight' after asserting that weight.qscheme is valid."""
         if self.weight is None:
             return None
-        assert self.weight.qscheme in [
+        # Validate that weight uses a supported qscheme
+        if self.weight.qscheme not in [
             torch.per_tensor_symmetric,
             torch.per_channel_symmetric,
-        ], f"Unsupported quantization_spec {self.weight} for weight"
+        ]:
+            raise ValueError(f"Unsupported quantization_spec {self.weight} for weight")
         return self.weight

     def get_bias_qspec(self, node: torch.fx.Node) -> QuantizationSpec | None:
@@ -61,11 +71,11 @@ def get_bias_qspec(self, node: torch.fx.Node) -> QuantizationSpec | None:
         def _derive_qparams_fn(
             obs_or_fqs: list[ObserverOrFakeQuantize],
         ) -> tuple[torch.Tensor, torch.Tensor]:
-            assert (
-                len(obs_or_fqs) == 2
-            ), "Expecting two obs/fqs, one for activation and one for weight, got: {}".format(
-                len(obs_or_fqs)
-            )
+            # Validate expected number of observers/fake-quantizes
+            if len(obs_or_fqs) != 2:
+                raise ValueError(
+                    f"Expecting two obs/fqs, one for activation and one for weight, got: {len(obs_or_fqs)}"
+                )
             act_obs_or_fq = obs_or_fqs[0]
             weight_obs_or_fq = obs_or_fqs[1]
             act_scale, act_zp = act_obs_or_fq.calculate_qparams()
@@ -94,9 +104,11 @@ def _derive_qparams_fn(

         if self.bias is None:
             return None
-        assert (
-            self.bias.dtype == torch.float
-        ), "Only float dtype for bias is supported for bias right now"
+        # Validate that bias dtype is floating-point
+        if self.bias.dtype != torch.float:
+            raise ValueError(
+                "Only float dtype for bias is supported for bias right now"
+            )
         return self.bias

     def get_fixed_qspec(
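
Note: this file (and arm_quantizer.py above) replaces assert statements with explicit ValueError raises. A likely motivation, not stated in the commit itself, is that asserts are compiled away when Python runs with optimizations enabled, so they cannot be relied on for validating user input. A small self-contained illustration with hypothetical function names:

    # Under `python -O`, assert statements are removed (__debug__ is False),
    # so the assert-based check silently disappears; the explicit raise always runs.

    def check_with_assert(qscheme, supported):
        assert qscheme in supported, f"Unsupported qscheme {qscheme}"

    def check_with_raise(qscheme, supported):
        if qscheme not in supported:
            raise ValueError(f"Unsupported qscheme {qscheme}")

    supported = ("per_tensor_affine", "per_tensor_symmetric")

    try:
        check_with_raise("per_channel_affine", supported)
    except ValueError as e:
        print("raise-based check fired:", e)   # fires under -O as well

    try:
        check_with_assert("per_channel_affine", supported)
    except AssertionError as e:
        print("assert-based check fired:", e)  # silently skipped under python -O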

backends/arm/scripts/parse_test_names.py

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,8 @@
     "bitwise_right_shift.Tensor",
     "bitwise_left_shift.Tensor",
     "native_group_norm.default",
+    "unbind.int",
+    "unflatten.int",
     "_native_batch_norm_legit_no_training.default",
     "_native_batch_norm_legit.no_stats",
 ]

backends/arm/test/models/test_deit_tiny_arm.py

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ def test_deit_tiny_tosa_BI():
         aten_op=[],
         exir_op=[],
         use_to_edge_transform_and_lower=True,
-        atol=2.5,  # This needs to go down: MLETORCH-956
+        atol=1,
         qtol=1,
     )
     pipeline.run()

backends/arm/test/models/test_llama.py

Lines changed: 0 additions & 6 deletions
@@ -128,10 +128,4 @@ def test_llama_tosa_BI():
         exir_op=[],
         use_to_edge_transform_and_lower=True,
     )
-    pipeline.change_args(
-        "run_method_and_compare_outputs",
-        atol=9.9,
-        rtol=1.5,  # TODO: Tolerance needs to be updated after MLETORCH-907
-        inputs=llama_inputs,
-    )
     pipeline.run()

backends/arm/test/ops/test_repeat.py

Lines changed: 55 additions & 32 deletions
@@ -21,68 +21,91 @@
 )

 input_t1 = Tuple[torch.Tensor, torch.Tensor]  # Input x, Input y
-aten_op = "torch.ops.aten.repeat.default"


 """Tests Tensor.repeat for different ranks and dimensions."""


 class Repeat(torch.nn.Module):
-    # (input tensor, multiples)
-    test_parameters = {
-        "1_x_1": lambda: (torch.randn(3), (2,)),
-        "2_x_2": lambda: (torch.randn(3, 4), (2, 1)),
-        "4_x_4": lambda: (torch.randn(1, 1, 2, 2), (1, 2, 3, 4)),
-        "1_x_2": lambda: (torch.randn(3), (2, 2)),
-        "1_x_3": lambda: (torch.randn(3), (1, 2, 3)),
-        "2_x_3": lambda: (torch.randn((3, 3)), (2, 2, 2)),
-        "1_x_4": lambda: (torch.randn((3, 3, 3)), (2, 1, 2, 4)),
-    }
-
-    def forward(self, x: torch.Tensor, multiples: Sequence):
-        return x.repeat(multiples)
-
-
-@common.parametrize("test_data", Repeat.test_parameters)
+    aten_op = "torch.ops.aten.repeat.default"
+
+    def __init__(self, multiples: Sequence[int]):
+        super().__init__()
+        self.multiples = multiples
+
+    def forward(self, x: torch.Tensor):
+        return x.repeat(self.multiples)
+
+
+class RepeatInterleaveInt(torch.nn.Module):
+    aten_op = "torch.ops.aten.repeat_interleave.self_int"
+
+    def __init__(self, repeats: int, dim: int):
+        super().__init__()
+        self.repeats = repeats
+        self.dim = dim
+
+    def forward(self, x: torch.Tensor):
+        return x.repeat_interleave(self.repeats, self.dim)
+
+
+test_data_suite = {
+    # test_name : lambda: (module, test_data)
+    "1_x_1": lambda: (Repeat((2,)), (torch.randn(3),)),
+    "2_x_2": lambda: (Repeat((2, 1)), (torch.randn(3, 4),)),
+    "4_x_4": lambda: (Repeat((1, 2, 3, 4)), (torch.randn(1, 1, 2, 2),)),
+    "1_x_2": lambda: (Repeat((2, 2)), (torch.randn(3),)),
+    "1_x_3": lambda: (Repeat((1, 2, 3)), (torch.randn(3),)),
+    "2_x_3": lambda: (Repeat((2, 2, 2)), (torch.randn((3, 3)),)),
+    "1_x_4": lambda: (Repeat((2, 1, 2, 4)), (torch.randn((3, 3, 3)),)),
+    "interleave_int_3_x_1": lambda: (RepeatInterleaveInt(3, 1), (torch.randn(3, 4),)),
+}
+
+
+@common.parametrize("test_data", test_data_suite)
 def test_repeat_tosa_MI(test_data: Tuple):
+    module, test_data = test_data()
     pipeline = TosaPipelineMI[input_t1](
-        Repeat(),
-        test_data(),
-        aten_op,
+        module,
+        test_data,
+        module.aten_op,
         exir_op=[],
     )
     pipeline.run()


-@common.parametrize("test_data", Repeat.test_parameters)
+@common.parametrize("test_data", test_data_suite)
 def test_repeat_tosa_BI(test_data: Tuple):
+    module, test_data = test_data()
     pipeline = TosaPipelineBI[input_t1](
-        Repeat(),
-        test_data(),
-        aten_op,
+        module,
+        test_data,
+        module.aten_op,
         exir_op=[],
     )
     pipeline.run()


-@common.parametrize("test_data", Repeat.test_parameters)
+@common.parametrize("test_data", test_data_suite)
 def test_repeat_u55_BI(test_data: Tuple):
+    module, test_data = test_data()
     pipeline = EthosU55PipelineBI[input_t1](
-        Repeat(),
-        test_data(),
-        aten_op,
+        module,
+        test_data,
+        module.aten_op,
         exir_ops=[],
         run_on_fvp=False,
     )
     pipeline.run()


-@common.parametrize("test_data", Repeat.test_parameters)
+@common.parametrize("test_data", test_data_suite)
 def test_repeat_u85_BI(test_data: Tuple):
+    module, test_data = test_data()
     pipeline = EthosU85PipelineBI[input_t1](
-        Repeat(),
-        test_data(),
-        aten_op,
+        module,
+        test_data,
+        module.aten_op,
         exir_ops=[],
         run_on_fvp=False,
     )
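
Note: each test-data entry now pairs a module instance with its input tuple, and each module carries its own aten_op string, so repeat and repeat_interleave cases share the same parametrized tests. Extending the suite is then just another dictionary entry; for example (a hypothetical extra case following the pattern above, not part of this commit):

    # Hypothetical entry: repeat each row twice along dim 0 of a 4x3 tensor.
    test_data_suite["interleave_int_2_x_0"] = lambda: (
        RepeatInterleaveInt(2, 0),
        (torch.randn(4, 3),),
    )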
