Commit a32563d

Update on "use-pt-pinned-commit for test-arm-{backend,reference}-delegation"
Without this, these builds don't respect the torchgen pinned commit and thus fail with #7546.

Differential Revision: [D67996459](https://our.internmc.facebook.com/intern/diff/D67996459/)

[ghstack-poisoned]
2 parents 91b4ea8 + ba5b236 commit a32563d

File tree: 437 files changed, +2660 −2743 lines. This is a large commit, so only a subset of the changed files is shown below.

backends/cadence/aot/compiler.py

Lines changed: 5 additions & 1 deletion
@@ -33,6 +33,7 @@
     ExecutorchProgramManager,
     to_edge,
 )
+from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.pass_base import PassResult
 from executorch.exir.passes import ToOutVarPass
 from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
@@ -186,14 +187,17 @@ def export_to_edge(
     edge_prog_manager = to_edge(
         expo_program,
         compile_config=EdgeCompileConfig(
-            _skip_dim_order=True,
             # Allow specific non-core aten ops in the IR.
             _core_aten_ops_exception_list=[
                 torch.ops.aten._native_batch_norm_legit_functional.default,
                 torch.ops.aten.linear.default,
                 torch.ops.aten.linalg_vector_norm.default,
                 torch.ops.aten.unfold.default,
                 torch.ops.aten.angle.default,
+                # Cadence replaced to_dim_order_copy with _to_copy for performance;
+                # skip the _to_copy op to get around the dim order check.
+                # We should remove this once Cadence supports dim order.
+                exir_ops.edge.aten._to_copy.default,
             ],
         ),
         constant_methods=constant_methods,
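
For context, here is a minimal sketch (not part of this commit) of how an entry in _core_aten_ops_exception_list reaches the edge verifier through to_edge(); the model and example input are hypothetical placeholders:

import torch
from executorch.exir import EdgeCompileConfig, to_edge
from executorch.exir.dialects._ops import ops as exir_ops

class TinyModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1.0

# Export to ATen IR, then lower to edge IR with a Cadence-style exception list.
exported = torch.export.export(TinyModel(), (torch.randn(2, 4),))
edge = to_edge(
    exported,
    compile_config=EdgeCompileConfig(
        # Ops listed here may remain in the edge IR even though they are not
        # core ATen; _to_copy is exempted until Cadence supports dim order.
        _core_aten_ops_exception_list=[exir_ops.edge.aten._to_copy.default],
    ),
)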

backends/cadence/aot/replace_ops.py

Lines changed: 73 additions & 0 deletions
@@ -11,6 +11,7 @@

 # pyre-unsafe

+import copy
 import math
 from operator import neg
 from typing import cast, Dict, Iterable, Sequence, Set, Tuple
@@ -35,7 +36,12 @@
 from executorch.backends.cadence.aot.utils import get_edge_overload_packet
 from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.dialects.edge._ops import EdgeOpOverload, EdgeOpOverloadPacket
+from executorch.exir.dim_order_utils import get_memory_format
 from executorch.exir.pass_base import ExportPass, NodeMetadata, PassResult, ProxyValue
+from executorch.exir.passes.dim_order_ops_registry import (
+    DimOrderOpsMap,
+    MemoryFormatOpsMap,
+)
 from torch._subclasses import FakeTensor
 from torch.fx.node import Argument

@@ -1799,6 +1805,72 @@ def call_operator(
         )


+@register_cadence_pass(CadencePassAttribute(opt_level=0))
+class ReplaceToDimOrderCopyWithToCopyPass(ExportPass):
+    """
+    dim_order_ops::to_dim_order_copy is not supported, so this is an opt_level=0 pass.
+    If the dim order is sequential, we don't need the extra work with strides and
+    can just use to_copy.
+    """
+
+    def call_operator(
+        self,
+        op,
+        args: Tuple[Argument, ...],
+        kwargs: Dict[str, Argument],
+        meta: NodeMetadata,
+    ) -> ProxyValue:
+        if op not in DimOrderOpsMap:
+            return super().call_operator(op, args, kwargs, meta)
+
+        # new kwargs with dim_order, and no memory_format for the new op
+        nkwargs = dict(copy.deepcopy(kwargs))  # orig kwargs are immutable
+
+        ndim = None
+
+        # can always get the shape, assuming rank is specialized
+
+        # pyre-ignore[16]: `None` has no attribute `to_tensor`
+        if isinstance(args[0], ProxyValue) and args[0].is_tensor():
+            # pyre-ignore[16]: `None` has no attribute `to_tensor`
+            ndim = args[0].to_tensor().dim()
+        elif isinstance(args[0], torch.Tensor):
+            # pyre-ignore[16]: `None` has no attribute `dim`
+            ndim = args[0].dim()
+        elif isinstance(args[0], torch.fx.immutable_collections.immutable_list):
+            # pyre-ignore[6]: Incompatible parameter type
+            ndim = len(args[0])
+        else:
+            assert 0, f"Expecting a Tensor or a ProxyValue but got {type(args[0])}"
+
+        # get the "to" memory format for the EdgeOp
+        contiguous_dim_order = list(range(ndim))
+        dim_order = nkwargs.pop("dim_order", None)
+
+        # Cadence only supports contiguous memory format
+        assert (
+            dim_order is None
+            # pyre-ignore[6]: Incompatible parameter type
+            or len(dim_order) == 0
+            or dim_order == contiguous_dim_order
+        ), "Expected dim order in contiguous or preserve memory format, but got {}".format(
+            dim_order
+        )
+
+        # bring back memory format
+        # pyre-ignore[6]: Incompatible parameter type
+        nkwargs["memory_format"] = get_memory_format(dim_order)
+
+        memory_format_op = MemoryFormatOpsMap[op]
+
+        return super().call_operator(
+            memory_format_op,
+            args,
+            nkwargs,
+            meta,
+        )
+
+
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class ReplaceFullLikeWithFullPass(ExportPass):
     """
@@ -2108,4 +2180,5 @@ class CadenceReplaceOpsInGraph:
     ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
     ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
     ReplaceAtenLinalgVectorNormWithCadenceLinalgVectorNormPass,
+    ReplaceToDimOrderCopyWithToCopyPass,
 ]
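
A hedged usage sketch (not part of this commit): running the new pass standalone over an edge-dialect graph module. Here graph_module is a placeholder for an edge program's torch.fx GraphModule and is assumed to contain a dim_order_ops::to_dim_order_copy call:

import torch
from executorch.backends.cadence.aot.replace_ops import (
    ReplaceToDimOrderCopyWithToCopyPass,
)
from executorch.exir.dim_order_utils import get_memory_format

# For a sequential dim order, get_memory_format recovers the plain contiguous
# memory format, which is what the pass writes back into the node's kwargs.
assert get_memory_format([0, 1, 2, 3]) == torch.contiguous_format

# graph_module: torch.fx.GraphModule (placeholder, see lead-in above).
# ExportPass instances are callable on a GraphModule and return a PassResult.
result = ReplaceToDimOrderCopyWithToCopyPass()(graph_module)
if result is not None and result.modified:
    # to_dim_order_copy nodes should now be aten._to_copy with a memory_format kwarg.
    print(result.graph_module.graph)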

backends/cadence/hifi/operators/op_add.cpp

Lines changed: 3 additions & 3 deletions
@@ -16,9 +16,9 @@
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>

-using executorch::aten::Scalar;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
+using exec_aten::Scalar;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::CppTypeToScalarType;
 using executorch::runtime::KernelRuntimeContext;

backends/cadence/hifi/operators/op_cat.cpp

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ namespace native {

 Tensor& cat_out(
     RuntimeContext& ctx,
-    executorch::aten::ArrayRef<Tensor> tensors,
+    exec_aten::ArrayRef<Tensor> tensors,
     int64_t dim,
     Tensor& out) {
   if (dim < 0) {

backends/cadence/hifi/operators/op_clamp.cpp

Lines changed: 2 additions & 2 deletions
@@ -51,8 +51,8 @@ namespace native {
 Tensor& clamp_tensor_out(
     RuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& min_opt,
-    const executorch::aten::optional<Tensor>& max_opt,
+    const exec_aten::optional<Tensor>& min_opt,
+    const exec_aten::optional<Tensor>& max_opt,
     Tensor& out) {
   (void)ctx;


backends/cadence/hifi/operators/op_div.cpp

Lines changed: 4 additions & 4 deletions
@@ -17,10 +17,10 @@
 #include <executorch/runtime/platform/assert.h>
 #include <cmath>

+using exec_aten::Scalar;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::Scalar;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using torch::executor::Error;

 namespace cadence {
@@ -165,7 +165,7 @@ Tensor& div_out_mode(
     RuntimeContext& ctx,
     const Tensor& a,
     const Tensor& b,
-    executorch::aten::optional<executorch::aten::string_view> mode,
+    exec_aten::optional<exec_aten::string_view> mode,
     Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,

backends/cadence/hifi/operators/op_maximum.cpp

Lines changed: 2 additions & 2 deletions
@@ -12,9 +12,9 @@
 #include <executorch/kernels/portable/cpu/util/math_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>

+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::canCast;
 using executorch::runtime::CppTypeToScalarType;

backends/cadence/hifi/operators/op_minimum.cpp

Lines changed: 2 additions & 2 deletions
@@ -12,9 +12,9 @@
 #include <executorch/kernels/portable/cpu/util/math_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>

+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::canCast;
 using executorch::runtime::CppTypeToScalarType;

backends/cadence/hifi/operators/op_mul.cpp

Lines changed: 3 additions & 3 deletions
@@ -15,10 +15,10 @@
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>

+using exec_aten::Scalar;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::Scalar;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::CppTypeToScalarType;
 using torch::executor::Error;

backends/cadence/hifi/operators/op_rsqrt.cpp

Lines changed: 2 additions & 2 deletions
@@ -11,9 +11,9 @@

 #include <executorch/backends/cadence/hifi/kernels/kernels.h>

+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;

 namespace cadence {
 namespace impl {

backends/cadence/hifi/operators/op_sigmoid.cpp

Lines changed: 3 additions & 3 deletions
@@ -14,17 +14,17 @@
 #include <executorch/kernels/portable/cpu/util/functional_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>

+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using torch::executor::Error;

 namespace cadence {
 namespace impl {
 namespace HiFi {
 namespace native {

-using Tensor = executorch::aten::Tensor;
+using Tensor = exec_aten::Tensor;

 Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
   (void)ctx;

backends/cadence/hifi/operators/op_softmax.cpp

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ Tensor& softmax_out(
   // Adjust for negative dim
   dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;

-  const executorch::aten::optional<int64_t>& dim_t = dim;
+  const exec_aten::optional<int64_t>& dim_t = dim;
   const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim());
   const size_t size = in.size(d);


backends/cadence/hifi/operators/op_sub.cpp

Lines changed: 3 additions & 3 deletions
@@ -16,10 +16,10 @@
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>

+using exec_aten::Scalar;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::Scalar;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using executorch::runtime::can_cast;
 using executorch::runtime::CppTypeToScalarType;
 using torch::executor::Error;

backends/cadence/hifi/operators/op_tanh.cpp

Lines changed: 2 additions & 2 deletions
@@ -11,9 +11,9 @@
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <cmath>

+using exec_aten::ScalarType;
+using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
 using torch::executor::Error;

 namespace cadence {

backends/cadence/hifi/operators/quantized_linear_out.cpp

Lines changed: 4 additions & 4 deletions
@@ -219,7 +219,7 @@ void quantized_linear_out(
     int64_t out_zero_point,
     __ET_UNUSED const optional<Tensor>& offset,
     Tensor& out) {
-  if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
+  if (out.scalar_type() == exec_aten::ScalarType::Byte) {
     _quantized_linear_asym8u(
         in,
         weight,
@@ -231,7 +231,7 @@ void quantized_linear_out(
         out_zero_point,
         offset,
         out);
-  } else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
+  } else if (out.scalar_type() == exec_aten::ScalarType::Char) {
     _quantized_linear_asym8s(
         in,
         weight,
@@ -261,7 +261,7 @@ void quantized_linear_per_tensor_out(
     int64_t out_zero_point,
     __ET_UNUSED const optional<Tensor>& offset,
     Tensor& out) {
-  if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
+  if (out.scalar_type() == exec_aten::ScalarType::Byte) {
     _quantized_linear_per_tensor_asym8u(
         in,
         weight,
@@ -273,7 +273,7 @@ void quantized_linear_per_tensor_out(
         out_zero_point,
         offset,
         out);
-  } else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
+  } else if (out.scalar_type() == exec_aten::ScalarType::Char) {
     _quantized_linear_per_tensor_asym8s(
         in,
         weight,

backends/vulkan/runtime/VulkanBackend.cpp

Lines changed: 3 additions & 3 deletions
@@ -417,10 +417,10 @@ bool maybe_update_scalar_tensor(
     executorch::aten::Tensor& scalar_tensor_src) {
   const int32_t cur_val = graph->read_symint(ref);
   int32_t scalar_tensor_val = 0;
-  executorch::aten::ScalarType dtype = scalar_tensor_src.scalar_type();
-  if (dtype == executorch::aten::ScalarType::Int) {
+  exec_aten::ScalarType dtype = scalar_tensor_src.scalar_type();
+  if (dtype == exec_aten::ScalarType::Int) {
     scalar_tensor_val = *scalar_tensor_src.const_data_ptr<int32_t>();
-  } else if (dtype == executorch::aten::ScalarType::Long) {
+  } else if (dtype == exec_aten::ScalarType::Long) {
     scalar_tensor_val = int32_t(*scalar_tensor_src.const_data_ptr<int64_t>());
   }
   bool was_updated = false;

codegen/tools/gen_selected_op_variants.py

Lines changed: 3 additions & 4 deletions
@@ -17,7 +17,7 @@
 from torchgen.code_template import CodeTemplate


-ops_and_dtypes_template_str = """((executorch::aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
+ops_and_dtypes_template_str = """((exec_aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
 ops_and_dtypes_template = CodeTemplate(ops_and_dtypes_template_str)

 selected_kernel_dtypes_h_template_str = """#pragma once
@@ -27,7 +27,7 @@

 inline constexpr bool should_include_kernel_dtype(
   const char *operator_name,
-  executorch::aten::ScalarType scalar_type
+  exec_aten::ScalarType scalar_type
 ) {
   return $body;
 }
@@ -91,8 +91,7 @@ def write_selected_op_variants(yaml_file_path: str, output_dir: str) -> None:
         dtype_set = set([x.split(";")[0] for x in tensor_meta])
         dtype_list = sorted([dtype_enum_to_type[x] for x in dtype_set])
         conditions = [
-            "scalar_type == executorch::aten::ScalarType::" + x
-            for x in dtype_list
+            "scalar_type == exec_aten::ScalarType::" + x for x in dtype_list
         ]
         body_parts.append(
             ops_and_dtypes_template.substitute(
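
A small, hedged sketch of how these templates are exercised (not part of this commit): regenerating the selective-build header and checking that the emitted guard now uses the exec_aten namespace. The module import path, the yaml/output paths, and the generated file name selected_op_variants.h are assumptions here:

import os

from executorch.codegen.tools.gen_selected_op_variants import write_selected_op_variants

# "selected_operators.yaml" is a placeholder path to an existing selective-build yaml.
write_selected_op_variants("selected_operators.yaml", "generated/")
with open(os.path.join("generated", "selected_op_variants.h")) as f:
    header = f.read()

# The generated predicate should reference exec_aten::ScalarType, matching the
# updated ops_and_dtypes_template_str above.
assert "exec_aten::ScalarType" in header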

codegen/tools/test/test_gen_selected_op_variants.py

Lines changed: 7 additions & 7 deletions
@@ -71,13 +71,13 @@ def test_generates_correct_header(self) -> None:

 inline constexpr bool should_include_kernel_dtype(
   const char *operator_name,
-  executorch::aten::ScalarType scalar_type
+  exec_aten::ScalarType scalar_type
 ) {
-  return ((executorch::aten::string_view(operator_name).compare("add.out") == 0)
-    && (scalar_type == executorch::aten::ScalarType::Float || scalar_type == executorch::aten::ScalarType::Int))
-    || ((executorch::aten::string_view(operator_name).compare("mul.out") == 0)
-    && (scalar_type == executorch::aten::ScalarType::Float))
-    || ((executorch::aten::string_view(operator_name).compare("sub.out") == 0)
+  return ((exec_aten::string_view(operator_name).compare("add.out") == 0)
+    && (scalar_type == exec_aten::ScalarType::Float || scalar_type == exec_aten::ScalarType::Int))
+    || ((exec_aten::string_view(operator_name).compare("mul.out") == 0)
+    && (scalar_type == exec_aten::ScalarType::Float))
+    || ((exec_aten::string_view(operator_name).compare("sub.out") == 0)
     && (true));
 }
 """,
@@ -124,7 +124,7 @@ def test_generates_correct_header(self) -> None:

 inline constexpr bool should_include_kernel_dtype(
   const char *operator_name,
-  executorch::aten::ScalarType scalar_type
+  exec_aten::ScalarType scalar_type
 ) {
   return true;
 }
