
Commit 98697f6

Revert D68246404
Differential Revision: D68780760
Pull Request resolved: #8002
1 parent e37a585 commit 98697f6

File tree

2 files changed: +1 −78 lines


backends/cadence/aot/compiler.py

Lines changed: 1 addition & 5 deletions
@@ -33,7 +33,6 @@
     ExecutorchProgramManager,
     to_edge,
 )
-from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.pass_base import PassResult
 from executorch.exir.passes import ToOutVarPass
 from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
@@ -187,17 +186,14 @@ def export_to_edge(
     edge_prog_manager = to_edge(
         expo_program,
         compile_config=EdgeCompileConfig(
+            _skip_dim_order=True,
             # Allow specific non-core aten ops in the IR.
             _core_aten_ops_exception_list=[
                 torch.ops.aten._native_batch_norm_legit_functional.default,
                 torch.ops.aten.linear.default,
                 torch.ops.aten.linalg_vector_norm.default,
                 torch.ops.aten.unfold.default,
                 torch.ops.aten.angle.default,
-                # cadence replaced to_dim_order_copy with _to_copy for performance
-                # skip _to_copy op to get around of dim order check
-                # We should remove this op once cadence can support dim order
-                exir_ops.edge.aten._to_copy.default,
             ],
         ),
         constant_methods=constant_methods,
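For context, a minimal sketch (not part of the commit) of the lowering configuration this hunk restores. `to_edge`, `EdgeCompileConfig`, and `_skip_dim_order` are the ExecuTorch APIs shown in the diff above; `SimpleModel` is a hypothetical example module. With `_skip_dim_order=True`, `aten._to_copy` stays in the edge IR rather than being rewritten to `dim_order_ops.to_dim_order_copy`, which is why the `exir_ops.edge.aten._to_copy.default` entry could be dropped from `_core_aten_ops_exception_list`.

    import torch
    from executorch.exir import EdgeCompileConfig, to_edge

    class SimpleModel(torch.nn.Module):  # hypothetical example module
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # The dtype cast typically lowers to a _to_copy-style op in the IR.
            return x.to(torch.float64) + 1.0

    exported = torch.export.export(SimpleModel(), (torch.randn(2, 3),))
    edge = to_edge(
        exported,
        # Skipping dim-order rewriting keeps aten._to_copy in the edge IR,
        # so no dim-order exception for it is needed.
        compile_config=EdgeCompileConfig(_skip_dim_order=True),
    )
    print(edge.exported_program().graph_module.code)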

backends/cadence/aot/replace_ops.py

Lines changed: 0 additions & 73 deletions
@@ -11,7 +11,6 @@
 
 # pyre-unsafe
 
-import copy
 import math
 from operator import neg
 from typing import cast, Dict, Iterable, Sequence, Set, Tuple
@@ -36,12 +35,7 @@
 from executorch.backends.cadence.aot.utils import get_edge_overload_packet
 from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.dialects.edge._ops import EdgeOpOverload, EdgeOpOverloadPacket
-from executorch.exir.dim_order_utils import get_memory_format
 from executorch.exir.pass_base import ExportPass, NodeMetadata, PassResult, ProxyValue
-from executorch.exir.passes.dim_order_ops_registry import (
-    DimOrderOpsMap,
-    MemoryFormatOpsMap,
-)
 from torch._subclasses import FakeTensor
 from torch.fx.node import Argument
 
@@ -1805,72 +1799,6 @@ def call_operator(
         )
 
 
-@register_cadence_pass(CadencePassAttribute(opt_level=0))
-class ReplaceToDimOrderCopyWithToCopyPass(ExportPass):
-    """
-    dim_order_ops::to_dim_order_copy is not supported, so this is an opt_level=0 pass.
-    If the dim order is sequential, we don't need the extra work with strides and
-    can just use to_copy.
-    """
-
-    def call_operator(
-        self,
-        op,
-        args: Tuple[Argument, ...],
-        kwargs: Dict[str, Argument],
-        meta: NodeMetadata,
-    ) -> ProxyValue:
-        if op not in DimOrderOpsMap:
-            return super().call_operator(op, args, kwargs, meta)
-
-        # new kwargs with dim_order, and no memory_format for the new op
-        nkwargs = dict(copy.deepcopy(kwargs))  # orig kwargs are immutable
-
-        ndim = None
-
-        # can always get the shape, assuming rank is specialized
-
-        # pyre-ignore[16]: `None` has no attribute `to_tensor`
-        if isinstance(args[0], ProxyValue) and args[0].is_tensor():
-            # pyre-ignore[16]: `None` has no attribute `to_tensor`
-            ndim = args[0].to_tensor().dim()
-        elif isinstance(args[0], torch.Tensor):
-            # pyre-ignore[16]: `None` has no attribute `dim`
-            ndim = args[0].dim()
-        elif isinstance(args[0], torch.fx.immutable_collections.immutable_list):
-            # pyre-ignore[6]: Incompatible parameter type
-            ndim = len(args[0])
-        else:
-            assert 0, f"Expecting a Tensor or a ProxyValue but got {type(args[0])}"
-
-        # get the "to" memory format for the EdgeOp
-        contiguous_dim_order = list(range(ndim))
-        dim_order = nkwargs.pop("dim_order", None)
-
-        # Cadence only supports contiguous memory format
-        assert (
-            dim_order is None
-            # pyre-ignore[6]: Incompatible parameter type
-            or len(dim_order) == 0
-            or dim_order == contiguous_dim_order
-        ), "Expected dim order in congituous or prevserve memory format, but got {}".format(
-            dim_order
-        )
-
-        # bring back memory format
-        # pyre-ignore[6]: Incompatible parameter type
-        nkwargs["memory_format"] = get_memory_format(dim_order)
-
-        memory_format_op = MemoryFormatOpsMap[op]
-
-        return super().call_operator(
-            memory_format_op,
-            args,
-            nkwargs,
-            meta,
-        )
-
-
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class ReplaceFullLikeWithFullPass(ExportPass):
     """
@@ -2180,5 +2108,4 @@ class CadenceReplaceOpsInGraph:
         ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
         ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
         ReplaceAtenLinalgVectorNormWithCadenceLinalgVectorNormPass,
-        ReplaceToDimOrderCopyWithToCopyPass,
     ]
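For reference, the removed pass mapped the `dim_order` kwarg back to a `memory_format` via `get_memory_format` before re-emitting the op through `MemoryFormatOpsMap`. A minimal sketch of that mapping, assuming the current behavior of `executorch.exir.dim_order_utils.get_memory_format` (the two cases mirror the pass's assert, which only accepted a missing, empty, or fully sequential dim order):

    import torch
    from executorch.exir.dim_order_utils import get_memory_format

    # No dim_order kwarg: the pass forwarded None, preserving the input format.
    assert get_memory_format(None) == torch.preserve_format
    # A sequential dim order [0, 1, ..., n-1] denotes plain contiguous layout,
    # the only explicit layout the removed pass's assert accepted.
    assert get_memory_format(list(range(4))) == torch.contiguous_format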
