Skip to content

Commit 00be112

Browse files
cccclai authored and facebook-github-bot committed
refactor the transform as a standalone function (#2593)
Summary: bypass-github-export-checks bypass-github-executorch-ci-checks bypass-github-pytorch-ci-checks Pull Request resolved: #2593 Move the transform passes to a standalone function so it's easy to reuse. Reviewed By: mergennachin Differential Revision: D55230973 fbshipit-source-id: 8648fc44a5d6091b15b334ddb9eeaa931ff33c0e
1 parent 8532e79 commit 00be112

File tree

1 file changed

+19
-17
lines changed

1 file changed

+19
-17
lines changed

backends/qualcomm/utils/utils.py

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -86,26 +86,10 @@ def canonicalize_program(prog: ExportedProgram):
8686
)
8787

8888

89-
def capture_program(
90-
module: torch.nn.Module,
91-
inputs: Tuple[torch.Tensor],
92-
) -> exir.ExirExportedProgram:
93-
# TODO: should switch to torch.export.export & custom deomposition
94-
# to reduce maintaining effort.
95-
exir_exported_program = exir.capture(
96-
module,
97-
inputs,
98-
qnn_capture_config(),
99-
)
100-
# We choose call_operator by target in ConvertBinaryOpsWithScalar
101-
# because it is the same source_fn_stack for MultiheadAttention
102-
exir_exported_program.transform(ConvertBinaryOpsWithScalar())
103-
ex_prog = exir_exported_program.to_edge(qnn_edge_config())
104-
89+
def _transform(edge_program: ExportedProgram) -> None:
10590
# currently ExirExportedProgram.transform does not accept
10691
# changes of input number which was caused by FoldQDQ
10792
# apply passes one by one here to avoid IR capture failure
108-
edge_program = ex_prog.exported_program
10993
graph_module = edge_program.graph_module
11094
RemoveClone()(graph_module)
11195
RecomposePixelShuffle()(graph_module)
@@ -121,6 +105,24 @@ def capture_program(
121105
FoldQDQ()(graph_module)
122106
InsertRequantize(edge_program)(graph_module)
123107
LayoutTransform(edge_program)(graph_module)
108+
109+
110+
def capture_program(
111+
module: torch.nn.Module,
112+
inputs: Tuple[torch.Tensor],
113+
) -> exir.ExirExportedProgram:
114+
# TODO: should switch to torch.export.export & custom deomposition
115+
# to reduce maintaining effort.
116+
exir_exported_program = exir.capture(
117+
module,
118+
inputs,
119+
qnn_capture_config(),
120+
)
121+
# We choose call_operator by target in ConvertBinaryOpsWithScalar
122+
# because it is the same source_fn_stack for MultiheadAttention
123+
exir_exported_program.transform(ConvertBinaryOpsWithScalar())
124+
ex_prog = exir_exported_program.to_edge(qnn_edge_config())
125+
_transform(ex_prog.exported_program)
124126
return ex_prog
125127

126128

0 commit comments

Comments
 (0)