
Commit 5fcf6b4

mcr229 authored and facebook-github-bot committed
Fix Long Term Quant Testing (#78)
Summary:
Pull Request resolved: #78

Long Term Quant seems to be using its own custom graph capture before running prepare and convert. Let us mirror this so we are testing with the proper quant flow:
https://fb.workplace.com/groups/257735836456307/permalink/545316467698241/

The change here is that Quantize2 now runs as the stage before Export. Testing-wise, the stages are as follows:

```
export.capture_pre_autograd_graph --> prepare --> convert --> exir.capture()
|--------------------------------------------------------|    |-------------|
                     Quantize2(stage)                            Export(stage)
```

Differential Revision: D48488929

fbshipit-source-id: 229b1783cc2c89222e3e5d19216c1fd645e30e82
1 parent 29293a2 · commit 5fcf6b4
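For context, a minimal sketch of the PT2E flow that the Quantize2 stage now mirrors. This is illustrative only: the `AddModule`, the example inputs, and the `XNNPACKQuantizer` setup are assumed placeholders, and the quantizer/`prepare_pt2e` import paths are assumptions about the PyTorch build in use; they are not part of this diff.

```python
# Illustrative sketch of the capture -> prepare -> convert flow that the
# Quantize2 stage now performs before the Export stage runs exir.capture().
# The module, inputs, and quantizer below are assumed placeholders.
import torch
import torch._export as export
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)


class AddModule(torch.nn.Module):
    def forward(self, x, y):
        return x + y


module = AddModule().eval()
example_inputs = (torch.randn(1, 4), torch.randn(1, 4))

quantizer = XNNPACKQuantizer()
quantizer.set_global(get_symmetric_quantization_config())

# 1. Quantize2 captures the pre-autograd graph itself ...
captured = export.capture_pre_autograd_graph(module, example_inputs)

# 2. ... then prepares, calibrates, and converts it.
prepared = prepare_pt2e(captured, quantizer)
prepared(*example_inputs)  # calibration pass
converted = convert_pt2e(prepared)

# 3. Only afterwards does the Export stage run exir.capture() on the
#    already-quantized graph module.
```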

2 files changed (+12, −12 lines)

backends/xnnpack/test/ops/add.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -75,9 +75,9 @@ def test_add_quantized_pt2e(self):
 
         (
             Tester(add_module, model_inputs)
+            .quantize2()
             .export()
             .check_count({"torch.ops.aten.add.Tensor": 4})
-            .quantize2()
             .check(["torch.ops.quantized_decomposed"])
             .to_edge()
             .check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4})
```

backends/xnnpack/test/tester/tester.py

Lines changed: 11 additions & 11 deletions
```diff
@@ -10,6 +10,7 @@
 from typing import Any, Dict, List, Optional, Tuple
 
 import torch
+import torch._export as export
 from executorch import exir
 from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
     XnnpackFloatingPointPartitioner,
@@ -145,28 +146,28 @@ def __init__(
 
         self.quantizer.set_global(self.quantization_config)
 
-        self.converted_program = None
+        self.converted_graph = None
 
     def run(
-        self, artifact: ExirExportedProgram, inputs: Optional[Tuple[torch.Tensor]]
+        self, artifact: torch.nn.Module, inputs: Optional[Tuple[torch.Tensor]]
     ) -> None:
-        prepared = prepare_pt2e(artifact.exported_program.graph_module, self.quantizer)
+        captured_graph = export.capture_pre_autograd_graph(artifact, inputs)
+        prepared = prepare_pt2e(captured_graph, self.quantizer)
         converted = convert_pt2e(prepared)
-        artifact.exported_program._graph_module = converted
-        self.converted_program = artifact
+        self.converted_graph = converted
 
     @property
-    def artifact(self) -> ExirExportedProgram:
-        return self.converted_program
+    def artifact(self) -> torch.fx.GraphModule:
+        return self.converted_graph
 
     @property
     def graph_module(self) -> str:
-        return self.converted_program.exported_program.graph_module
+        return self.converted_graph
 
 
 @register_stage
 class Export(Stage):
-    def __init__(self, capture_config: Optional[CaptureConfig] = None):
+    def __init__(self, for_quant=False, capture_config: Optional[CaptureConfig] = None):
         self.capture_conf = capture_config or get_xnnpack_capture_config()
         self.exir_exported_program = None
 
@@ -274,12 +275,11 @@ def __init__(
         self.inputs = inputs
         self.stages: Dict[str, Stage] = OrderedDict.fromkeys(list(_stages_.keys()))
         self.pipeline = {
+            self._stage_name(Quantize2): [self._stage_name(Export)],
             self._stage_name(Quantize): [self._stage_name(Export)],
             self._stage_name(Export): [
-                self._stage_name(Quantize2),
                 self._stage_name(ToEdge),
             ],
-            self._stage_name(Quantize2): [self._stage_name(ToEdge)],
             self._stage_name(ToEdge): [self._stage_name(Partition)],
             # TODO Make this Stage optional
             self._stage_name(Partition): [self._stage_name(ToExecutorch)],
```
