Commit 55af840

mcr229 authored and facebook-github-bot committed
tests remove run_decompositions from Export Stage (#4109)
Summary:
Pull Request resolved: #4109

https://fb.workplace.com/groups/257735836456307/permalink/704200805143139/ suggests we should not be running this in our tests, as our tests are meant to mimic the natural workflow of a user delegating to XNNPACK.

Reviewed By: digantdesai, kirklandsign

Differential Revision: D59253645

fbshipit-source-id: 39f71657e546e4f072a3a8e056a4257dd04a6edf
1 parent 152988d commit 55af840
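Background for the change: the Export stage previously called run_decompositions() on the result of torch.export.export, which rewrites high-level ATen ops (for example torch.ops.aten.conv1d.default) into core ATen ops (for example torch.ops.aten.convolution.default). Dropping that call means the graph checked right after export keeps the high-level op names, which is what the per-op test updates below reflect. A minimal sketch of the difference, using a hypothetical TinyConv module and example inputs that are not part of this commit:

import torch
from torch.export import export


class TinyConv(torch.nn.Module):
    # Hypothetical module, used only to illustrate the export behavior.
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv1d(3, 8, kernel_size=3)

    def forward(self, x):
        return self.conv(x)


example_inputs = (torch.randn(1, 3, 16),)

# What a user delegating to XNNPACK typically does (and what the tests now
# mimic): the exported graph keeps the high-level op,
# e.g. torch.ops.aten.conv1d.default.
ep = export(TinyConv(), example_inputs)

# What the tests did before this change: run_decompositions() lowers the
# graph, so the same call shows up as torch.ops.aten.convolution.default.
ep_decomposed = ep.run_decompositions()

print(ep.graph_module.code)             # expected to show aten.conv1d
print(ep_decomposed.graph_module.code)  # expected to show aten.convolution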

File tree

8 files changed: +12 -16 lines changed


backends/xnnpack/test/ops/conv1d.py

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ def _test_conv1d(
            else Tester(module, inputs)
        )
        .export()
-       .check_count({"torch.ops.aten.convolution.default": conv_count})
+       .check_count({"torch.ops.aten.conv1d.default": conv_count})
        .to_edge()
        .check_count(
            {

backends/xnnpack/test/ops/conv2d.py

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ def _test(

        (
            tester.export()
-           .check_count({"torch.ops.aten.convolution.default": conv_count})
+           .check_count({"torch.ops.aten.conv2d": conv_count})
            .to_edge()
            .check_count(
                {

backends/xnnpack/test/ops/linear.py

Lines changed: 0 additions & 3 deletions
@@ -832,7 +832,6 @@ def _test_linear(
        tester.quantize(Quantize(quantization_config=quant_config))

        tester.export()
-       tester.check_count({aten_op: 1})
        if quant:
            tester.check(["torch.ops.quantized_decomposed"])

@@ -882,8 +881,6 @@ def _test_dqlinear(
        tester.quantize(Quantize(quantization_config=quant_config))

        tester.export()
-       tester.check_count({aten_op: linear_count})
-       tester.check(["torch.ops.quantized_decomposed"])
        tester.to_edge()
        tester.check_count({edge_op: linear_count})


backends/xnnpack/test/ops/maxpool2d.py

Lines changed: 3 additions & 4 deletions
@@ -54,8 +54,7 @@ def _test_maxpool2d(self, inputs):
        (
            Tester(self.MaxPool2d(3, 1, 0, 1), inputs)
            .export()
-           .check_count({"torch.ops.aten.max_pool2d_with_indices.default": 1})
-           .check(["getitem"])
+           .check_count({"torch.ops.aten.max_pool2d.default": 1})
            .to_edge()
            .check_count(
                {
@@ -115,7 +114,7 @@ def test_fp32_maxpool2d_unsupported_ceilmode(self):
        (
            Tester(self.MaxPool2dUnsupportedCeilMode(), inputs)
            .export()
-           .check_count({"torch.ops.aten.max_pool2d_with_indices.default": 1})
+           .check_count({"torch.ops.aten.max_pool2d.default": 1})
            .to_edge()
            .check_count(
                {
@@ -152,7 +151,7 @@ def forward(self, x):
            Tester(MaxPool(maxpool_params), inputs)
            .quantize()
            .export()
-           .check_count({"torch.ops.aten.max_pool2d_with_indices.default": 1})
+           .check_count({"torch.ops.aten.max_pool2d.default": 1})
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .check_count(

backends/xnnpack/test/ops/softmax.py

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@ def _test_softmax(self, inputs):
        (
            Tester(self.Softmax(dim), inputs)
            .export()
-           .check_count({"torch.ops.aten._softmax.default": 1})
+           .check_count({"torch.ops.aten.softmax": 1})
            .to_edge()
            .check_count(
                {"executorch_exir_dialects_edge__ops_aten__softmax_default": 1}
@@ -62,7 +62,7 @@ def test_fp32_softmax_unsupported(self):
        (
            Tester(self.Softmax(dim), inputs)
            .export()
-           .check_count({"torch.ops.aten._softmax.default": 1})
+           .check_count({"torch.ops.aten.softmax": 1})
            .to_edge()
            .check_count(
                {"executorch_exir_dialects_edge__ops_aten__softmax_default": 1}

backends/xnnpack/test/ops/square.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ def _test_square(self, inputs):
        (
            Tester(self.Square(), inputs)
            .export()
-           .check_count({"torch.ops.aten.pow.Tensor_Scalar": 1})
+           .check_count({"torch.ops.aten.square.default": 1})
            .to_edge()
            .check_count(
                {"executorch_exir_dialects_edge__ops_aten_pow_Tensor_Scalar": 1}

backends/xnnpack/test/ops/static_constant_pad.py

Lines changed: 3 additions & 3 deletions
@@ -87,7 +87,7 @@ def _test_static_constant_pad_functional(self, inputs):
        (
            Tester(self.StaticConstantPadFunctional(), inputs)
            .export()
-           .check_count({"torch.ops.aten.constant_pad_nd.default": 8})
+           .check_count({"torch.ops.aten.pad.default": 8})
            .to_edge()
            .check_count(
                {"executorch_exir_dialects_edge__ops_aten_constant_pad_nd_default": 8}
@@ -137,7 +137,7 @@ def forward(self, x):
            Tester(Pad(), inputs)
            .quantize()
            .export()
-           .check_count({"torch.ops.aten.constant_pad_nd.default": 1})
+           .check_count({"torch.ops.aten.pad.default": 1})
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .check_count(
@@ -162,7 +162,7 @@ def test_qs8_static_constant_pad_2d(self):
            Tester(self.StaticConstantPad2d(), inputs)
            .quantize()
            .export()
-           .check_count({"torch.ops.aten.constant_pad_nd.default": 1})
+           .check_count({"torch.ops.aten.pad.default": 1})
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .check_count(

backends/xnnpack/test/tester/tester.py

Lines changed: 1 addition & 1 deletion
@@ -189,7 +189,7 @@ def run(
    ) -> None:
        self.exported_program = export(
            artifact, inputs, dynamic_shapes=self.dynamic_shapes
-       ).run_decompositions()
+       )

    @property
    def artifact(self) -> ExportedProgram:
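
With run_decompositions() removed from the Export stage, a test inspects pre-decomposition ATen ops right after .export(); lowering to edge dialect ops still happens in .to_edge(). A sketch of the resulting test pattern, following the Tester chain used in the files above (the import path, module, inputs, and edge-op count are illustrative assumptions, not taken from this commit):

import torch
from executorch.backends.xnnpack.test.tester import Tester  # assumed import path


class TinyConv(torch.nn.Module):
    # Hypothetical module, used only to illustrate the test pattern.
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv1d(3, 8, kernel_size=3)

    def forward(self, x):
        return self.conv(x)


(
    Tester(TinyConv(), (torch.randn(1, 3, 16),))
    .export()  # no run_decompositions(): the graph keeps torch.ops.aten.conv1d
    .check_count({"torch.ops.aten.conv1d.default": 1})
    .to_edge()  # decomposition into edge dialect ops happens from here on
    .check_count(
        # edge op name follows the convention seen in the diffs above
        {"executorch_exir_dialects_edge__ops_aten_convolution_default": 1}
    )
)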
