Skip to content

Commit 1ed6250

Browse files
mcr229 and facebook-github-bot
authored and committed
Test (#578)
Summary: Pull Request resolved: #578 Adding an end-to-end test for Inception V4 for both fp32 and quantized models. As with Inception V3, we currently don't have avg_pool2d because of a missing op name. (We do support mean.dim(-1, -2), which is the same.) This is something we can add after MVP. Reviewed By: digantdesai Differential Revision: D49850148 fbshipit-source-id: 52ccf8044c9bb0e540ddacdf90fb4510aeeb1dcd
1 parent f2273df commit 1ed6250

File tree

2 files changed

+66
-0
lines changed

2 files changed

+66
-0
lines changed

backends/xnnpack/test/TARGETS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,7 @@ python_unittest(
127127
]),
128128
tags = ["long_running"],
129129
deps = [
130+
"fbsource//third-party/pypi/timm:timm",
130131
"fbsource//third-party/pypi/torchsr:torchsr", # @manual
131132
"//caffe2:torch",
132133
"//executorch/backends/xnnpack/test/tester:tester",
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
import unittest
8+
9+
import torch
10+
from executorch.backends.xnnpack.test.tester import Tester
11+
from timm.models import inception_v4
12+
13+
14+
class TestInceptionV4(unittest.TestCase):
    """End-to-end XNNPACK delegation tests for timm's Inception V4 (fp32 and qs8)."""

    # Randomly-initialized weights (pretrained=False) keep the test hermetic;
    # numerical comparison is against eager execution of this same instance.
    ic4 = inception_v4(pretrained=False).eval()
    # One NCHW batch: a single 3x299x299 image of ones.
    model_inputs = (torch.ones(3, 299, 299).unsqueeze(0),)

    # Edge-dialect ops expected after to_edge(); all of them should be
    # absorbed into the delegate by partitioning.
    all_operators = {
        "executorch_exir_dialects_edge__ops_aten_addmm_default",
        # "executorch.exir.dialects.edge._ops.aten.avg_pool2d.default", Currently do not have avg_pool2d partitioned
        "executorch_exir_dialects_edge__ops_aten_cat_default",
        "executorch_exir_dialects_edge__ops_aten_convolution_default",
        "executorch_exir_dialects_edge__ops_aten_max_pool2d_with_indices_default",
        "executorch_exir_dialects_edge__ops_aten_mean_dim",
        "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
        "executorch_exir_dialects_edge__ops_aten_permute_copy_default",
        "executorch_exir_dialects_edge__ops_aten_relu_default",
    }

    def _lower_and_verify(self, tester, expected_ops):
        """Run the shared export->edge->partition->serialize->run pipeline.

        Checks that every op in expected_ops is present after to_edge(),
        that partitioning replaces them all with a single delegate call,
        and that serialized execution matches eager outputs.
        """
        tester = tester.export().to_edge()
        tester = tester.check(list(expected_ops)).partition()
        tester = tester.check(["torch.ops.executorch_call_delegate"])
        tester = tester.check_not(list(expected_ops))
        tester = tester.to_executorch().serialize()
        tester.run_method().compare_outputs()

    def test_fp32_ic4(self):
        """Float32 model: all tracked edge ops are fully delegated to XNNPACK."""
        self._lower_and_verify(
            Tester(self.ic4, self.model_inputs), self.all_operators
        )

    def test_qs8_ic4(self):
        """Symmetric-int8 quantized model is fully delegated to XNNPACK."""
        # Quantization fuses away batchnorm, so it is no longer in the graph
        ops_after_quantization = self.all_operators - {
            "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
        }
        self._lower_and_verify(
            Tester(self.ic4, self.model_inputs).quantize(), ops_after_quantization
        )

0 commit comments

Comments
 (0)