Skip to content

Commit 6086d66

Browse files
mcr229 authored and facebook-github-bot committed
MobileNetv2 FP32 + QS8 Test (#82)
Summary: Pull Request resolved: #82 Adding some CI for Mobilenetv2. The test tests for FP32 model and QS8 Model via long term quantization flow. Reviewed By: kirklandsign Differential Revision: D48488928 fbshipit-source-id: 8d0dd396fc7fa8019abd94947c6a955ae8d50625
1 parent ff2d7f2 commit 6086d66

File tree

2 files changed

+82
-0
lines changed

2 files changed

+82
-0
lines changed

backends/xnnpack/test/TARGETS

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -120,3 +120,16 @@ python_unittest(
120120
"//executorch/backends/xnnpack/test/tester:tester",
121121
],
122122
)
123+
124+
# End-to-end model tests for the XNNPACK backend (e.g. MobileNetV2 FP32/QS8).
python_unittest(
    name = "test_xnnpack_models",
    # One test module per model under models/.
    srcs = glob([
        "models/*.py",
    ]),
    deps = [
        "//caffe2:torch",
        "//executorch/backends/xnnpack/partition:xnnpack_partitioner",
        "//executorch/backends/xnnpack/test/tester:tester",
        # torchvision supplies the pretrained model definitions under test.
        "//pytorch/vision:torchvision",
    ],
)
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
import unittest
8+
9+
import torch
10+
import torchvision.models as models
11+
from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
12+
XnnpackQuantizedPartitioner2,
13+
)
14+
from executorch.backends.xnnpack.test.tester import Partition, Tester
15+
from torchvision.models.mobilenetv2 import MobileNet_V2_Weights
16+
17+
18+
class TestXNNPACKMobileNetV2(unittest.TestCase):
    """End-to-end tests delegating torchvision MobileNetV2 to XNNPACK.

    Covers the FP32 path and the QS8 (PT2E quantization) path, checking
    that the expected edge-dialect ops appear before partitioning, are
    fully consumed by the delegate after partitioning, and that the
    serialized program's outputs match eager execution.
    """

    # NOTE(review): this passes the MobileNet_V2_Weights enum class rather
    # than a member (e.g. MobileNet_V2_Weights.DEFAULT) — confirm this
    # resolves to the intended pretrained weights in the pinned torchvision.
    mv2 = models.__dict__["mobilenet_v2"](weights=MobileNet_V2_Weights)
    mv2 = mv2.eval()
    # Standard ImageNet input size. Fixed typo: the original used
    # (1, 3, 224, 244); the trailing 244 should be 224.
    model_inputs = (torch.ones(1, 3, 224, 224),)

    # Edge-dialect ops expected in the graph before delegation; after
    # partitioning, none of these should remain outside the delegate.
    all_operators = {
        "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
        "executorch_exir_dialects_edge__ops_aten_add_Tensor",
        "executorch_exir_dialects_edge__ops_aten_permute_copy_default",
        "executorch_exir_dialects_edge__ops_aten_addmm_default",
        "executorch_exir_dialects_edge__ops_aten_mean_dim",
        "executorch_exir_dialects_edge__ops_aten_hardtanh_default",
        "executorch_exir_dialects_edge__ops_aten_convolution_default",
    }

    def test_fp32(self):
        """FP32 MobileNetV2 is fully delegated and matches eager outputs."""
        (
            Tester(self.mv2, self.model_inputs)
            .export()
            .to_edge()
            .check(list(self.all_operators))
            .partition()
            .check(["torch.ops.executorch_call_delegate"])
            .check_not(list(self.all_operators))
            .to_executorch()
            .serialize()
            .run_method()
            .compare_outputs()
        )

    def test_qs8_pt2e(self):
        """QS8 (PT2E-quantized) MobileNetV2 is fully delegated and matches eager."""
        # Quantization fuses away batchnorm, so it is no longer in the graph
        ops_after_quantization = self.all_operators - {
            "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
        }

        (
            Tester(self.mv2, self.model_inputs)
            .quantize2()
            .export()
            .to_edge()
            .check(list(ops_after_quantization))
            .partition(Partition(partitioner=XnnpackQuantizedPartitioner2))
            .check(["torch.ops.executorch_call_delegate"])
            .check_not(list(ops_after_quantization))
            .to_executorch()
            .serialize()
            .run_method()
            .compare_outputs()
        )

0 commit comments

Comments
 (0)