Skip to content

Commit 9bc3662

Browse files
guangy10 and facebook-github-bot
authored and committed
Add torchvision_vit model to the examples (#33)
Summary: Pull Request resolved: #33 Info about the model: https://pytorch.org/vision/main/models/generated/torchvision.models.vit_b_16.html#torchvision.models.vit_b_16 Reviewed By: kimishpatel, kirklandsign Differential Revision: D48012005 fbshipit-source-id: 4c3703c40ca0a4d91f57500a4b4063cdf7ec4388
1 parent f529efc commit 9bc3662

File tree

7 files changed

+73
-4
lines changed

7 files changed

+73
-4
lines changed

examples/export/test/test_export.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,14 +27,14 @@ def _assert_eager_lowered_same_result(
2727
_EDGE_COMPILE_CONFIG
2828
)
2929

30-
executorch_model = edge_model.to_executorch()
30+
executorch_prog = edge_model.to_executorch()
3131
# pyre-ignore
32-
pte_model = _load_for_executorch_from_buffer(executorch_model.buffer)
32+
pte_model = _load_for_executorch_from_buffer(executorch_prog.buffer)
3333

3434
with torch.no_grad():
3535
eager_output = eager_model(*example_inputs)
3636
with torch.no_grad():
37-
executorch_output = pte_model.forward(example_inputs)
37+
executorch_output = pte_model.run_method("forward", example_inputs)
3838

3939
if isinstance(eager_output, tuple):
4040
# TODO: Allow validating other items
@@ -65,3 +65,9 @@ def test_emformer_export_to_executorch(self):
6565
eager_model = eager_model.eval()
6666

6767
self._assert_eager_lowered_same_result(eager_model, example_inputs)
68+
69+
def test_vit_export_to_executorch(self):
    """Export the torchvision ViT example model and check that the lowered
    ExecuTorch program matches eager-mode results."""
    model, inputs = MODEL_NAME_TO_MODEL["vit"]()
    self._assert_eager_lowered_same_result(model.eval(), inputs)

examples/models/TARGETS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ python_library(
1111
"//executorch/examples/models/emformer:emformer_export",
1212
"//executorch/examples/models/mobilenet_v2:mv2_export",
1313
"//executorch/examples/models/mobilenet_v3:mv3_export",
14+
"//executorch/examples/models/torchvision_vit:vit_export",
1415
"//executorch/exir/backend:compile_spec_schema",
1516
],
1617
)

examples/models/models.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,12 @@ def gen_emformer_model_inputs() -> Tuple[torch.nn.Module, Any]:
9595
return EmformerModel.get_model(), EmformerModel.get_example_inputs()
9696

9797

98+
def gen_torchvision_vit_model_and_inputs() -> Tuple[torch.nn.Module, Any]:
    """Instantiate the torchvision ViT-B/16 example model and its sample inputs.

    Imported lazily so the torchvision dependency is only pulled in when the
    "vit" entry of MODEL_NAME_TO_MODEL is actually requested.
    """
    from ..models.torchvision_vit import TorchVisionViTModel

    return (
        TorchVisionViTModel.get_model(),
        TorchVisionViTModel.get_example_inputs(),
    )
102+
103+
98104
MODEL_NAME_TO_MODEL = {
99105
"mul": lambda: (MulModule(), MulModule.get_example_inputs()),
100106
"linear": lambda: (LinearModule(), LinearModule.get_example_inputs()),
@@ -103,4 +109,5 @@ def gen_emformer_model_inputs() -> Tuple[torch.nn.Module, Any]:
103109
"mv2": gen_mobilenet_v2_model_inputs,
104110
"mv3": gen_mobilenet_v3_model_inputs,
105111
"emformer": gen_emformer_model_inputs,
112+
"vit": gen_torchvision_vit_model_and_inputs,
106113
}
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# Buck target for the torchvision ViT example model wrapper.
# Mirrors the sibling example-model targets (e.g. mv2_export, mv3_export).
load("@fbcode_macros//build_defs:python_library.bzl", "python_library")

python_library(
    name = "vit_export",
    srcs = [
        "__init__.py",
        "export.py",
    ],
    # Keeps the Python import path stable regardless of repo layout.
    base_module = "executorch.examples.models.torchvision_vit",
    deps = [
        "//caffe2:torch",
        # Needed for torchvision.models.vit_b_16 used in export.py.
        "//pytorch/vision:torchvision",
    ],
)
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from .export import TorchVisionViTModel

# __all__ entries must be strings naming the public symbols. Listing the
# class object itself breaks `from ... import *` with
# "TypeError: Item in __all__ must be str".
__all__ = [
    "TorchVisionViTModel",
]
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import logging

import torch
from torchvision import models

FORMAT = "[%(filename)s:%(lineno)s] %(message)s"
logging.basicConfig(format=FORMAT)


class TorchVisionViTModel:
    """Thin wrapper exposing torchvision's ViT-B/16 for the export examples."""

    def __init__(self):
        pass

    @staticmethod
    def get_model():
        """Return a pretrained vit_b_16 module (downloads weights if needed)."""
        logging.info("loading torchvision vit_b_16 model")
        model = models.vit_b_16(weights="IMAGENET1K_V1")
        logging.info("loaded torchvision vit_b_16 model")
        return model

    @staticmethod
    def get_example_inputs():
        """Return a one-tuple holding a random ImageNet-sized image batch
        (N=1, C=3, H=224, W=224) suitable as forward() example inputs."""
        return (torch.randn(1, 3, 224, 224),)

extension/pybindings/module.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -283,7 +283,7 @@ py::object pyFromEValue(const EValue& v, KeepAlive& keep_alive) {
283283
}
284284

285285
static constexpr size_t kDEFAULT_NON_CONSTANT_POOL_SIZE =
286-
256 * 1024U * 1024U; // 256 MB
286+
2 * 256 * 1024U * 1024U; // 512 MB
287287
static constexpr size_t kRUNTIME_POOL_SIZE = 256 * 1024U * 1024U; // 256 MB
288288
static constexpr size_t kDEFAULT_BUNDLED_INPUT_POOL_SIZE = 16 * 1024U;
289289

0 commit comments

Comments
 (0)