
Commit dd9483c

JacobSzwejbka authored and facebook-github-bot committed
move codegen pybindings out of public pybindings lib (#131)
Summary: Pull Request resolved: #131

These are only used by codegen. Moving them out of the general pybindings lib and restricting visibility. As a side note, I don't think any of this stuff actually has to happen in C++; in fact, it seems like it'd be easier to do this on the Python version of the schema object through deserialize_for_json.

Reviewed By: larryliu0820

Differential Revision: D48671946

fbshipit-source-id: af5d51d59253534c3d318fa01316a502e7ec0446
1 parent da993fd commit dd9483c
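
As a concrete illustration of the side note in the summary, a pure-Python version could walk the deserialized schema object directly instead of going through C++ pybindings. This is only a sketch: the field names (execution_plan, operators, name, overload) are assumptions about executorch.exir.schema.Program and are not verified against this commit.

    from typing import List

    from executorch.exir.schema import Program


    def _get_operators_from_schema(program: Program) -> List[str]:
        # Walk the deserialized schema object rather than the flatbuffer C++ view.
        # Field names used here are assumptions, as noted above.
        ops: List[str] = []
        for plan in program.execution_plan:
            for op in plan.operators:
                ops.append(f"{op.name}.{op.overload}" if op.overload else op.name)
        return ops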

10 files changed: +207 -116 lines changed

codegen/tools/gen_oplist.py

Lines changed: 7 additions & 12 deletions
@@ -78,43 +78,38 @@ class KernelType(IntEnum):
 
 
 def _get_operators(model_file: str) -> List[str]:
-    # pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.extension.pybindings.operator`.
-    # pyre-ignore: Undefined attribute [16]: Module `executorch.extension.pybindings` has no attribute `operator`
-    from executorch.extension.pybindings.operator import (
+    from executorch.codegen.tools.selective_build import (
         _get_program_from_buffer,
         _get_program_operators,
     )
 
     print("Processing model file: ", model_file)
     with open(model_file, "rb") as f:
         buf = f.read()
-    # pyre-ignore: Undefined attribute [16]: Module `executorch.extension.pybindings` has no attribute `operator`.
+
     program = _get_program_from_buffer(buf)
-    # pyre-ignore: Undefined attribute [16]: Module `executorch.extension.pybindings` has no attribute `operator`.
     operators = _get_program_operators(program)
     print(f"Model file loaded, operators are: {operators}")
     return operators
 
 
 def _get_kernel_metadata_for_model(model_file: str) -> Dict[str, List[str]]:
 
-    from executorch.extension.pybindings.operator import (
+    from executorch.codegen.tools.selective_build import (
         _get_io_metadata_for_program_operators,
         _get_program_from_buffer,
-        IOMetaData,
+        _IOMetaData,
     )
 
     with open(model_file, "rb") as f:
         buf = f.read()
-    # pyre-ignore: Undefined attribute [16]: Module `executorch.extension.pybindings` has no attribute `operator`.
+
    program = _get_program_from_buffer(buf)
-    # pyre-ignore: Undefined attribute [16]: Module `executorch.extension.pybindings` has no attribute `operator`.
     operators_with_io_metadata = _get_io_metadata_for_program_operators(program)
 
     op_kernel_key_list: Dict[str, List[str]] = {}
 
-    # pyre-ignore: Undefined or invalid type [11]: Annotation `IOMetaData` is not defined as a type.
-    specialized_kernels: Set[List[IOMetaData]]
+    specialized_kernels: Set[List[_IOMetaData]]
     for op_name, specialized_kernels in operators_with_io_metadata.items():
         print(op_name)
         if op_name not in op_kernel_key_list:
@@ -124,7 +119,7 @@ def _get_kernel_metadata_for_model(model_file: str) -> Dict[str, List[str]]:
             version = "v1"
             kernel_key = version + "/"
             for io_metadata in specialized_kernel:
-                if io_metadata.type in [
+                if io_metadata.kernel_type in [
                     KernelType.TENSOR,
                     KernelType.TENSOR_LIST,
                     KernelType.OPTIONAL_TENSOR_LIST,
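
For reference, a minimal usage sketch of the relocated bindings (function names as declared in codegen/tools/selective_build.pyi below); the model file path is hypothetical:

    from executorch.codegen.tools.selective_build import (
        _get_program_from_buffer,
        _get_program_operators,
    )

    # "model.pte" is a hypothetical serialized program file.
    with open("model.pte", "rb") as f:
        buf = f.read()

    program = _get_program_from_buffer(buf)
    print(_get_program_operators(program))  # e.g. ["aten::add.out"]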

extension/pybindings/pybindings.cpp renamed to codegen/tools/selective_build.cpp

Lines changed: 10 additions & 13 deletions
@@ -23,13 +23,14 @@ namespace {
 // Metadata for kernel call io variables.
 // dtype and dim_order will exist only if corresponding variable is Tensor.
 struct IOMetaData {
-  int type;
+  int kernel_type;
   int dtype;
   std::vector<unsigned int> dim_order;
 
   // Create tensor metadata. It records tensor's dtype and dim order.
   explicit IOMetaData(const executorch_flatbuffer::Tensor* t)
-      : type(static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)),
+      : kernel_type(
+            static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)),
        dtype(static_cast<int>(t->scalar_type())) {
     for (size_t i = 0; i < t->dim_order()->size(); i++) {
       dim_order.push_back(static_cast<unsigned int>(t->dim_order()->Get(i)));
@@ -38,7 +39,7 @@ struct IOMetaData {
 
   // Create metadata for non-tensor variable.
   explicit IOMetaData(executorch_flatbuffer::KernelTypes type)
-      : type(static_cast<int>(type)) {
+      : kernel_type(static_cast<int>(type)) {
     ET_CHECK(
         type != executorch_flatbuffer::KernelTypes::Tensor &&
         type != executorch_flatbuffer::KernelTypes::TensorList &&
@@ -54,10 +55,10 @@ struct KernelIOMetaDataComparsion {
       return lhs.size() < rhs.size();
     }
     for (size_t i = 0; i < lhs.size(); i++) {
-      if (lhs[i].type != rhs[i].type) {
-        return lhs[i].type < rhs[i].type;
+      if (lhs[i].kernel_type != rhs[i].kernel_type) {
+        return lhs[i].kernel_type < rhs[i].kernel_type;
       }
-      if (lhs[i].type !=
+      if (lhs[i].kernel_type !=
           static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)) {
         continue;
       }
@@ -241,12 +242,8 @@ py::dict _get_io_metadata_for_program_operators(
   return py_program_op_io_metadata;
 }
 
-void init_module_functions(py::module_&);
-
 PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
-  init_module_functions(m);
-
-  py::class_<executorch_flatbuffer::Program>(m, "Program");
+  py::class_<executorch_flatbuffer::Program>(m, "_Program");
 
   m.def(
       "_get_program_from_buffer",
@@ -263,8 +260,8 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
       &_get_io_metadata_for_program_operators,
       py::return_value_policy::copy);
 
-  py::class_<IOMetaData>(m, "IOMetaData")
-      .def_readwrite("type", &IOMetaData::type)
+  py::class_<IOMetaData>(m, "_IOMetaData")
+      .def_readwrite("kernel_type", &IOMetaData::kernel_type)
       .def_readwrite("dtype", &IOMetaData::dtype)
       .def_readwrite("dim_order", &IOMetaData::dim_order);
 }

codegen/tools/selective_build.pyi

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Any, Dict, List
+
+class _Program: ...
+
+class _IOMetaData:
+    @property
+    def kernel_type(self) -> int: ...
+    @property
+    def dtype(self) -> int: ...
+    @property
+    def dim_order(self) -> List[int]: ...
+
+def _get_program_from_buffer(buffer: bytes) -> _Program: ...
+def _get_program_operators(program: _Program) -> List[str]: ...
+def _get_io_metadata_for_program_operators(
+    program: _Program,
+) -> Dict[str, Any]: ...
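
The io-metadata binding returns a dict keyed by operator name, where each value is a set of tuples of _IOMetaData objects (see the test further down). A hedged sketch of consuming it follows; the model file path is hypothetical, and the printed summary is illustrative rather than the exact kernel-key format used by gen_oplist.py:

    from executorch.codegen.tools.selective_build import (
        _get_io_metadata_for_program_operators,
        _get_program_from_buffer,
    )

    # "model.pte" is a hypothetical serialized program file.
    with open("model.pte", "rb") as f:
        program = _get_program_from_buffer(f.read())

    op_io = _get_io_metadata_for_program_operators(program)
    for op_name, specialized_kernels in op_io.items():
        for specialized_kernel in specialized_kernels:
            # One _IOMetaData per operator argument/return value.
            summary = [
                (m.kernel_type, m.dtype, list(m.dim_order)) for m in specialized_kernel
            ]
            print(op_name, summary)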

codegen/tools/targets.bzl

Lines changed: 41 additions & 1 deletion
@@ -18,7 +18,7 @@ def define_common_targets(is_fbcode = False):
         external_deps = ["torchgen"],
         deps = select({
             "DEFAULT": [],
-            "ovr_config//os:linux": [] if runtime.is_oss else ["//executorch/extension/pybindings:operator"],  # TODO(larryliu0820): pybindings:operator doesn't build in OSS yet
+            "ovr_config//os:linux": [] if runtime.is_oss else ["//executorch/codegen/tools:selective_build"],  # TODO(larryliu0820) :selective_build doesn't build in OSS yet
         }),
     )
 
@@ -153,3 +153,43 @@ def define_common_targets(is_fbcode = False):
             "//libfb/py:parutil",
         ],
     )
+
+    module_name = "selective_build"
+    runtime.cxx_python_extension(
+        name = module_name,
+        srcs = [
+            "selective_build.cpp",
+        ],
+        types = ["{}.pyi".format(module_name)],
+        preprocessor_flags = [
+            "-DEXECUTORCH_PYTHON_MODULE_NAME={}".format(module_name),
+        ],
+        deps = [
+            "//executorch/schema:program",
+            "//executorch/util:read_file",
+        ],
+        external_deps = [
+            "pybind11",
+        ],
+        use_static_deps = True,
+        visibility = ["//executorch/codegen/..."],
+    )
+
+    runtime.python_test(
+        # @autodeps-skip
+        name = "test_selective_build",
+        srcs = [
+            "test/test_selective_build.py",
+        ],
+        deps = [
+            "//caffe2:torch",
+            "//caffe2:torch_fx",
+            "//executorch/codegen/tools:selective_build",
+            "//executorch/exir:lib",
+            "//executorch/exir:pass_manager",
+            "//executorch/exir:scalar_type",
+            "//executorch/exir/emit:lib",
+            "//executorch/exir/passes:lib",
+            "//executorch/exir/serialize:lib",
+        ],
+    )

codegen/tools/test/test_selective_build.py

Lines changed: 121 additions & 0 deletions
@@ -0,0 +1,121 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+from typing import Any, Optional, Tuple
+
+import executorch.exir as exir
+
+import torch
+
+from executorch.codegen.tools.selective_build import (
+    _get_io_metadata_for_program_operators,
+    _get_program_from_buffer,
+    _get_program_operators,
+    _IOMetaData,
+)
+from executorch.exir import CaptureConfig
+from executorch.exir.print_program import pretty_print
+from executorch.exir.scalar_type import ScalarType
+from executorch.exir.schema import Program
+
+
+class ModuleAdd(torch.nn.Module):
+    """The module to serialize and execute."""
+
+    def __init__(self):
+        super(ModuleAdd, self).__init__()
+
+    def forward(self, x, y):
+        return x + y
+
+    def get_methods_to_export(self):
+        return ("forward",)
+
+
+class ModuleMulti(torch.nn.Module):
+    """The module to serialize and execute."""
+
+    def __init__(self):
+        super(ModuleMulti, self).__init__()
+
+    def forward(self, x, y):
+        return x + y
+
+    def forward2(self, x, y):
+        return x + y + 1
+
+    def get_methods_to_export(self):
+        return ("forward", "forward2")
+
+
+def create_program(
+    eager_module: Optional[torch.nn.Module] = None,
+) -> Tuple[Program, Tuple[Any, ...]]:
+    """Returns an executorch program based on ModuleAdd, along with inputs."""
+
+    if eager_module is None:
+        eager_module = ModuleAdd()
+
+    # Trace the test module and create a serialized Executorch program.
+    inputs = (torch.ones(2, 2), torch.ones(2, 2))
+    input_map = {}
+    for method in eager_module.get_methods_to_export():
+        input_map[method] = inputs
+
+    # These cleanup passes are required to convert the `add` op to its out
+    # variant, along with some other transformations.
+    exec_prog = (
+        exir.capture_multiple(eager_module, input_map, config=CaptureConfig())
+        .to_edge()
+        .to_executorch()
+    )
+
+    # Create the Executorch program from the graph.
+    pretty_print(exec_prog.program)
+    return (exec_prog, inputs)
+
+
+class PybindingsTest(unittest.TestCase):
+    def test_dump_operators(self):
+        # Create and serialize a program.
+        orig_program, _ = create_program()
+
+        # Deserialize the program and demonstrate that we could get its operator
+        # list.
+        program = _get_program_from_buffer(orig_program.buffer)
+        operators = _get_program_operators(program)
+        self.assertEqual(operators, ["aten::add.out"])
+
+    def test_get_op_io_meta(self):
+        # Checking whether get_op_io_meta returns the correct metadata for all its ios.
+        orig_program, inputs = create_program()
+
+        # Deserialize the program and demonstrate that we could get its operator
+        # list.
+        program = _get_program_from_buffer(orig_program.buffer)
+        program_op_io_metadata = _get_io_metadata_for_program_operators(program)
+
+        self.assertTrue(len(program_op_io_metadata) == 1)
+        self.assertTrue(isinstance(program_op_io_metadata, dict))
+
+        self.assertTrue("aten::add.out" in program_op_io_metadata)
+        self.assertTrue(isinstance(program_op_io_metadata["aten::add.out"], set))
+        self.assertTrue(len(program_op_io_metadata["aten::add.out"]) == 1)
+
+        for op_io_metadata in program_op_io_metadata["aten::add.out"]:
+            self.assertTrue(len(op_io_metadata) == 5)
+            self.assertTrue(isinstance(op_io_metadata, tuple))
+
+            for io_idx, io_metadata in enumerate(op_io_metadata):
+                self.assertTrue(isinstance(io_metadata, _IOMetaData))
+                if io_idx == 2:
+                    # TODO(gasoonjia): Create a enum class to map KernelTypes to int, remove the hardcoded 2 and 5 below.
+                    self.assertEqual(io_metadata.kernel_type, 2)
+                else:
+                    self.assertEqual(io_metadata.kernel_type, 5)
+                    self.assertEqual(io_metadata.dtype, ScalarType.FLOAT)
+                    self.assertEqual(io_metadata.dim_order, [0, 1])

extension/pybindings/module.cpp

Lines changed: 1 addition & 2 deletions
@@ -26,7 +26,6 @@
 #include <executorch/runtime/platform/profiler.h>
 #include <executorch/runtime/platform/runtime.h>
 #include <executorch/schema/bundled_program_schema_generated.h>
-#include <executorch/schema/program_generated.h>
 #include <executorch/util/TestMemoryConfig.h>
 #include <executorch/util/bundled_program_verification.h>
 #include <executorch/util/read_file.h>
@@ -463,7 +462,7 @@ void create_profile_block(const std::string& name) {
 
 } // namespace
 
-void init_module_functions(py::module_& m) {
+PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
   m.def("_load_for_executorch", PyModule::load_from_file, py::arg("path"));
   m.def(
       "_load_for_executorch_from_buffer",

extension/pybindings/module_stub.cpp

Lines changed: 0 additions & 19 deletions
This file was deleted.
