Skip to content

Commit fe910e1

Browse files
shoumikhin authored and facebook-github-bot committed
Buckify Core ML AOT components. (#3701)
Summary: Pull Request resolved: #3701 . Reviewed By: kirklandsign Differential Revision: D57650038 fbshipit-source-id: 581f1afe14262a5fc32117fffadf3b10a5a75f41
1 parent 1681837 commit fe910e1

File tree

8 files changed

+137
-26
lines changed

8 files changed

+137
-26
lines changed

backends/apple/coreml/TARGETS

Lines changed: 100 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,100 @@
1-
# This file needs to exist to avoid build system breakage, see https://fburl.com/workplace/jtdlgdmd
1+
# Any targets that should be shared between fbcode and xplat must be defined in
2+
# targets.bzl. This file can contain fbcode-only targets.
3+
4+
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
5+
6+
oncall("executorch")
7+
8+
runtime.python_library(
9+
name = "backend",
10+
srcs = glob([
11+
"compiler/*.py",
12+
]),
13+
visibility = [
14+
"@EXECUTORCH_CLIENTS",
15+
],
16+
deps = [
17+
":executorchcoreml",
18+
"//executorch/exir/backend:backend_details",
19+
"//executorch/exir/backend:compile_spec_schema",
20+
"fbsource//third-party/pypi/coremltools:coremltools",
21+
],
22+
)
23+
24+
runtime.python_library(
25+
name = "partitioner",
26+
srcs = glob([
27+
"partition/*.py",
28+
]),
29+
visibility = [
30+
"@EXECUTORCH_CLIENTS",
31+
],
32+
deps = [
33+
":backend",
34+
"//caffe2:torch",
35+
"//executorch/exir:lib",
36+
"//executorch/exir/backend:compile_spec_schema",
37+
"//executorch/exir/backend:partitioner",
38+
"//executorch/exir/backend:utils",
39+
"fbsource//third-party/pypi/coremltools:coremltools",
40+
],
41+
)
42+
43+
runtime.python_library(
44+
name = "quantizer",
45+
srcs = glob([
46+
"quantizer/*.py",
47+
]),
48+
visibility = [
49+
"@EXECUTORCH_CLIENTS",
50+
],
51+
)
52+
53+
runtime.cxx_python_extension(
54+
name = "executorchcoreml",
55+
srcs = [
56+
"runtime/inmemoryfs/inmemory_filesystem.cpp",
57+
"runtime/inmemoryfs/inmemory_filesystem_py.cpp",
58+
"runtime/inmemoryfs/inmemory_filesystem_utils.cpp",
59+
"runtime/inmemoryfs/memory_buffer.cpp",
60+
"runtime/inmemoryfs/memory_stream.cpp",
61+
"runtime/inmemoryfs/reversed_memory_stream.cpp",
62+
"runtime/util/json_util.cpp",
63+
],
64+
headers = glob([
65+
"runtime/inmemoryfs/**/*.hpp",
66+
]),
67+
preprocessor_flags = [
68+
"-Iexecutorch/backends/apple/coreml/runtime/util",
69+
],
70+
types = [
71+
"executorchcoreml.pyi",
72+
],
73+
compiler_flags = [
74+
"-std=c++17",
75+
],
76+
base_module = "",
77+
visibility = [
78+
"//executorch/examples/apple/coreml/...",
79+
],
80+
external_deps = [
81+
"pybind11",
82+
],
83+
deps = [
84+
"fbsource//third-party/nlohmann-json:nlohmann-json",
85+
],
86+
)
87+
88+
runtime.python_test(
89+
name = "test",
90+
srcs = glob([
91+
"test/*.py",
92+
]),
93+
deps = [
94+
":partitioner",
95+
":quantizer",
96+
"//caffe2:torch",
97+
"//pytorch/vision:torchvision",
98+
"fbsource//third-party/pypi/pytest:pytest",
99+
],
100+
)

backends/apple/coreml/compiler/coreml_preprocess.py

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -184,8 +184,8 @@ def generate_compile_specs(
184184

185185
@staticmethod
186186
def model_metadata_from_spec(
187-
model_spec: ct.proto.Model_pb2, identifier: str
188-
) -> Dict[str, str]:
187+
model_spec: ct.proto.Model_pb2, identifier: str # pyre-ignore
188+
) -> ModelMetadata:
189189
input_names: List[str] = [input.name for input in model_spec.description.input]
190190
output_names = [output.name for output in model_spec.description.output]
191191

@@ -300,7 +300,7 @@ def preprocess_model(
300300

301301
# Save model.
302302
model_path = model_dir_path / MODEL_PATHS.MODEL.value
303-
mlmodel.save(model_path)
303+
mlmodel.save(str(model_path))
304304
# Extract delegate mapping file.
305305
model_debug_info: Optional[ModelDebugInfo] = CoreMLBackend.get_model_debug_info(
306306
model_path
@@ -327,40 +327,41 @@ def preprocess_model(
327327
model_debug_info=model_debug_info, model_dir_path=model_dir_path
328328
)
329329

330-
processed_bytes: bytes = executorchcoreml.flatten_directory_contents(
331-
str(model_dir_path.resolve())
330+
processed_bytes: bytes = (
331+
executorchcoreml.flatten_directory_contents(str(model_dir_path.resolve()))
332+
or b""
332333
)
333334

334335
debug_handle_map: Optional[Dict[str, Tuple[int]]] = None
335336
if model_debug_info is not None:
336-
debug_handle_map = model_debug_info.debugSymbolToHandles
337+
debug_handle_map = {
338+
key: tuple(value)
339+
for key, value in model_debug_info.debugSymbolToHandles.items()
340+
}
337341

338342
shutil.rmtree(str(dir_path.resolve()))
339343
return PreprocessResult(
340344
processed_bytes=processed_bytes,
341345
debug_handle_map=debug_handle_map,
342346
)
343347

344-
@classmethod
348+
@staticmethod
345349
def preprocess(
346-
cls,
347350
edge_program: ExportedProgram,
348-
module_compile_specs: List[CompileSpec],
351+
compile_specs: List[CompileSpec],
349352
) -> PreprocessResult:
350353
model_type: CoreMLBackend.MODEL_TYPE = (
351354
CoreMLBackend.model_type_from_compile_specs(
352-
module_compile_specs,
355+
compile_specs,
353356
)
354357
)
355358

356359
model_compute_precision: ct.precision = (
357-
CoreMLBackend.model_compute_precision_from_compile_specs(
358-
module_compile_specs
359-
)
360+
CoreMLBackend.model_compute_precision_from_compile_specs(compile_specs)
360361
)
361362

362363
minimum_deployment_target: ct.target = (
363-
CoreMLBackend.min_deployment_target_from_compile_specs(module_compile_specs)
364+
CoreMLBackend.min_deployment_target_from_compile_specs(compile_specs)
364365
)
365366

366367
mlmodel = ct.convert(
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
from typing import Optional
8+
9+
def flatten_directory_contents(path: str) -> Optional[bytes]: ...
10+
def unflatten_directory_contents(bytes: bytes, path: str) -> bool: ...

backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,8 @@ def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
4242
# check if the PyTorch op get called is supported in Core ML
4343
elif node.op == "call_function":
4444
# skip ops if specified by user
45-
node_target_name = node.target.__name__.lower()
46-
if node_target_name in self.skip_ops_for_coreml_delegation:
45+
node_target_name = getattr(node.target, "__name__", "").lower()
46+
if node_target_name in (self.skip_ops_for_coreml_delegation or []):
4747
return False
4848
# query coremltools to see if node is supported
4949
return ct.converters.mil.frontend.torch.is_torch_fx_node_supported(node)

backends/apple/coreml/test/test_coreml_quantizer.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,8 @@ def quantize_and_compare(
5151
prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer)
5252
elif quantization_type == "QAT":
5353
prepared_graph = prepare_qat_pt2e(pre_autograd_aten_dialect, quantizer)
54+
else:
55+
raise ValueError("Invalid quantization type")
5456

5557
prepared_graph(*example_inputs)
5658
converted_graph = convert_pt2e(prepared_graph)

examples/models/llama2/TARGETS

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -82,23 +82,26 @@ runtime.python_library(
8282
],
8383
deps = [
8484
"//caffe2:torch",
85+
"//executorch/backends/apple/coreml:backend",
86+
"//executorch/backends/apple/coreml:partitioner",
8587
"//executorch/backends/transforms:duplicate_dynamic_quant_chain",
88+
"//executorch/backends/vulkan/partitioner:vulkan_partitioner",
8689
"//executorch/backends/xnnpack:xnnpack_backend",
8790
"//executorch/backends/xnnpack/partition:xnnpack_partitioner",
88-
"//executorch/backends/vulkan/partitioner:vulkan_partitioner",
8991
"//executorch/examples/models:model_base",
9092
"//executorch/examples/models:models",
9193
"//executorch/examples/models/llama2/custom_ops:custom_ops_aot_py",
9294
"//executorch/examples/portable:utils",
9395
"//executorch/exir:lib",
94-
"//executorch/sdk/etrecord:etrecord",
95-
"//executorch/util:memory_profiler",
96-
"//executorch/util:python_profiler",
9796
# one definition has to be included in the user of the libarary
9897
# depending on what library the client wants to use
9998
# "//executorch/extension/pybindings:aten_lib",
10099
# "//executorch/extension/pybindings:portable_lib",
101100
# "//executorch/extension/pybindings:portable_lib_plus_custom",
101+
"//executorch/sdk/etrecord:etrecord",
102+
"//executorch/util:memory_profiler",
103+
"//executorch/util:python_profiler",
104+
"fbsource//third-party/pypi/coremltools:coremltools",
102105
"fbsource//third-party/pypi/sentencepiece:sentencepiece",
103106
],
104107
)

examples/models/llama2/lib/partitioner_lib.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,13 +56,8 @@ def get_coreml_partitioner(args):
5656
args.use_kv_cache is True
5757
), "CoreML backend currently only supports static shape and use_kv_cache=True is the only way to support it at the moment"
5858
try:
59-
# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `coremltools`.
6059
import coremltools as ct
61-
62-
# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.backends.apple.coreml.compiler`
6360
from executorch.backends.apple.coreml.compiler import CoreMLBackend
64-
65-
# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.backends.apple.coreml.partition`
6661
from executorch.backends.apple.coreml.partition import CoreMLPartitioner
6762
except ImportError:
6863
raise ImportError(

exir/backend/TARGETS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,5 +109,6 @@ runtime.python_library(
109109
"fbsource//third-party/pypi/pandas:pandas",
110110
"//caffe2:torch",
111111
"//executorch/exir:lowered_backend_module",
112+
"//executorch/exir/backend/canonical_partitioners:duplicate_constant_node_pass",
112113
],
113114
)

0 commit comments

Comments (0)