Skip to content

Commit c9dea32

Browse files
cymbalrush authored and facebook-github-bot committed
Expand export options. (#2481)
Summary: Expand export options - `generate_compute_precision_compile_spec` adds compute precision for the model. - `generate_min_deployment_target_compile_spec` adds a minimum deployment target for the model. - `generate_model_type_compile_spec` adds an option to bundle the `mlmodelc` contents in the exported program. This avoids the extra step of compiling the model on the device. The CoreML backend can now be used on `watchos`, since `watchos` has no model compilation API. The export script has been updated to include the expanded options. Pull Request resolved: #2481 Reviewed By: digantdesai Differential Revision: D54997649 Pulled By: shoumikhin fbshipit-source-id: 102916f399f520d81c4297c6937113c65f8d722a
1 parent 84cd2bb commit c9dea32

File tree

12 files changed

+398
-93
lines changed

12 files changed

+398
-93
lines changed

backends/apple/coreml/compiler/coreml_preprocess.py

Lines changed: 208 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,12 @@
55
import json
66
import shutil
77
import uuid
8+
from dataclasses import asdict, dataclass
9+
from enum import Enum
810

911
from pathlib import Path
1012

11-
from typing import final, List
13+
from typing import Dict, final, List
1214

1315
import coremltools as ct
1416
import executorchcoreml
@@ -21,56 +23,237 @@
2123
from executorch.exir.backend.compile_spec_schema import CompileSpec
2224

2325

26+
class COMPILE_SPEC_KEYS(Enum):
27+
COMPUTE_UNITS = "compute_units"
28+
MODEL_TYPE = "model_type"
29+
MIN_DEPLOYMENT_TARGET = "min_deployment_target"
30+
MODEL_COMPUTE_PRECISION = "model_compute_precision"
31+
32+
33+
@dataclass
34+
class ModelMetadata:
35+
# The model input names.
36+
inputNames: List[str]
37+
# The model output names.
38+
outputNames: List[str]
39+
# The model identifier.
40+
identifier: str
41+
42+
2443
@final
2544
class CoreMLBackend(BackendDetails):
45+
class MODEL_TYPE(Enum):
46+
MODEL = "model"
47+
COMPILED_MODEL = "compiled_model"
48+
49+
@staticmethod
50+
def generate_model_type_compile_spec(model_type: MODEL_TYPE) -> CompileSpec:
51+
"""
52+
Returns the compile spec representing the given model type.
53+
54+
If the model type is ``MODEL_TYPE.Model`` then the ``CoreMLBackend`` returns
55+
the in-memory representation of the ``mlpackage`` contents.
56+
57+
If the model type is ``MODEL_TYPE.COMPILED_MODEL`` then the ``CoreMLBackend`` compiles the model
58+
and returns the in-memory representation of ``mlmodelc`` (compiled model) contents.
59+
"""
60+
return CompileSpec(
61+
COMPILE_SPEC_KEYS.MODEL_TYPE.value, model_type.value.encode("utf-8")
62+
)
63+
64+
@staticmethod
65+
def model_type_from_compile_specs(compile_specs: List[CompileSpec]) -> MODEL_TYPE:
66+
"""
67+
Returns the model type by parsing the list of compile specs.
68+
"""
69+
for compile_spec in compile_specs:
70+
if compile_spec.key == COMPILE_SPEC_KEYS.MODEL_TYPE.value:
71+
return CoreMLBackend.MODEL_TYPE(compile_spec.value.decode("utf-8"))
72+
73+
return CoreMLBackend.MODEL_TYPE.MODEL
74+
75+
@staticmethod
76+
def generate_compute_precision_compile_spec(
77+
compute_precision: ct.precision,
78+
) -> CompileSpec:
79+
"""
80+
Returns the compile spec representing the model compute precision, for additional details
81+
please refer to the documentation for ``coremltools.precision``.
82+
"""
83+
return CompileSpec(
84+
COMPILE_SPEC_KEYS.MODEL_COMPUTE_PRECISION.value,
85+
compute_precision.value.encode("utf-8"),
86+
)
87+
88+
@staticmethod
89+
def model_compute_precision_from_compile_specs(
90+
compile_specs: List[CompileSpec],
91+
) -> ct.precision:
92+
"""
93+
Returns the model's compute precision by parsing the list of compile specs.
94+
"""
95+
for compile_spec in compile_specs:
96+
if compile_spec.key == COMPILE_SPEC_KEYS.MODEL_COMPUTE_PRECISION.value:
97+
return ct.precision(compile_spec.value.decode("utf-8"))
98+
99+
return ct.precision.FLOAT16
100+
101+
@staticmethod
102+
def generate_minimum_deployment_target_compile_spec(
103+
min_deployment_target: ct.target,
104+
) -> CompileSpec:
105+
"""
106+
Returns the compile spec representing the minimum deployment target on which the model can run,
107+
for additional details please refer to the documentation for ``coremltools.target``.
108+
"""
109+
return CompileSpec(
110+
COMPILE_SPEC_KEYS.MIN_DEPLOYMENT_TARGET.value,
111+
str(min_deployment_target.value).encode("utf-8"),
112+
)
113+
114+
@staticmethod
115+
def min_deployment_target_from_compile_specs(
116+
compile_specs: List[CompileSpec],
117+
) -> ct.target:
118+
"""
119+
Returns the minimum deployment target by parsing the list of compile specs.
120+
"""
121+
for compile_spec in compile_specs:
122+
if compile_spec.key == COMPILE_SPEC_KEYS.MIN_DEPLOYMENT_TARGET.value:
123+
compile_spec_value: int = int(compile_spec.value.decode("utf-8"))
124+
return ct.target(compile_spec_value)
125+
126+
return ct.target.iOS15
127+
26128
@staticmethod
27-
def to_bytes(mlmodel):
28-
dir_path = Path("tmp")
29-
model_dir_path = dir_path / "lowered_module"
30-
Path(model_dir_path).mkdir(parents=True, exist_ok=True)
31-
model_path = model_dir_path / "model.mlpackage"
32-
mlmodel.save(model_path)
33-
34-
# save model metdata
35-
spec = mlmodel.get_spec()
36-
input_names = [input.name for input in spec.description.input]
37-
output_names = [output.name for output in spec.description.output]
129+
def generate_compute_unit_compile_spec(
130+
compute_unit: ct.ComputeUnit,
131+
) -> CompileSpec:
132+
"""
133+
Returns the compile spec representing the compute units on which the model can run, for additional details
134+
please refer to the documentation for ``coremltools.ComputeUnit`.
135+
"""
136+
return CompileSpec(
137+
COMPILE_SPEC_KEYS.COMPUTE_UNITS.value,
138+
compute_unit.name.lower().encode("utf-8"),
139+
)
140+
141+
@staticmethod
142+
def generate_compile_specs(
143+
compute_unit: ct.ComputeUnit = ct.ComputeUnit.ALL,
144+
minimum_deployment_target: ct.target = ct.target.iOS15,
145+
compute_precision: ct.precision = ct.precision.FLOAT16,
146+
model_type: MODEL_TYPE = MODEL_TYPE.MODEL,
147+
) -> List[CompileSpec]:
148+
"""
149+
Returns the list of compile specs that's used by CoreMLBackend to lower the module.
150+
"""
151+
compile_specs: List[CompileSpec] = []
152+
compile_specs.append(
153+
CoreMLBackend.generate_compute_unit_compile_spec(compute_unit)
154+
)
155+
compile_specs.append(
156+
CoreMLBackend.generate_minimum_deployment_target_compile_spec(
157+
minimum_deployment_target
158+
)
159+
)
160+
compile_specs.append(
161+
CoreMLBackend.generate_compute_precision_compile_spec(compute_precision)
162+
)
163+
compile_specs.append(CoreMLBackend.generate_model_type_compile_spec(model_type))
164+
165+
return compile_specs
166+
167+
@staticmethod
168+
def model_metadata_from_spec(model_spec: ct.proto.Model_pb2) -> Dict[str, str]:
169+
input_names: List[str] = [input.name for input in model_spec.description.input]
170+
output_names = [output.name for output in model_spec.description.output]
38171
identifier = uuid.uuid4()
39172

40-
model_metadata = {
41-
"inputNames": input_names,
42-
"outputNames": output_names,
43-
"identifier": str(identifier),
44-
}
173+
return ModelMetadata(
174+
inputNames=input_names, outputNames=output_names, identifier=str(identifier)
175+
)
176+
177+
@staticmethod
178+
def to_bytes(mlmodel: ct.models.MLModel, model_type: MODEL_TYPE) -> bytes:
179+
dir_path: Path = Path("tmp")
180+
model_dir_path: Path = dir_path / "lowered_module"
181+
model_spec: ct.proto.Model_pb2 = mlmodel.get_spec()
182+
model_metadata: ModelMetadata = CoreMLBackend.model_metadata_from_spec(
183+
model_spec
184+
)
185+
match model_type:
186+
case CoreMLBackend.MODEL_TYPE.MODEL:
187+
# Store model.
188+
model_path = model_dir_path / "model.mlpackage"
189+
mlmodel.save(model_path)
45190

46-
# store metadata
191+
case CoreMLBackend.MODEL_TYPE.COMPILED_MODEL:
192+
# Store compiled model
193+
model_path = model_dir_path / "model.mlmodelc"
194+
compiled_model_path = mlmodel.get_compiled_model_path()
195+
196+
shutil.copytree(
197+
compiled_model_path,
198+
str(model_path.resolve()),
199+
dirs_exist_ok=True,
200+
)
201+
202+
# Store model metadata.
47203
model_metadata_path = Path(model_dir_path) / "metadata.json"
48-
json_object = json.dumps(model_metadata)
204+
model_metadata_json = json.dumps(asdict(model_metadata))
49205
with open(model_metadata_path, "w") as outfile:
50-
outfile.write(json_object)
206+
outfile.write(model_metadata_json)
51207

52208
# flatten directory contents and convert it to bytes
53209
flattened_bytes = executorchcoreml.flatten_directory_contents(
54210
str(model_dir_path.resolve())
55211
)
212+
56213
shutil.rmtree(str(model_dir_path.resolve()))
57214
return flattened_bytes
58215

59216
@classmethod
60-
# pyre-ignore
61217
def preprocess(
62218
cls,
63219
edge_program: ExportedProgram,
64-
module_compile_spec: List[CompileSpec],
220+
module_compile_specs: List[CompileSpec],
65221
) -> PreprocessResult:
222+
model_type: CoreMLBackend.MODEL_TYPE = (
223+
CoreMLBackend.model_type_from_compile_specs(
224+
module_compile_specs,
225+
)
226+
)
227+
228+
model_compute_precision: ct.precision = (
229+
CoreMLBackend.model_compute_precision_from_compile_specs(
230+
module_compile_specs
231+
)
232+
)
233+
234+
minimum_deployment_target: ct.target = (
235+
CoreMLBackend.min_deployment_target_from_compile_specs(module_compile_specs)
236+
)
237+
238+
skip_model_load: bool = False
239+
match model_type:
240+
case CoreMLBackend.MODEL_TYPE.MODEL:
241+
skip_model_load = True
242+
243+
case CoreMLBackend.MODEL_TYPE.COMPILED_MODEL:
244+
skip_model_load = False
245+
66246
mlmodel = ct.convert(
67247
model=edge_program,
68248
source="pytorch",
69249
convert_to="mlprogram",
70250
pass_pipeline=ct.PassPipeline.DEFAULT,
71-
skip_model_load=True,
251+
skip_model_load=skip_model_load,
252+
compute_precision=model_compute_precision,
253+
minimum_deployment_target=minimum_deployment_target,
72254
)
73-
flattened_bytes = CoreMLBackend.to_bytes(mlmodel)
255+
256+
processed_bytes = CoreMLBackend.to_bytes(mlmodel, model_type=model_type)
74257
return PreprocessResult(
75-
processed_bytes=flattened_bytes,
258+
processed_bytes=processed_bytes,
76259
)

backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@
99

1010
import torch
1111

12+
from executorch.backends.apple.coreml.compiler import CoreMLBackend
13+
from executorch.exir.backend.compile_spec_schema import CompileSpec
14+
1215
from executorch.exir.backend.partitioner import (
1316
DelegationSpec,
1417
Partitioner,
@@ -52,15 +55,19 @@ def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
5255

5356

5457
class CoreMLPartitioner(Partitioner):
55-
compile_spec = []
5658

5759
def __init__(
58-
self, skip_ops_for_coreml_delegation: Optional[List[str]] = None
60+
self,
61+
skip_ops_for_coreml_delegation: Optional[List[str]] = None,
62+
compile_specs: Optional[List[CompileSpec]] = None,
5963
) -> None:
6064
if skip_ops_for_coreml_delegation is None:
6165
skip_ops_for_coreml_delegation = []
6266
self.skip_ops_for_coreml_delegation = skip_ops_for_coreml_delegation
63-
self.delegation_spec = DelegationSpec("CoreMLBackend", self.compile_spec)
67+
self.delegation_spec = DelegationSpec(
68+
backend_id=CoreMLBackend.__name__,
69+
compile_specs=compile_specs if compile_specs is not None else [],
70+
)
6471

6572
def partition(self, exported_program: ExportedProgram) -> PartitionResult:
6673
# Run the CapabilityBasedPartitioner to return the largest possible

backends/apple/coreml/runtime/delegate/ETCoreMLLogging.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ typedef NS_ERROR_ENUM(ETCoreMLErrorDomain, ETCoreMLError) {
2121
ETCoreMLErrorCorruptedModel, // AOT blob has incorrect or missing CoreML model.
2222
ETCoreMLErrorBrokenModel, // CoreML model doesn't match the input and output specification.
2323
ETCoreMLErrorCompilationFailed, // CoreML model failed to compile.
24+
ETCoreMLErrorModelCompilationNotSupported, // CoreML model compilation is not supported by the target.
2425
ETCoreMLErrorModelSaveFailed, // Failed to save CoreML model to disk.
2526
ETCoreMLErrorModelCacheCreationFailed, // Failed to create model cache.
2627
ETCoreMLErrorInternalError, // Internal error.

backends/apple/coreml/runtime/delegate/ETCoreMLModelCompiler.mm

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,23 @@
77

88
#import <ETCoreMLModelCompiler.h>
99
#import <ETCoreMLLogging.h>
10+
#import <TargetConditionals.h>
1011

1112
@implementation ETCoreMLModelCompiler
1213

1314
+ (nullable NSURL *)compileModelAtURL:(NSURL *)modelURL
1415
maxWaitTimeInSeconds:(NSTimeInterval)maxWaitTimeInSeconds
1516
error:(NSError* __autoreleasing *)error {
17+
#if TARGET_OS_WATCH
18+
(void)modelURL;
19+
(void)maxWaitTimeInSeconds;
20+
(void)error;
21+
ETCoreMLLogErrorAndSetNSError(error,
22+
ETCoreMLErrorModelCompilationNotSupported,
23+
"%@: Model compilation is not supported on the target, please make sure to export a compiled model.",
24+
NSStringFromClass(ETCoreMLModelCompiler.class));
25+
return nil;
26+
#else
1627
__block NSError *localError = nil;
1728
__block NSURL *result = nil;
1829

@@ -34,6 +45,7 @@ + (nullable NSURL *)compileModelAtURL:(NSURL *)modelURL
3445
}
3546

3647
return result;
48+
#endif
3749
}
3850

3951
@end

0 commit comments

Comments
 (0)