Skip to content

Commit 7d2344e

Browse files
committed
Update base for Update on "Pass one NDM to backend init"
Take external NDM if it exists, otherwise internal. Note: by default, xnnpack uses the named_data_map. Constants are not stored with the delegated blob anymore. Differential Revision: [D73679710](https://our.internmc.facebook.com/intern/diff/D73679710/) [ghstack-poisoned]
2 parents b58bca8 + 4524838 commit 7d2344e

File tree

23 files changed

+284
-79
lines changed

23 files changed

+284
-79
lines changed

.ci/scripts/test_model.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ test_model_with_coreml() {
222222

223223
DTYPE=float16
224224

225-
"${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}"
225+
"${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}" --use_partitioner
226226
EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
227227

228228
if [ -n "$EXPORTED_MODEL" ]; then

Package.swift

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,9 @@ let package = Package(
7777
name: "\(key)_dependencies",
7878
dependencies: [.target(name: key)],
7979
path: ".Package.swift/\(key)",
80-
linkerSettings:
80+
linkerSettings: [
81+
.linkedLibrary("c++")
82+
] +
8183
(value["frameworks"] as? [String] ?? []).map { .linkedFramework($0) } +
8284
(value["libraries"] as? [String] ?? []).map { .linkedLibrary($0) }
8385
),

backends/apple/coreml/runtime/delegate/coreml_backend_delegate.mm

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,9 +88,17 @@
8888
ET_LOG(Error, "%s: DataType=%d is not supported", ETCoreMLStrings.delegateIdentifier.UTF8String, (int)tensor.scalar_type());
8989
return std::nullopt;
9090
}
91-
91+
9292
std::vector<ssize_t> strides(tensor.strides().begin(), tensor.strides().end());
9393
std::vector<size_t> shape(tensor.sizes().begin(), tensor.sizes().end());
94+
95+
// If tensor is rank 0, wrap in rank 1
96+
// See https://github.com/apple/coremltools/blob/8.2/coremltools/converters/mil/frontend/torch/exir_utils.py#L73
97+
if (shape.size() == 0) {
98+
shape.push_back(1);
99+
strides.push_back(1);
100+
}
101+
94102
MultiArray::MemoryLayout layout(dataType.value(), std::move(shape), std::move(strides));
95103
switch (argType) {
96104
case ArgType::Input: {
@@ -233,6 +241,12 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
233241
std::array<SizesType, kTensorDimensionLimit> new_shape;
234242
for (size_t i = nInputs; i < nInputs + nOutputs; i++) {
235243
Tensor& t = args[i]->toTensor();
244+
// If t has rank 0, do not resize. delegate_args[i] will have rank 1
245+
// because we resized it in get_multi_array
246+
if (t.dim() == 0) {
247+
continue;
248+
}
249+
236250
int rank = delegate_args[i].layout().rank();
237251
assert (rank <= new_shape.size());
238252
for (int d = 0; d < rank; d++) {

backends/apple/coreml/runtime/test/ETCoreMLModelManagerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ - (void)testAddModelExecution {
113113
XCTAssertNotNil(inputs);
114114
MLMultiArray *output = [ETCoreMLTestUtils filledMultiArrayWithShape:inputs[0].shape dataType:inputs[0].dataType repeatedValue:@(0) error:&localError];
115115
NSArray<MLMultiArray *> *args = [inputs arrayByAddingObject:output];
116-
XCTAssertTrue([self.modelManager executeModelWithHandle:handle
116+
XCTAssertTrue([self.modelManager executeModelWithHandle:handle
117117
args:args
118118
loggingOptions:executorchcoreml::ModelLoggingOptions()
119119
eventLogger:nullptr

backends/apple/coreml/scripts/install_requirements.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ SCRIPT_DIR_PATH="$(
1212

1313
# TODO(jathu): remove the need to fetch coremltools to build deps for coreml_executor_runner.
1414
# Keep this version in sync with: pyproject.toml
15-
COREMLTOOLS_VERSION="8.2"
15+
COREMLTOOLS_VERSION="8.3"
1616

1717
red=`tput setaf 1`
1818
green=`tput setaf 2`

backends/arm/operators/op_upsample_bilinear2d.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,8 @@ def define_node(
3434
inputs: List[TosaArg],
3535
output: TosaArg,
3636
) -> None:
37-
assert (
38-
inputs[0].shape is not None and output.shape is not None
39-
), "Only static shapes are supported"
37+
if inputs[0].shape is None or output.shape is None:
38+
raise ValueError("Only static shapes are supported")
4039

4140
input_dtype = inputs[0].dtype
4241

@@ -55,9 +54,12 @@ def define_node(
5554
def in_int16_range(x):
5655
return torch.all(x >= -(2**15)) and torch.all(x <= 2**15 - 1)
5756

58-
assert in_int16_range(scale_n_yx)
59-
assert in_int16_range(scale_d_yx)
60-
assert in_int16_range(border_yx)
57+
if not in_int16_range(scale_n_yx):
58+
raise ValueError("scale_n_yx is out of the int16 range")
59+
if not in_int16_range(scale_d_yx):
60+
raise ValueError("scale_d_yx is out of the int16 range")
61+
if not in_int16_range(border_yx):
62+
raise ValueError("border_yx is out of the int16 range")
6163

6264
attr = ts.TosaSerializerAttribute()
6365
attr.ResizeAttribute(

backends/arm/operators/op_upsample_nearest2d.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,8 @@ def define_node(
3636
) -> None:
3737
import tosa_tools.v0_80.serializer.tosa_serializer as ts # type: ignore
3838

39-
assert (
40-
inputs[0].shape is not None and output.shape is not None
41-
), "Only static shapes are supported"
39+
if inputs[0].shape is None or output.shape is None:
40+
raise ValueError("Only static shapes are supported")
4241

4342
# tosa_shape output is NHWC, take HW
4443
input_size_yx = torch.tensor(
@@ -55,9 +54,12 @@ def define_node(
5554
def in_int16_range(x):
5655
return torch.all(x >= -(2**15)) and torch.all(x <= 2**15 - 1)
5756

58-
assert in_int16_range(scale_n_yx)
59-
assert in_int16_range(scale_d_yx)
60-
assert in_int16_range(border_yx)
57+
if not in_int16_range(scale_n_yx):
58+
raise ValueError("scale_n_yx is out of the int16 range")
59+
if not in_int16_range(scale_d_yx):
60+
raise ValueError("scale_d_yx is out of the int16 range")
61+
if not in_int16_range(border_yx):
62+
raise ValueError("border_yx is out of the int16 range")
6163

6264
attr = ts.TosaSerializerAttribute()
6365
attr.ResizeAttribute(

docs/source/conf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@
1818
# add these directories to sys.path here. If the directory is relative to the
1919
# documentation root, use os.path.abspath to make it absolute, like shown here.
2020
#
21+
import distutils.file_util
2122
import glob
2223
import os
23-
import shutil
2424
import sys
2525
from typing import Any
2626

@@ -135,7 +135,7 @@
135135
# Copy .md files from source dir to gallery dir
136136
for f in glob.glob(os.path.join(source_dir, "*.md")):
137137

138-
shutil.copyfile(f, gallery_dir)
138+
distutils.file_util.copy_file(f, gallery_dir, update=True)
139139

140140
source_suffix = [".rst", ".md"]
141141

docs/source/using-executorch-ios.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ The prebuilt ExecuTorch runtime, backend, and kernels are available as a [Swift
2525

2626
#### Xcode
2727

28-
In Xcode, go to `File > Add Package Dependencies`. Paste the URL of the [ExecuTorch repo](https://github.com/pytorch/executorch) into the search bar and select it. Make sure to change the branch name to the desired ExecuTorch version in format "swiftpm-<version>", (e.g. "swiftpm-0.6.0"), or a branch name in format "swiftpm-<version>.<year_month_date>" (e.g. "swiftpm-0.7.0-20250401") for a nightly build on a specific date.
28+
In Xcode, go to `File > Add Package Dependencies`. Paste the URL of the [ExecuTorch repo](https://github.com/pytorch/executorch) into the search bar and select it. Make sure to change the branch name to the desired ExecuTorch version in format "swiftpm-<version>", (e.g. "swiftpm-0.6.0"), or a branch name in format "swiftpm-<version>.<year_month_date>" (e.g. "swiftpm-0.7.0-20250401") for a [nightly build](https://ossci-ios.s3.amazonaws.com/list.html) on a specific date.
2929

3030
![](_static/img/swiftpm_xcode1.png)
3131

0 commit comments

Comments
 (0)