Skip to content

Commit 80afaf2

Browse files
dbort authored and facebook-github-bot committed
Migrate backends/apple/coreml to the new namespace (#5943)
Summary: Pull Request resolved: #5943 Move the Core ML backend out of the `torch::` namespace, and update to avoid using the `torch::` or `exec_aten::` namespaces. Reviewed By: cccclai Differential Revision: D63995558 fbshipit-source-id: 0d027a5ea42a9e989e33eddaa7cce1c92fdd0b21
1 parent 867c96a commit 80afaf2

File tree

10 files changed

+69
-43
lines changed

10 files changed

+69
-43
lines changed

backends/apple/coreml/runtime/delegate/coreml_backend_delegate.mm

Lines changed: 32 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,22 @@
2525
#endif
2626

2727
namespace {
28-
using namespace torch::executor;
2928
using namespace executorchcoreml;
3029

30+
using executorch::aten::ScalarType;
31+
using executorch::runtime::ArrayRef;
32+
using executorch::runtime::Backend;
33+
using executorch::runtime::BackendExecutionContext;
34+
using executorch::runtime::BackendInitContext;
35+
using executorch::runtime::CompileSpec;
36+
using executorch::runtime::DelegateHandle;
37+
using executorch::runtime::EValue;
38+
using executorch::runtime::Error;
39+
using executorch::runtime::EventTracerDebugLogLevel;
40+
using executorch::runtime::FreeableBuffer;
41+
using executorch::runtime::get_backend_class;
42+
using executorch::runtime::Result;
43+
3144
std::optional<MultiArray::DataType> get_data_type(ScalarType scalar_type) {
3245
switch (scalar_type) {
3346
case ScalarType::Bool:
@@ -60,14 +73,14 @@
6073
if (!eValue->isTensor()) {
6174
return std::nullopt;
6275
}
63-
76+
6477
auto tensor = eValue->toTensor();
6578
auto dataType = get_data_type(tensor.scalar_type());
6679
if (!dataType.has_value()) {
6780
ET_LOG(Error, "%s: DataType=%d is not supported", ETCoreMLStrings.delegateIdentifier.UTF8String, (int)tensor.scalar_type());
6881
return std::nullopt;
6982
}
70-
83+
7184
std::vector<ssize_t> strides(tensor.strides().begin(), tensor.strides().end());
7285
std::vector<size_t> shape(tensor.sizes().begin(), tensor.sizes().end());
7386
MultiArray::MemoryLayout layout(dataType.value(), std::move(shape), std::move(strides));
@@ -86,29 +99,29 @@
8699
if (!dict) {
87100
return std::nullopt;
88101
}
89-
102+
90103
BackendDelegate::Config config;
91104
{
92105
NSNumber *should_prewarm_model = SAFE_CAST(dict[@"shouldPrewarmModel"], NSNumber);
93106
if (should_prewarm_model) {
94107
config.should_prewarm_model = static_cast<bool>(should_prewarm_model.boolValue);
95108
}
96109
}
97-
110+
98111
{
99112
NSNumber *should_prewarm_asset = SAFE_CAST(dict[@"shouldPrewarmAsset"], NSNumber);
100113
if (should_prewarm_asset) {
101114
config.should_prewarm_asset = static_cast<bool>(should_prewarm_asset.boolValue);
102115
}
103116
}
104-
117+
105118
{
106119
NSNumber *max_models_cache_size_in_bytes = SAFE_CAST(dict[@"maxModelsCacheSizeInBytes"], NSNumber);
107120
if (max_models_cache_size_in_bytes) {
108121
config.max_models_cache_size = max_models_cache_size_in_bytes.unsignedLongLongValue;
109122
}
110123
}
111-
124+
112125
return config;
113126
}
114127

@@ -127,14 +140,15 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
127140
auto debug_level = event_tracer->event_tracer_debug_level();
128141
options.log_intermediate_tensors = (debug_level >= EventTracerDebugLogLevel::kIntermediateOutputs);
129142
}
130-
143+
131144
return options;
132145
}
133146

134147
} //namespace
135148

136-
namespace torch {
137-
namespace executor {
149+
namespace executorch {
150+
namespace backends {
151+
namespace coreml {
138152

139153
using namespace executorchcoreml;
140154

@@ -154,7 +168,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
154168
auto buffer = Buffer(spec.value.buffer, spec.value.nbytes);
155169
specs_map.emplace(spec.key, std::move(buffer));
156170
}
157-
171+
158172
auto buffer = Buffer(processed->data(), processed->size());
159173
std::error_code error;
160174
auto handle = impl_->init(std::move(buffer), specs_map);
@@ -173,7 +187,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
173187
size_t nInputs = nArgs.first;
174188
size_t nOutputs = nArgs.second;
175189
delegate_args.reserve(nInputs + nOutputs);
176-
190+
177191
// inputs
178192
for (size_t i = 0; i < nInputs; i++) {
179193
auto multi_array = get_multi_array(args[i], ArgType::Input);
@@ -182,7 +196,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
182196
"%s: Failed to create multiarray from input at args[%zu]", ETCoreMLStrings.delegateIdentifier.UTF8String, i);
183197
delegate_args.emplace_back(std::move(multi_array.value()));
184198
}
185-
199+
186200
// outputs
187201
for (size_t i = nInputs; i < nInputs + nOutputs; i++) {
188202
auto multi_array = get_multi_array(args[i], ArgType::Output);
@@ -191,7 +205,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
191205
"%s: Failed to create multiarray from output at args[%zu]", ETCoreMLStrings.delegateIdentifier.UTF8String, i);
192206
delegate_args.emplace_back(std::move(multi_array.value()));
193207
}
194-
208+
195209
auto logging_options = get_logging_options(context);
196210
std::error_code ec;
197211
#ifdef ET_EVENT_TRACER_ENABLED
@@ -206,7 +220,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
206220
"%s: Failed to run the model.",
207221
ETCoreMLStrings.delegateIdentifier.UTF8String);
208222
#endif
209-
223+
210224
return Error::Ok;
211225
}
212226

@@ -235,5 +249,6 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
235249
static auto success_with_compiler = register_backend(backend);
236250
}
237251

238-
} // namespace executor
239-
} // namespace torch
252+
} // namespace coreml
253+
} // namespace backends
254+
} // namespace executorch

backends/apple/coreml/runtime/include/coreml_backend/delegate.h

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,9 @@ namespace executorchcoreml {
1717
class BackendDelegate;
1818
}
1919

20-
namespace torch {
21-
namespace executor {
20+
namespace executorch {
21+
namespace backends {
22+
namespace coreml {
2223

2324
class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterface {
2425
public:
@@ -34,24 +35,28 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterfa
3435
/// produce `processed`.
3536
/// @retval On success, an opaque handle representing the loaded model
3637
/// otherwise an`Error` case.
37-
Result<DelegateHandle*>
38-
init(BackendInitContext& context, FreeableBuffer* processed, ArrayRef<CompileSpec> compileSpecs) const override;
38+
executorch::runtime::Result<executorch::runtime::DelegateHandle*>
39+
init(executorch::runtime::BackendInitContext& context,
40+
executorch::runtime::FreeableBuffer* processed,
41+
executorch::runtime::ArrayRef<executorch::runtime::CompileSpec> compileSpecs) const override;
3942

4043
/// Executes the loaded model.
4144
///
4245
/// @param context An execution context specific to the CoreML backend.
4346
/// @param handle The handle returned by an earlier call to `init`.
4447
/// @param args The models inputs and outputs.
4548
/// @retval On success, `Error::Ok` otherwise any other `Error` case.
46-
Error execute(BackendExecutionContext& context, DelegateHandle* handle, EValue** args) const override;
49+
executorch::runtime::Error execute(executorch::runtime::BackendExecutionContext& context,
50+
executorch::runtime::DelegateHandle* handle,
51+
executorch::runtime::EValue** args) const override;
4752

4853
/// Returns `true` if the delegate is available otherwise `false`.
4954
bool is_available() const override;
5055

5156
/// Unloads the loaded CoreML model with the specified handle.
5257
///
5358
/// @param handle The handle returned by an earlier call to `init`.
54-
void destroy(DelegateHandle* handle) const override;
59+
void destroy(executorch::runtime::DelegateHandle* handle) const override;
5560

5661
/// Returns the registered `CoreMLBackendDelegate` instance.
5762
static CoreMLBackendDelegate* get_registered_delegate() noexcept;
@@ -65,5 +70,7 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterfa
6570
private:
6671
std::shared_ptr<executorchcoreml::BackendDelegate> impl_;
6772
};
68-
} // namespace executor
69-
} // namespace torch
73+
74+
} // namespace coreml
75+
} // namespace backends
76+
} // namespace executorch

backends/apple/coreml/runtime/sdk/model_event_logger_impl.mm

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,15 +17,19 @@
1717

1818
namespace {
1919

20-
using namespace torch::executor;
20+
using namespace executorch::runtime;
21+
22+
using executorch::aten::ScalarType;
23+
using executorch::aten::Tensor;
24+
using executorch::aten::TensorImpl;
2125

2226
uint64_t time_units_to_nano_seconds(uint64_t time_units) {
2327
static mach_timebase_info_data_t info;
2428
static dispatch_once_t onceToken;
2529
dispatch_once(&onceToken, ^{
2630
NSCAssert(mach_timebase_info(&info) == KERN_SUCCESS, @"ModelEventLogger: Failed to get time base.");
2731
});
28-
32+
2933
return time_units * info.numer / info.denom;
3034
}
3135

@@ -100,7 +104,7 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
100104
estimated_execution_end_time_in_ns,
101105
metadata.bytes,
102106
metadata.length);
103-
107+
104108
}];
105109
}
106110

@@ -109,7 +113,7 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
109113
[op_path_to_value_map enumerateKeysAndObjectsUsingBlock:^(ETCoreMLModelStructurePath *path,
110114
MLMultiArray *intermediate_value,
111115
BOOL * _Nonnull __unused stop) {
112-
using namespace torch::executor;
116+
using namespace executorch::runtime;
113117

114118
@autoreleasepool {
115119
NSString *debug_symbol = op_path_to_debug_symbol_name_map[path];
@@ -123,15 +127,15 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
123127
}
124128

125129
MLMultiArray *supported_value = value;
126-
NSArray<NSNumber *> *shape = supported_value.shape;
130+
NSArray<NSNumber *> *shape = supported_value.shape;
127131
NSError *local_error = nil;
128132
MLMultiArrayDataType data_type = get_supported_data_type(value.dataType);
129133

130134
if (!is_packed(shape, value.strides) || (supported_value.dataType != data_type)) {
131135
supported_value = [[MLMultiArray alloc] initWithShape:shape
132136
dataType:data_type
133137
error:&local_error];
134-
NSCAssert(supported_value != nil,
138+
NSCAssert(supported_value != nil,
135139
@"ModelEventLoggerImpl: Failed to create packed multiarray with shape=%@, dataType=%ld, error=%@.",
136140
shape,
137141
static_cast<long>(value.dataType),

backends/apple/coreml/runtime/test/BackendDelegateTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ + (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString
6060
}
6161

6262
+ (void)setUp {
63-
torch::executor::runtime_init();
63+
executorch::runtime::runtime_init();
6464
}
6565

6666
- (void)setUp {

backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@
1717

1818
static constexpr size_t kRuntimeMemorySize = 50 * 1024U * 1024U; // 50 MB
1919

20-
using namespace torch::executor;
21-
using torch::executor::testing::TensorFactory;
20+
using namespace executorch::runtime;
21+
using executorch::runtime::testing::TensorFactory;
2222

2323
namespace {
2424
// TODO: Move the following methods to a utility class, so that it can be shared with `executor_runner.main.mm`
@@ -107,8 +107,8 @@
107107
}
108108
Buffer buffer(tensor_meta->nbytes(), 0);
109109
auto sizes = tensor_meta->sizes();
110-
exec_aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<int *>(sizes.data()), buffer.data());
111-
exec_aten::Tensor tensor(&tensor_impl);
110+
executorch::aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<int *>(sizes.data()), buffer.data());
111+
executorch::aten::Tensor tensor(&tensor_impl);
112112
EValue input_value(std::move(tensor));
113113
Error err = method.set_input(input_value, i);
114114
if (err != Error::Ok) {
@@ -129,7 +129,7 @@ @interface CoreMLBackendDelegateTests : XCTestCase
129129
@implementation CoreMLBackendDelegateTests
130130

131131
+ (void)setUp {
132-
torch::executor::runtime_init();
132+
executorch::runtime::runtime_init();
133133
}
134134

135135
+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {

backends/apple/coreml/runtime/test/ETCoreMLAssetManagerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ @interface ETCoreMLAssetManagerTests : XCTestCase
2323
@implementation ETCoreMLAssetManagerTests
2424

2525
+ (void)setUp {
26-
torch::executor::runtime_init();
26+
executorch::runtime::runtime_init();
2727
}
2828

2929
- (void)setUp {

backends/apple/coreml/runtime/test/ETCoreMLModelDebuggerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ @interface ETCoreMLModelDebuggerTests : XCTestCase
7070
@implementation ETCoreMLModelDebuggerTests
7171

7272
+ (void)setUp {
73-
torch::executor::runtime_init();
73+
executorch::runtime::runtime_init();
7474
}
7575

7676
+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {

backends/apple/coreml/runtime/test/ETCoreMLModelManagerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ + (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString
3232
}
3333

3434
- (void)setUp {
35-
torch::executor::runtime_init();
35+
executorch::runtime::runtime_init();
3636
@autoreleasepool {
3737
NSError *localError = nil;
3838
self.fileManager = [[NSFileManager alloc] init];

backends/apple/coreml/runtime/test/ETCoreMLModelProfilerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ @interface ETCoreMLModelProfilerTests : XCTestCase
5959
@implementation ETCoreMLModelProfilerTests
6060

6161
+ (void)setUp {
62-
torch::executor::runtime_init();
62+
executorch::runtime::runtime_init();
6363
}
6464

6565
+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {

examples/apple/coreml/executor_runner/main.mm

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,13 @@ static inline id check_class(id obj, Class cls) {
2424

2525
#define SAFE_CAST(Object, Type) ((Type *)check_class(Object, [Type class]))
2626

27+
using executorch::backends::coreml::CoreMLBackendDelegate;
2728
using executorch::etdump::ETDumpGen;
2829
using executorch::etdump::ETDumpResult;
2930
using executorch::extension::FileDataLoader;
3031
using executorch::runtime::DataLoader;
31-
using executorch::runtime::EValue;
3232
using executorch::runtime::Error;
33+
using executorch::runtime::EValue;
3334
using executorch::runtime::EventTracer;
3435
using executorch::runtime::EventTracerDebugLogLevel;
3536
using executorch::runtime::FreeableBuffer;
@@ -42,7 +43,6 @@ static inline id check_class(id obj, Class cls) {
4243
using executorch::runtime::Result;
4344
using executorch::runtime::Span;
4445
using executorch::runtime::TensorInfo;
45-
using torch::executor::CoreMLBackendDelegate;
4646

4747
static constexpr size_t kRuntimeMemorySize = 16 * 1024U * 1024U; // 16 MB
4848

0 commit comments

Comments (0)