Skip to content

Commit 6fb7687

Browse files
dbort authored and facebook-github-bot committed
Migrate backends/apple to the new namespace (#5883)
Summary: Pull Request resolved: #5883 Move the Apple backends out of the `torch::` namespace, and update to avoid using the `torch::` or `exec_aten::` namespaces. Differential Revision: D63908530
1 parent d9aeca5 commit 6fb7687

39 files changed

+224
-195
lines changed

backends/apple/coreml/runtime/delegate/coreml_backend_delegate.mm

Lines changed: 32 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,22 @@
2525
#endif
2626

2727
namespace {
28-
using namespace torch::executor;
2928
using namespace executorchcoreml;
3029

30+
using executorch::aten::ScalarType;
31+
using executorch::runtime::ArrayRef;
32+
using executorch::runtime::Backend;
33+
using executorch::runtime::BackendExecutionContext;
34+
using executorch::runtime::BackendInitContext;
35+
using executorch::runtime::CompileSpec;
36+
using executorch::runtime::DelegateHandle;
37+
using executorch::runtime::EValue;
38+
using executorch::runtime::Error;
39+
using executorch::runtime::EventTracerDebugLogLevel;
40+
using executorch::runtime::FreeableBuffer;
41+
using executorch::runtime::get_backend_class;
42+
using executorch::runtime::Result;
43+
3144
std::optional<MultiArray::DataType> get_data_type(ScalarType scalar_type) {
3245
switch (scalar_type) {
3346
case ScalarType::Bool:
@@ -60,14 +73,14 @@
6073
if (!eValue->isTensor()) {
6174
return std::nullopt;
6275
}
63-
76+
6477
auto tensor = eValue->toTensor();
6578
auto dataType = get_data_type(tensor.scalar_type());
6679
if (!dataType.has_value()) {
6780
ET_LOG(Error, "%s: DataType=%d is not supported", ETCoreMLStrings.delegateIdentifier.UTF8String, (int)tensor.scalar_type());
6881
return std::nullopt;
6982
}
70-
83+
7184
std::vector<ssize_t> strides(tensor.strides().begin(), tensor.strides().end());
7285
std::vector<size_t> shape(tensor.sizes().begin(), tensor.sizes().end());
7386
MultiArray::MemoryLayout layout(dataType.value(), std::move(shape), std::move(strides));
@@ -86,29 +99,29 @@
8699
if (!dict) {
87100
return std::nullopt;
88101
}
89-
102+
90103
BackendDelegate::Config config;
91104
{
92105
NSNumber *should_prewarm_model = SAFE_CAST(dict[@"shouldPrewarmModel"], NSNumber);
93106
if (should_prewarm_model) {
94107
config.should_prewarm_model = static_cast<bool>(should_prewarm_model.boolValue);
95108
}
96109
}
97-
110+
98111
{
99112
NSNumber *should_prewarm_asset = SAFE_CAST(dict[@"shouldPrewarmAsset"], NSNumber);
100113
if (should_prewarm_asset) {
101114
config.should_prewarm_asset = static_cast<bool>(should_prewarm_asset.boolValue);
102115
}
103116
}
104-
117+
105118
{
106119
NSNumber *max_models_cache_size_in_bytes = SAFE_CAST(dict[@"maxModelsCacheSizeInBytes"], NSNumber);
107120
if (max_models_cache_size_in_bytes) {
108121
config.max_models_cache_size = max_models_cache_size_in_bytes.unsignedLongLongValue;
109122
}
110123
}
111-
124+
112125
return config;
113126
}
114127

@@ -127,14 +140,15 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
127140
auto debug_level = event_tracer->event_tracer_debug_level();
128141
options.log_intermediate_tensors = (debug_level >= EventTracerDebugLogLevel::kIntermediateOutputs);
129142
}
130-
143+
131144
return options;
132145
}
133146

134147
} //namespace
135148

136-
namespace torch {
137-
namespace executor {
149+
namespace executorch {
150+
namespace backends {
151+
namespace coreml {
138152

139153
using namespace executorchcoreml;
140154

@@ -154,7 +168,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
154168
auto buffer = Buffer(spec.value.buffer, spec.value.nbytes);
155169
specs_map.emplace(spec.key, std::move(buffer));
156170
}
157-
171+
158172
auto buffer = Buffer(processed->data(), processed->size());
159173
std::error_code error;
160174
auto handle = impl_->init(std::move(buffer), specs_map);
@@ -173,7 +187,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
173187
size_t nInputs = nArgs.first;
174188
size_t nOutputs = nArgs.second;
175189
delegate_args.reserve(nInputs + nOutputs);
176-
190+
177191
// inputs
178192
for (size_t i = 0; i < nInputs; i++) {
179193
auto multi_array = get_multi_array(args[i], ArgType::Input);
@@ -182,7 +196,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
182196
"%s: Failed to create multiarray from input at args[%zu]", ETCoreMLStrings.delegateIdentifier.UTF8String, i);
183197
delegate_args.emplace_back(std::move(multi_array.value()));
184198
}
185-
199+
186200
// outputs
187201
for (size_t i = nInputs; i < nInputs + nOutputs; i++) {
188202
auto multi_array = get_multi_array(args[i], ArgType::Output);
@@ -191,7 +205,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
191205
"%s: Failed to create multiarray from output at args[%zu]", ETCoreMLStrings.delegateIdentifier.UTF8String, i);
192206
delegate_args.emplace_back(std::move(multi_array.value()));
193207
}
194-
208+
195209
auto logging_options = get_logging_options(context);
196210
std::error_code ec;
197211
#ifdef ET_EVENT_TRACER_ENABLED
@@ -206,7 +220,7 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
206220
"%s: Failed to run the model.",
207221
ETCoreMLStrings.delegateIdentifier.UTF8String);
208222
#endif
209-
223+
210224
return Error::Ok;
211225
}
212226

@@ -235,5 +249,6 @@ ModelLoggingOptions get_logging_options(BackendExecutionContext& context) {
235249
static auto success_with_compiler = register_backend(backend);
236250
}
237251

238-
} // namespace executor
239-
} // namespace torch
252+
} // namespace coreml
253+
} // namespace backends
254+
} // namespace executorch

backends/apple/coreml/runtime/include/coreml_backend/delegate.h

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,9 @@ namespace executorchcoreml {
1717
class BackendDelegate;
1818
}
1919

20-
namespace torch {
21-
namespace executor {
20+
namespace executorch {
21+
namespace backends {
22+
namespace coreml {
2223

2324
class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterface {
2425
public:
@@ -34,24 +35,28 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterfa
3435
/// produce `processed`.
3536
/// @retval On success, an opaque handle representing the loaded model
3637
/// otherwise an`Error` case.
37-
Result<DelegateHandle*>
38-
init(BackendInitContext& context, FreeableBuffer* processed, ArrayRef<CompileSpec> compileSpecs) const override;
38+
executorch::runtime::Result<executorch::runtime::DelegateHandle*>
39+
init(executorch::runtime::BackendInitContext& context,
40+
executorch::runtime::FreeableBuffer* processed,
41+
executorch::runtime::ArrayRef<executorch::runtime::CompileSpec> compileSpecs) const override;
3942

4043
/// Executes the loaded model.
4144
///
4245
/// @param context An execution context specific to the CoreML backend.
4346
/// @param handle The handle returned by an earlier call to `init`.
4447
/// @param args The models inputs and outputs.
4548
/// @retval On success, `Error::Ok` otherwise any other `Error` case.
46-
Error execute(BackendExecutionContext& context, DelegateHandle* handle, EValue** args) const override;
49+
executorch::runtime::Error execute(executorch::runtime::BackendExecutionContext& context,
50+
executorch::runtime::DelegateHandle* handle,
51+
executorch::runtime::EValue** args) const override;
4752

4853
/// Returns `true` if the delegate is available otherwise `false`.
4954
bool is_available() const override;
5055

5156
/// Unloads the loaded CoreML model with the specified handle.
5257
///
5358
/// @param handle The handle returned by an earlier call to `init`.
54-
void destroy(DelegateHandle* handle) const override;
59+
void destroy(executorch::runtime::DelegateHandle* handle) const override;
5560

5661
/// Returns the registered `CoreMLBackendDelegate` instance.
5762
static CoreMLBackendDelegate* get_registered_delegate() noexcept;
@@ -65,5 +70,7 @@ class CoreMLBackendDelegate final : public ::executorch::runtime::BackendInterfa
6570
private:
6671
std::shared_ptr<executorchcoreml::BackendDelegate> impl_;
6772
};
68-
} // namespace executor
69-
} // namespace torch
73+
74+
} // namespace coreml
75+
} // namespace backends
76+
} // namespace executorch

backends/apple/coreml/runtime/sdk/model_event_logger_impl.mm

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
namespace {
1919

20-
using namespace torch::executor;
20+
using namespace executorch::runtime;
2121

2222
uint64_t time_units_to_nano_seconds(uint64_t time_units) {
2323
static mach_timebase_info_data_t info;
@@ -109,7 +109,7 @@ bool is_packed(NSArray<NSNumber *> *shape, NSArray<NSNumber *> *strides) {
109109
[op_path_to_value_map enumerateKeysAndObjectsUsingBlock:^(ETCoreMLModelStructurePath *path,
110110
MLMultiArray *intermediate_value,
111111
BOOL * _Nonnull __unused stop) {
112-
using namespace torch::executor;
112+
using namespace executorch::runtime;
113113

114114
@autoreleasepool {
115115
NSString *debug_symbol = op_path_to_debug_symbol_name_map[path];

backends/apple/coreml/runtime/test/BackendDelegateTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ + (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString
6060
}
6161

6262
+ (void)setUp {
63-
torch::executor::runtime_init();
63+
executorch::runtime::runtime_init();
6464
}
6565

6666
- (void)setUp {

backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@
1717

1818
static constexpr size_t kRuntimeMemorySize = 50 * 1024U * 1024U; // 50 MB
1919

20-
using namespace torch::executor;
21-
using torch::executor::testing::TensorFactory;
20+
using namespace executorch::runtime;
21+
using executorch::runtime::testing::TensorFactory;
2222

2323
namespace {
2424
// TODO: Move the following methods to a utility class, so that it can be shared with `executor_runner.main.mm`
@@ -107,8 +107,8 @@
107107
}
108108
Buffer buffer(tensor_meta->nbytes(), 0);
109109
auto sizes = tensor_meta->sizes();
110-
exec_aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<int *>(sizes.data()), buffer.data());
111-
exec_aten::Tensor tensor(&tensor_impl);
110+
executorch::aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<int *>(sizes.data()), buffer.data());
111+
executorch::aten::Tensor tensor(&tensor_impl);
112112
EValue input_value(std::move(tensor));
113113
Error err = method.set_input(input_value, i);
114114
if (err != Error::Ok) {
@@ -129,7 +129,7 @@ @interface CoreMLBackendDelegateTests : XCTestCase
129129
@implementation CoreMLBackendDelegateTests
130130

131131
+ (void)setUp {
132-
torch::executor::runtime_init();
132+
executorch::runtime::runtime_init();
133133
}
134134

135135
+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {

backends/apple/coreml/runtime/test/ETCoreMLAssetManagerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ @interface ETCoreMLAssetManagerTests : XCTestCase
2323
@implementation ETCoreMLAssetManagerTests
2424

2525
+ (void)setUp {
26-
torch::executor::runtime_init();
26+
executorch::runtime::runtime_init();
2727
}
2828

2929
- (void)setUp {

backends/apple/coreml/runtime/test/ETCoreMLModelDebuggerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ @interface ETCoreMLModelDebuggerTests : XCTestCase
7070
@implementation ETCoreMLModelDebuggerTests
7171

7272
+ (void)setUp {
73-
torch::executor::runtime_init();
73+
executorch::runtime::runtime_init();
7474
}
7575

7676
+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {

backends/apple/coreml/runtime/test/ETCoreMLModelManagerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ + (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString
3232
}
3333

3434
- (void)setUp {
35-
torch::executor::runtime_init();
35+
executorch::runtime::runtime_init();
3636
@autoreleasepool {
3737
NSError *localError = nil;
3838
self.fileManager = [[NSFileManager alloc] init];

backends/apple/coreml/runtime/test/ETCoreMLModelProfilerTests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ @interface ETCoreMLModelProfilerTests : XCTestCase
5959
@implementation ETCoreMLModelProfilerTests
6060

6161
+ (void)setUp {
62-
torch::executor::runtime_init();
62+
executorch::runtime::runtime_init();
6363
}
6464

6565
+ (nullable NSURL *)bundledResourceWithName:(NSString *)name extension:(NSString *)extension {

backends/apple/mps/runtime/MPSBackend.mm

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,8 @@
1616
#include <string>
1717
#include <iostream>
1818

19-
namespace torch {
20-
namespace executor {
19+
namespace executorch {
20+
namespace backends {
2121

2222
class MPSBackend final : public ::executorch::runtime::BackendInterface {
2323
public:
@@ -81,7 +81,7 @@ Error execute(
8181
output_pointers.push_back(&args[i]->toTensor());
8282
}
8383
} else if (args[i]->isTensorList()) {
84-
const exec_aten::ArrayRef<exec_aten::Tensor>& tensorList = args[i]->toTensorList();
84+
const executorch::aten::ArrayRef<executorch::aten::Tensor>& tensorList = args[i]->toTensorList();
8585
for (auto& tensor_ : tensorList) {
8686
if (input_pointers.size() < executor->getNumInputs()) {
8787
input_pointers.push_back(&tensor_);
@@ -122,5 +122,5 @@ void destroy(DelegateHandle* handle) const override {
122122
static auto success_with_compiler = register_backend(backend);
123123
} // namespace
124124

125-
} // namespace executor
126-
} // namespace torch
125+
} // namespace backends
126+
} // namespace executorch

backends/apple/mps/runtime/MPSCompiler.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,8 @@
1414
#include <memory>
1515
#include <vector>
1616

17-
namespace torch {
18-
namespace executor {
17+
namespace executorch {
18+
namespace backends {
1919
namespace mps {
2020
namespace delegate {
2121

@@ -34,5 +34,5 @@ class MPSCompiler {
3434

3535
} // namespace delegate
3636
} // namespace mps
37-
} // namespace executor
38-
} // namespace torch
37+
} // namespace backends
38+
} // namespace executorch

backends/apple/mps/runtime/MPSCompiler.mm

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@
2323

2424
#define MPS_UNUSED(x) ( (void)(x) )
2525

26-
namespace torch {
27-
namespace executor {
26+
namespace executorch {
27+
namespace backends {
2828
namespace mps {
2929
namespace delegate {
3030

@@ -66,5 +66,5 @@
6666

6767
} // namespace delegate
6868
} // namespace mps
69-
} // namespace executor
70-
} // namespace torch
69+
} // namespace backends
70+
} // namespace executorch

backends/apple/mps/runtime/MPSDelegateHeader.h

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@
77

88
#include <executorch/runtime/core/result.h>
99

10-
namespace torch {
11-
namespace executor {
10+
namespace executorch {
11+
namespace backends {
1212
namespace mps {
1313
namespace delegate {
1414

@@ -87,7 +87,9 @@ struct MPSDelegateHeader {
8787
* error if size was too short, if the header was not found, or if the
8888
* header appeared to be corrupt.
8989
*/
90-
static Result<MPSDelegateHeader> Parse(const void* data, size_t size);
90+
static executorch::runtime::Result<MPSDelegateHeader> Parse(
91+
const void* data,
92+
size_t size);
9193

9294
/**
9395
* The offset in bytes to the beginning of the constant data.
@@ -109,5 +111,5 @@ struct MPSDelegateHeader {
109111

110112
} // namespace delegate
111113
} // namespace mps
112-
} // namespace executor
113-
} // namespace torch
114+
} // namespace backends
115+
} // namespace executorch

0 commit comments

Comments (0)