
Commit 5f57784

ashari4 authored and pytorchmergebot committed

rename ort to maia (pytorch#123265)

Fixes pytorch#123264
Pull Request resolved: pytorch#123265
Approved by: https://github.com/albanD

1 parent bffecb5 · commit 5f57784

39 files changed: +183 additions, −189 deletions

aten/src/ATen/Context.h

Lines changed: 5 additions & 5 deletions

@@ -12,9 +12,9 @@
 #include <ATen/detail/CUDAHooksInterface.h>
 #include <ATen/detail/HIPHooksInterface.h>
 #include <ATen/detail/IPUHooksInterface.h>
+#include <ATen/detail/MAIAHooksInterface.h>
 #include <ATen/detail/MPSHooksInterface.h>
 #include <ATen/detail/MTIAHooksInterface.h>
-#include <ATen/detail/ORTHooksInterface.h>
 #include <ATen/detail/PrivateUse1HooksInterface.h>
 #include <ATen/detail/XPUHooksInterface.h>
 #include <c10/core/QEngine.h>
@@ -142,8 +142,8 @@ class TORCH_API Context {
   static bool hasLazy() {
     return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
   }
-  static bool hasORT() {
-    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
+  static bool hasMAIA() {
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::MAIA);
   }
   // defined in header so that getNonVariableType has ability to inline
   // call_once check. getNonVariableType is called fairly frequently
@@ -455,8 +455,8 @@ static inline bool hasMPS() {
   return globalContext().hasMPS();
 }

-static inline bool hasORT() {
-  return globalContext().hasORT();
+static inline bool hasMAIA() {
+  return globalContext().hasMAIA();
 }

 static inline bool hasXPU() {
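The rename keeps the two-level query structure: the free function forwards to the global Context, which asks the device-guard registry whether a MAIA guard was ever registered. A minimal usage sketch (illustrative, not from the commit; in a stock build with no out-of-tree MAIA backend this prints 0):

#include <ATen/Context.h>
#include <iostream>

int main() {
  // True only if a MAIA DeviceGuardImpl was registered by an
  // out-of-tree backend library loaded into this process.
  std::cout << "MAIA available: " << at::hasMAIA() << "\n";
  return 0;
}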

aten/src/ATen/TensorIterator.cpp

Lines changed: 2 additions & 2 deletions

@@ -1530,13 +1530,13 @@ void TensorIteratorBase::build(TensorIteratorConfig& config) {

   // XLA and lazy tensors don't have storage, so they don't have an underlying data pointer.
   // Nothing beyond this point is important for meta functions, so it's fine to exit early here.
-  // Extend the condition to ORT tesnors as ORT tensors also don't have storage.
+  // Extend the condition to MAIA tesnors as MAIA tensors also don't have storage.
   if (privateuse1_without_storage ||
       common_device_.type() == DeviceType::MTIA ||
       common_device_.type() == DeviceType::XLA ||
       common_device_.type() == DeviceType::IPU ||
       common_device_.type() == DeviceType::Lazy ||
-      common_device_.type() == DeviceType::ORT ||
+      common_device_.type() == DeviceType::MAIA ||
       common_device_.type() == DeviceType::HPU) return;

   for (auto& op : operands_) {

aten/src/ATen/Version.cpp

Lines changed: 2 additions & 2 deletions

@@ -190,8 +190,8 @@ std::string show_config() {
     ss << detail::getCUDAHooks().showConfig();
   }

-  if (hasORT()) {
-    ss << detail::getORTHooks().showConfig();
+  if (hasMAIA()) {
+    ss << detail::getMAIAHooks().showConfig();
   }

   if (hasXPU()) {
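Because the hooks fall back to a stub (see MAIAHooksInterface.cpp below), show_config() only appends a MAIA section when hasMAIA() is true, i.e. when a MAIA backend is actually loaded. A small sketch, assuming at::show_config() is declared in ATen/Version.h:

#include <ATen/Version.h>
#include <iostream>

int main() {
  // Prints the build configuration; includes the MAIA hooks'
  // showConfig() output only when a MAIA device guard is registered.
  std::cout << at::show_config() << std::endl;
  return 0;
}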

aten/src/ATen/core/TensorBase.h

Lines changed: 3 additions & 3 deletions

@@ -507,10 +507,10 @@ class TORCH_API TensorBase {
     return impl_->is_mps();
   }

-  /// Returns if a `Tensor` is ort tensor.
-  bool is_ort() const {
+  /// Returns if a `Tensor` is maia tensor.
+  bool is_maia() const {
     // NB: this is not a native function to avoid dispatching overhead.
-    return impl_->is_ort();
+    return impl_->is_maia();
   }

   /// Returns if a `Tensor` is vulkan tensor.
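Call sites migrate mechanically. A sketch of the renamed predicate in use; the helper itself is invented for illustration and echoes the TensorIterator rationale above (MAIA tensors have no storage):

#include <ATen/core/TensorBase.h>

// Illustrative only: refuse data_ptr()-based fast paths for MAIA
// tensors, which have no underlying storage to point into.
bool can_use_data_ptr(const at::TensorBase& t) {
  if (t.is_maia()) {  // formerly t.is_ort()
    return false;
  }
  return t.has_storage();
}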

aten/src/ATen/core/dispatch/OperatorEntry.cpp

Lines changed: 1 addition & 1 deletion

@@ -421,7 +421,7 @@ void OperatorEntry::updateDispatchTable_(const c10::Dispatcher& dispatcher, Disp
   // In theory, we should only have to check if the given runtime key has "dense" functionality,
   // e.g. DispatchKey::CPU (which is composed of DispatchKey::Dense and BackendComponent::CPUBit).
   // However, there are some backends that should be included in this set that don't have the dense key set.
-  // E.g. DispatchKey::Meta, DispatchKey::ORT.
+  // E.g. DispatchKey::Meta, DispatchKey::MAIA.
   if (c10::isBackendDispatchKey(dispatch_key)) {
     DispatchKey autograd_key = getAutogradKeyFromBackend(toBackendComponent(dispatch_key));
     updateDispatchTableEntry_(dispatcher, autograd_key);

aten/src/ATen/core/op_registration/README.md

Lines changed: 2 additions & 2 deletions

@@ -13,13 +13,13 @@ There’s four main use cases
 * You’re writing a new operator that isn’t supposed to be part of the public PyTorch API.
 * You’re writing a new operator but don’t want to change the core pytorch code base, say you’re developing a shared library with operators.
 * You’re writing a C++ extension for PyTorch or you’re using inline c++ in your .py model files.
-* You’re writing a backend library like XLA or ORT that adds new kernels to all operators defined in `native_functions.yaml`.
+* You’re writing a backend library like XLA or MAIA that adds new kernels to all operators defined in `native_functions.yaml`.

 For these use cases, the custom operator API is the better solution.

 ### What is the price for using the custom operator API instead of `native_functions.yaml`?

-If you’re just using the custom operator API to add new kernels for existing operators (e.g. the XLA/ORT example above), then you’re fine and don’t pay any price. If, however, you define a new operator purely using the custom op API, i.e. your operator never shows up in `native_functions.yaml`, then you need to be aware of a few caveats.
+If you’re just using the custom operator API to add new kernels for existing operators (e.g. the XLA/MAIA example above), then you’re fine and don’t pay any price. If, however, you define a new operator purely using the custom op API, i.e. your operator never shows up in `native_functions.yaml`, then you need to be aware of a few caveats.

 * It will not get a C++ API generated. There will not be `Tensor::your_op()` methods or `at::your_op()` functions to call your operator.
 * The API for calling the operator from Python looks a little bit different. It needs to be called through `torch.ops.your_op()` instead of `torch._C`.
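The caveats in that README excerpt are easiest to see in code. A minimal sketch of an operator defined purely through the custom operator API (the names `myops` and `my_relu` are placeholders, not from the commit):

#include <torch/library.h>
#include <ATen/ATen.h>

// Schema-only definition: no entry in native_functions.yaml, so no
// at::my_relu() or Tensor::my_relu() C++ API is generated.
TORCH_LIBRARY(myops, m) {
  m.def("my_relu(Tensor self) -> Tensor");
}

// A CPU kernel. An out-of-tree backend such as MAIA would register its
// own kernel for the same schema under its dispatch key instead.
TORCH_LIBRARY_IMPL(myops, CPU, m) {
  m.impl("my_relu", [](const at::Tensor& self) { return self.clamp_min(0); });
}

// From Python this operator is reachable only as torch.ops.myops.my_relu(t).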
aten/src/ATen/detail/MAIAHooksInterface.cpp

Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+#include <ATen/detail/MAIAHooksInterface.h>
+
+#include <c10/util/CallOnce.h>
+#include <c10/util/Registry.h>
+
+#include <cstddef>
+#include <memory>
+
+namespace at {
+namespace detail {
+
+// See getCUDAHooks for some more commentary
+const MAIAHooksInterface& getMAIAHooks() {
+  static std::unique_ptr<MAIAHooksInterface> maia_hooks;
+  static c10::once_flag once;
+  c10::call_once(once, [] {
+    maia_hooks = MAIAHooksRegistry()->Create("MAIAHooks", {});
+    if (!maia_hooks) {
+      maia_hooks = std::make_unique<MAIAHooksInterface>();
+    }
+  });
+  return *maia_hooks;
+}
+} // namespace detail
+
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
+C10_DEFINE_REGISTRY(MAIAHooksRegistry, MAIAHooksInterface, MAIAHooksArgs)
+
+} // namespace at
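In a build with no MAIA implementation linked in (no use of the registration macro from the header below), getMAIAHooks() silently falls back to the base interface, whose methods fail loudly when called. A hedged sketch of that fallback behavior:

#include <ATen/detail/MAIAHooksInterface.h>
#include <iostream>

int main() {
  try {
    // With no registered implementation, the default stub is constructed
    // and showConfig() throws c10::Error via TORCH_CHECK.
    std::cout << at::detail::getMAIAHooks().showConfig();
  } catch (const c10::Error&) {
    std::cout << "no MAIA backend registered\n";
  }
  return 0;
}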
aten/src/ATen/detail/MAIAHooksInterface.h

Lines changed: 31 additions & 0 deletions

@@ -0,0 +1,31 @@
+#pragma once
+
+#include <c10/util/Exception.h>
+#include <c10/util/Registry.h>
+
+// NB: Class must live in `at` due to limitations of Registry.h.
+namespace at {
+
+struct TORCH_API MAIAHooksInterface {
+  // This should never actually be implemented, but it is used to
+  // squelch -Werror=non-virtual-dtor
+  virtual ~MAIAHooksInterface() = default;
+
+  virtual std::string showConfig() const {
+    TORCH_CHECK(false, "Cannot query detailed MAIA version information.");
+  }
+};
+
+// NB: dummy argument to suppress "ISO C++11 requires at least one argument
+// for the "..." in a variadic macro"
+struct TORCH_API MAIAHooksArgs {};
+
+TORCH_DECLARE_REGISTRY(MAIAHooksRegistry, MAIAHooksInterface, MAIAHooksArgs);
+#define REGISTER_MAIA_HOOKS(clsname) \
+  C10_REGISTER_CLASS(MAIAHooksRegistry, clsname, clsname)
+
+namespace detail {
+TORCH_API const MAIAHooksInterface& getMAIAHooks();
+} // namespace detail
+
+} // namespace at
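This follows the same lazy-registry pattern as the CUDA hooks. A hedged sketch of how an out-of-tree MAIA backend might plug in (the class body here is invented for illustration; the real backend lives outside this repository). Note that C10_REGISTER_CLASS uses the stringified class name as the registry key, and getMAIAHooks() looks up the literal key "MAIAHooks", so the implementing class must carry that exact name:

#include <ATen/detail/MAIAHooksInterface.h>

namespace at {

// Hypothetical out-of-tree implementation.
struct MAIAHooks : MAIAHooksInterface {
  explicit MAIAHooks(MAIAHooksArgs) {}
  std::string showConfig() const override {
    return "MAIA (example runtime)\n";  // surfaced via show_config()
  }
};

// Registers under the key "MAIAHooks", matching the Create() call above.
REGISTER_MAIA_HOOKS(MAIAHooks);

} // namespace at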

aten/src/ATen/detail/ORTHooksInterface.cpp

Lines changed: 0 additions & 29 deletions
This file was deleted.

aten/src/ATen/detail/ORTHooksInterface.h

Lines changed: 0 additions & 36 deletions
This file was deleted.

aten/src/ATen/test/extension_backend_test.cpp

Lines changed: 11 additions & 11 deletions

@@ -6,8 +6,8 @@

 #include <torch/csrc/jit/runtime/operator.h>

-// NB. These tests use the ORT dispatch key to test backend dispatching
-// machinery, but these tests are not specific to ORT at all. The ORT
+// NB. These tests use the MAIA dispatch key to test backend dispatching
+// machinery, but these tests are not specific to MAIA at all. The MAIA
 // backend is fully out-of-tree, so it's safe to use this key for
 // in-tree tests.

@@ -22,16 +22,16 @@ Tensor empty_override(SymIntArrayRef size, c10::optional<ScalarType> dtype, c10:
       Storage(
           Storage::use_byte_size_t(),
           0,
-          at::DataPtr(nullptr, Device(DeviceType::ORT, 1)),
+          at::DataPtr(nullptr, Device(DeviceType::MAIA, 1)),
           nullptr,
           false),
-      DispatchKey::ORT,
+      DispatchKey::MAIA,
       caffe2::TypeMeta::Make<float>());
   return Tensor(std::move(tensor_impl));
 }

 Tensor add_override(const Tensor & a, const Tensor & b , const Scalar& c) {
-  auto out = empty({5, 5}, at::kORT);  // Don't return self as-is
+  auto out = empty({5, 5}, at::kMAIA);  // Don't return self as-is
   test_int = 2;
   return out;
 }
@@ -47,28 +47,28 @@ Tensor empty_strided_override(
   return empty_override(fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, c10::nullopt);
 }

-TORCH_LIBRARY_IMPL(aten, ORT, m) {
+TORCH_LIBRARY_IMPL(aten, MAIA, m) {
   m.impl("aten::empty.memory_format", empty_override);
   m.impl("aten::empty_strided", empty_strided_override);
   m.impl("aten::add.Tensor", add_override);
 }

 TEST(BackendExtensionTest, TestRegisterOp) {
-  Tensor a = empty({5, 5}, at::kORT);
-  ASSERT_EQ(a.device().type(), at::kORT);
+  Tensor a = empty({5, 5}, at::kMAIA);
+  ASSERT_EQ(a.device().type(), at::kMAIA);
   ASSERT_EQ(a.device().index(), 1);
   ASSERT_EQ(a.dtype(), caffe2::TypeMeta::Make<float>());
   ASSERT_EQ(test_int, 1);

-  Tensor b = empty_like(a, at::kORT);
-  ASSERT_EQ(b.device().type(), at::kORT);
+  Tensor b = empty_like(a, at::kMAIA);
+  ASSERT_EQ(b.device().type(), at::kMAIA);
   ASSERT_EQ(b.device().index(), 1);
   ASSERT_EQ(b.dtype(), caffe2::TypeMeta::Make<float>());

   add(a, b);
   ASSERT_EQ(test_int, 2);

-  // Ensure that non-ORT operator still works
+  // Ensure that non-MAIA operator still works
   Tensor d = empty({5, 5}, at::kCPU);
   ASSERT_EQ(d.device().type(), at::kCPU);
 }

build_variables.bzl

Lines changed: 2 additions & 2 deletions

@@ -947,7 +947,7 @@ aten_cpu_non_globed_sources = [
     "aten/src/ATen/detail/CUDAHooksInterface.cpp",
     "aten/src/ATen/detail/HIPHooksInterface.cpp",
     "aten/src/ATen/detail/MPSHooksInterface.cpp",
-    "aten/src/ATen/detail/ORTHooksInterface.cpp",
+    "aten/src/ATen/detail/MAIAHooksInterface.cpp",
     "aten/src/ATen/detail/PrivateUse1HooksInterface.cpp",
     "aten/src/ATen/detail/XPUHooksInterface.cpp",
     "aten/src/ATen/detail/MTIAHooksInterface.cpp",
@@ -964,7 +964,7 @@ aten_cpu_non_globed_headers = [
     "aten/src/ATen/detail/CUDAHooksInterface.h",
     "aten/src/ATen/detail/MPSHooksInterface.h",
     "aten/src/ATen/detail/HIPHooksInterface.h",
-    "aten/src/ATen/detail/ORTHooksInterface.h",
+    "aten/src/ATen/detail/MAIAHooksInterface.h",
     "aten/src/ATen/detail/PrivateUse1HooksInterface.h",
     "aten/src/ATen/detail/XPUHooksInterface.h",
     "aten/src/ATen/detail/MTIAHooksInterface.h",

c10/core/Backend.h

Lines changed: 9 additions & 9 deletions

@@ -46,7 +46,7 @@ enum class Backend {
   SparseCsrVE,
   SparseCsrXPU,
   SparseCsrPrivateUse1,
-  ORT,
+  MAIA,
   XLA,
   Vulkan,
   Metal,
@@ -76,8 +76,8 @@ static inline Backend dispatchKeyToBackend(DispatchKey t) {
     return Backend::VE;
   } else if (t == DispatchKey::FPGA) {
     return Backend::FPGA;
-  } else if (t == DispatchKey::ORT) {
-    return Backend::ORT;
+  } else if (t == DispatchKey::MAIA) {
+    return Backend::MAIA;
   } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) {
     return Backend::XLA;
   } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) {
@@ -154,8 +154,8 @@ static inline DispatchKey backendToDispatchKey(Backend b) {
       return DispatchKey::VE;
     case Backend::FPGA:
       return DispatchKey::FPGA;
-    case Backend::ORT:
-      return DispatchKey::ORT;
+    case Backend::MAIA:
+      return DispatchKey::MAIA;
     case Backend::XLA:
       return DispatchKey::XLA;
     case Backend::Lazy:
@@ -236,8 +236,8 @@ static inline DeviceType backendToDeviceType(Backend b) {
      return DeviceType::VE;
    case Backend::FPGA:
      return DeviceType::FPGA;
-    case Backend::ORT:
-      return DeviceType::ORT;
+    case Backend::MAIA:
+      return DeviceType::MAIA;
    case Backend::XLA:
      return DeviceType::XLA;
    case Backend::Lazy:
@@ -298,8 +298,8 @@ static inline const char* toString(Backend b) {
      return "XPU";
    case Backend::IPU:
      return "IPU";
-    case Backend::ORT:
-      return "ORT";
+    case Backend::MAIA:
+      return "MAIA";
    case Backend::XLA:
      return "XLA";
    case Backend::Lazy:
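With all four mappings renamed, the enum round-trips consistently. A small standalone sanity sketch (not part of the commit) exercising the conversions touched above:

#include <c10/core/Backend.h>
#include <c10/util/Exception.h>
#include <string>

int main() {
  using namespace c10;
  // Backend <-> DispatchKey <-> DeviceType now agree on MAIA.
  TORCH_INTERNAL_ASSERT(dispatchKeyToBackend(DispatchKey::MAIA) == Backend::MAIA);
  TORCH_INTERNAL_ASSERT(backendToDispatchKey(Backend::MAIA) == DispatchKey::MAIA);
  TORCH_INTERNAL_ASSERT(backendToDeviceType(Backend::MAIA) == DeviceType::MAIA);
  TORCH_INTERNAL_ASSERT(std::string(toString(Backend::MAIA)) == "MAIA");
  return 0;
}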

c10/core/Device.cpp

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ DeviceType parse_type(const std::string& device_string) {
       {"hip", DeviceType::HIP},
       {"ve", DeviceType::VE},
       {"fpga", DeviceType::FPGA},
-      {"ort", DeviceType::ORT},
+      {"maia", DeviceType::MAIA},
       {"xla", DeviceType::XLA},
       {"lazy", DeviceType::Lazy},
       {"vulkan", DeviceType::Vulkan},

c10/core/Device.h

Lines changed: 3 additions & 3 deletions

@@ -142,9 +142,9 @@ struct C10_API Device final {
     return type_ == DeviceType::Metal;
   }

-  /// Return true if the device is of ORT type.
-  bool is_ort() const noexcept {
-    return type_ == DeviceType::ORT;
+  /// Return true if the device is of MAIA type.
+  bool is_maia() const noexcept {
+    return type_ == DeviceType::MAIA;
   }

   /// Return true if the device is of META type.
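End to end, the user-visible rename is the device string and the predicate. A hedged sketch exercising both (parsing only consults the string table in Device.cpp, so this works even with no MAIA backend loaded):

#include <c10/core/Device.h>
#include <iostream>

int main() {
  c10::Device d("maia:1");  // parse_type now maps "maia" -> DeviceType::MAIA
  std::cout << d.type() << " index " << static_cast<int>(d.index()) << "\n";
  std::cout << (d.is_maia() ? "is_maia" : "not maia") << "\n";  // formerly is_ort()
  // c10::Device old("ort:0");  // would now throw: "ort" is no longer a device type
  return 0;
}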
