Replace Executorch with ExecuTorch, Part 2/N #468

Closed · wants to merge 1 commit

4 changes: 2 additions & 2 deletions examples/bundled_executor_runner/bundled_executor_runner.cpp
@@ -9,7 +9,7 @@
/**
* @file
*
- * This tool can run Executorch model files that only use operators that
+ * This tool can run ExecuTorch model files that only use operators that
* are covered by the portable kernels, with possible delegate to the
* test_backend_compiler_lib.
*
@@ -42,7 +42,7 @@ DEFINE_string(
DEFINE_string(
prof_result_path,
"prof_result.bin",
- "Executorch profiler output path.");
+ "ExecuTorch profiler output path.");

DEFINE_bool(
bundled_program,
4 changes: 2 additions & 2 deletions examples/executor_runner/executor_runner.cpp
@@ -9,7 +9,7 @@
/**
* @file
*
- * This tool can run Executorch model files that only use operators that
+ * This tool can run ExecuTorch model files that only use operators that
* are covered by the portable kernels, with possible delegate to the
* test_backend_compiler_lib.
*
@@ -38,7 +38,7 @@ DEFINE_string(
DEFINE_string(
prof_result_path,
"prof_result.bin",
- "Executorch profiler output path.");
+ "ExecuTorch profiler output path.");

using namespace torch::executor;
using torch::executor::util::FileDataLoader;
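
An aside for readers unfamiliar with the `DEFINE_string` entries in these runner diffs: they are gflags definitions, read back after argv parsing. A self-contained sketch of that pattern — the flag mirrors the hunk above, but this demo `main` is not part of the runner:

```cpp
#include <cstdio>

#include <gflags/gflags.h>

// Mirrors the flag in the hunk above; DEFINE_string generates the
// FLAGS_prof_result_path variable read below.
DEFINE_string(
    prof_result_path,
    "prof_result.bin",
    "ExecuTorch profiler output path.");

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::printf("Profiler output path: %s\n", FLAGS_prof_result_path.c_str());
  return 0;
}
```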
2 changes: 1 addition & 1 deletion extension/pybindings/pybindings.cpp
@@ -146,7 +146,7 @@ class Module final {
#ifdef USE_ATEN_LIB
// [TLS handling] This is to workaround an assertion failure
// (https://fburl.com/code/302jyn8d) running `gelu` in ATen mode in fbcode
- // (such as bento). The problem is Executorch ATen mode doesn't have
+ // (such as bento). The problem is ExecuTorch ATen mode doesn't have
// Thread Local State, but `torch-cpp` is assuming tls init is done. There
// are two more checks: MKLDNN disabled and C10_MOBILE, if any of them is
// true we won't be hitting this assertion error. However in `torch-cpp`
2 changes: 1 addition & 1 deletion kernels/optimized/cpu/moments_utils.h
@@ -9,7 +9,7 @@
#pragma once

// Slightly modified version of caffe2/aten/src/ATen/native/cpu/moments_utils.h
- // for use in optimized Executorch ops. Template specializations of BFloat16
+ // for use in optimized ExecuTorch ops. Template specializations of BFloat16
// are excluded.

#include <executorch/kernels/optimized/vec/vec.h>
8 changes: 4 additions & 4 deletions kernels/portable/cpu/op_allclose.cpp
@@ -117,13 +117,13 @@ Tensor& allclose_out(
* Note: This custom operator contains two variants: allclose.Tensor (a
* functional variant, no inplace mutating on the arguments) and allclose.out
* (an out variant, mutating out). We need to register both into the PyTorch
- * runtime so that they can be visible from Executorch compiler side. Eventually
- * only allclose.out will be seen from Executorch runtime. With this setup, the
+ * runtime so that they can be visible from ExecuTorch compiler side. Eventually
+ * only allclose.out will be seen from ExecuTorch runtime. With this setup, the
* portable kernel for allclose.Tensor can be implemented as a wrapper of
* allclose.out. We can easily instantiate an at::Tensor for the out argument,
* then pass it into allclose.out. This logic will only need to work out in
- * "ATen mode" for Executorch compiler, since we won't expose allclose.Tensor in
- * Executorch runtime.
+ * "ATen mode" for ExecuTorch compiler, since we won't expose allclose.Tensor in
+ * ExecuTorch runtime.
*/
Tensor allclose_tensor(
__ET_UNUSED const Tensor& self,
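
The comment above outlines the wrapper pattern: register both variants, then implement the functional one by instantiating an at::Tensor for `out` and delegating to the out variant. A minimal sketch of that shape in ATen mode — the `_demo` names and simplified signatures are illustrative assumptions, not the kernel's real interface:

```cpp
#include <ATen/ATen.h>

// Hypothetical out variant: writes the boolean comparison result into `out`.
at::Tensor& allclose_out_demo(
    const at::Tensor& self,
    const at::Tensor& other,
    double rtol,
    double atol,
    at::Tensor& out) {
  out.fill_(at::allclose(self, other, rtol, atol));
  return out;
}

// Functional variant: materialize `out` itself, then delegate.
at::Tensor allclose_tensor_demo(
    const at::Tensor& self,
    const at::Tensor& other,
    double rtol,
    double atol) {
  at::Tensor out = at::empty({}, self.options().dtype(at::kBool));
  return allclose_out_demo(self, other, rtol, atol, out);
}
```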
4 changes: 2 additions & 2 deletions runtime/core/error.h
@@ -8,7 +8,7 @@

/**
* @file
- * Executorch Error declarations.
+ * ExecuTorch Error declarations.
*/

#pragma once
@@ -24,7 +24,7 @@ namespace executor {
typedef uint32_t error_code_t;

/**
- * Executorch Error type.
+ * ExecuTorch Error type.
*/
enum class Error : error_code_t {
/*
2 changes: 1 addition & 1 deletion runtime/core/exec_aten/testing_util/tensor_factory.h
@@ -554,7 +554,7 @@ inline void validate_strides(

} // namespace

- // Note that this !USE_ATEN_LIB section uses Executorch-specific namespaces
+ // Note that this !USE_ATEN_LIB section uses ExecuTorch-specific namespaces
// instead of exec_aten to make it clear that we're dealing with ETensor, and
// because many of these names aren't mapped into exec_aten::.

6 changes: 3 additions & 3 deletions runtime/core/exec_aten/util/scalar_type_util.h
@@ -15,7 +15,7 @@
* See file comment in ../ScalarType.h.
*
* This file contains all of the non-critical parts of the original ScalarType.h
- * that are not required for the core Executorch runtime, but may be helpful for
+ * that are not required for the core ExecuTorch runtime, but may be helpful for
* code that uses ScalarType.
*/

@@ -577,7 +577,7 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
//
// These macros are not meant to be used directly. They provide an easy way to
// generate a switch statement that can handle subsets of ScalarTypes supported
- // by Executorch.
+ // by ExecuTorch.
//

#define ET_INTERNAL_SWITCH_CASE(enum_type, CTYPE_ALIAS, ...) \
@@ -744,7 +744,7 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
// Switch case macros
//
// These macros provide an easy way to generate switch statements that apply a
- // common lambda function to subsets of ScalarTypes supported by Executorch.
+ // common lambda function to subsets of ScalarTypes supported by ExecuTorch.
// The lambda function can type specialize to the ctype associated with the
// ScalarType being handled through an alias passed as the CTYPE_ALIAS argument.
//
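
The two hunks above describe the same mechanism: a generated switch over ScalarType that runs a common operation with the matching C type bound to an alias (the CTYPE_ALIAS argument). A hand-rolled sketch of the idea, using simplified stand-in types rather than the macros' actual expansion:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for the real enum in runtime/core/portable_type/scalar_type.h.
enum class ScalarType { Float, Int };

// Each case binds the ScalarType's C type to the operation's template
// parameter, playing the role of CTYPE_ALIAS in the generated switch.
template <typename Op>
void switch_real_types_demo(ScalarType t, Op op) {
  switch (t) {
    case ScalarType::Float:
      op(float{0});
      break;
    case ScalarType::Int:
      op(int32_t{0});
      break;
  }
}

struct PrintCTypeSize {
  template <typename CTYPE>
  void operator()(CTYPE) const {
    std::printf("sizeof(CTYPE) = %zu\n", sizeof(CTYPE));
  }
};

// Usage: switch_real_types_demo(ScalarType::Float, PrintCTypeSize{});
```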
2 changes: 1 addition & 1 deletion runtime/core/exec_aten/util/tensor_util_portable.cpp
@@ -16,7 +16,7 @@
namespace torch {
namespace executor {
/**
- * Implementation for Executorch tensor util, should only be included in
+ * Implementation for ExecuTorch tensor util, should only be included in
* a target with ATen mode turned off. Explicitly taking
* torch::executor::Tensor (instead of exec_aten::Tensor) to make sure it fails
* at compile time if built incorrectly.
2 changes: 1 addition & 1 deletion runtime/core/memory_allocator.h
@@ -33,7 +33,7 @@ namespace executor {
* MemoryAllocator allocator(100, memory_pool)
* // Pass allocator object in the Executor
*
- * Underneath the hood, Executorch will
+ * Underneath the hood, ExecuTorch will
* allocator.allocate() to keep iterating cur_ pointer
*/
class MemoryAllocator {
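
Expanding the usage comment above into a compilable sketch: hand the allocator a fixed pool, then carve allocations out of it by bumping the internal cur_ pointer. The constructor and allocate() calls follow the pattern the header documents; treat the exact signatures as assumptions rather than a checked API reference:

```cpp
#include <cstdint>

#include <executorch/runtime/core/memory_allocator.h>

void memory_allocator_demo() {
  static uint8_t memory_pool[100];
  torch::executor::MemoryAllocator allocator(sizeof(memory_pool), memory_pool);

  // Each allocate() advances the internal cur_ pointer through the pool;
  // there is no free(), and exhaustion is reported rather than hidden.
  void* buf = allocator.allocate(16);
  if (buf == nullptr) {
    // Out of pool space: callers handle this; nothing falls back to malloc.
  }
}
```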
4 changes: 2 additions & 2 deletions runtime/core/portable_type/device.h
@@ -24,11 +24,11 @@ using DeviceIndex = int8_t;

/**
* An abstraction for the compute device on which a tensor is located.
- * Executorch doesn't allow dynamic dispatching based on device, so this type is
+ * ExecuTorch doesn't allow dynamic dispatching based on device, so this type is
* just a skeleton to allow certain kernels that expect device as an
* argument to still be run.
*
- * In Executorch this is always expected to be CPU.
+ * In ExecuTorch this is always expected to be CPU.
*/
struct Device final {
using Type = DeviceType;
2 changes: 1 addition & 1 deletion runtime/core/portable_type/scalar_type.h
@@ -52,7 +52,7 @@ namespace executor {
*
* The indices and C types must be consistent with
* AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS in the core pytorch file
- * c10/core/ScalarType.h. This ensures that Executorch serialization is
+ * c10/core/ScalarType.h. This ensures that ExecuTorch serialization is
* compatible with ATen serialization.
*
* @param _ A macro that takes two parameters: the name of a C type, and the
4 changes: 2 additions & 2 deletions runtime/core/portable_type/tensor_impl.h
@@ -57,7 +57,7 @@ class TensorImpl {
* This must match the size/signedness of the type used for `Tensor.sizes` in
* //executorch/schema/program.fbs.
*
- * Note that at::TensorImpl uses `int64_t` for this type. Executorch uses
+ * Note that at::TensorImpl uses `int64_t` for this type. ExecuTorch uses
* `int32_t` to save memory, since no single size value will ever be larger
* than 2 billion.
*/
@@ -77,7 +77,7 @@ class TensorImpl {
* This must match the size/signedness of the type used for `Tensor.strides`
* in //executorch/schema/program.fbs.
*
- * Note that at::TensorImpl uses `int64_t` for this type. Executorch uses
+ * Note that at::TensorImpl uses `int64_t` for this type. ExecuTorch uses
* `int32_t` to save memory, since no single stride value will ever be larger
* than 2 billion.
*/
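
To make the trade-off above concrete: with `int32_t` metadata, a rank-3 tensor's sizes occupy 12 bytes instead of the 24 that at::TensorImpl's `int64_t` would need. A sketch with local stand-in aliases (the real SizesType/StridesType live in tensor_impl.h; the names here are assumptions for illustration):

```cpp
#include <cstdint>

namespace demo {
using SizesType = int32_t;   // at::TensorImpl uses int64_t here.
using StridesType = int32_t;

// Metadata for a contiguous {2, 128, 512} tensor.
SizesType sizes[] = {2, 128, 512};
StridesType strides[] = {128 * 512, 512, 1};

static_assert(
    sizeof(sizes) == 3 * sizeof(int32_t),
    "half the footprint of int64_t-based sizes");
} // namespace demo
```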
4 changes: 2 additions & 2 deletions runtime/core/portable_type/tensor_options.h
@@ -21,7 +21,7 @@ enum class MemoryFormat : int8_t {
/**
* Row-major contiguous data format.
*
- * This is the only format supported by Executorch. Use dim orders to
+ * This is the only format supported by ExecuTorch. Use dim orders to
* describe other layouts.
*/
Contiguous,
@@ -37,7 +37,7 @@ enum class Layout : int8_t {
* Contrasted with a sparse tensor layout where the memory structure of the
* data blob will be more complicated and indexing requires larger structures.
*
- * This is the only layout supported by Executorch.
+ * This is the only layout supported by ExecuTorch.
*/
Strided,
};
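
The MemoryFormat hunk above says other layouts are expressed with dim orders rather than extra MemoryFormat values. A dim order lists dimensions from outermost to innermost in memory, so one contiguous format plus a dim order can express layouts like channels-last. Illustrative values for an NCHW tensor:

```cpp
#include <cstdint>

namespace demo {
// Contiguous (row-major) NCHW: dimensions in declaration order.
constexpr uint8_t kContiguousDimOrder[] = {0, 1, 2, 3};

// Same logical NCHW tensor, channels-last in memory (NHWC): the channel
// dimension (index 1) moves to the innermost position.
constexpr uint8_t kChannelsLastDimOrder[] = {0, 2, 3, 1};
} // namespace demo
```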
2 changes: 1 addition & 1 deletion runtime/core/result.h
@@ -8,7 +8,7 @@

/**
* @file
- * Result type to be used in conjunction with Executorch Error type.
+ * Result type to be used in conjunction with ExecuTorch Error type.
*/

#pragma once
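
Result pairs a value with the Error type from runtime/core/error.h above. A sketch of the intended call-site pattern, assuming the ok()/error()/dereference accessors this style of Result provides:

```cpp
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/result.h>

using torch::executor::Error;
using torch::executor::Result;

// A fallible function returns Result<T>: either a value or an Error code.
Result<int> parse_header_demo(bool valid) {
  if (!valid) {
    return Error::InvalidArgument; // converts into an error-state Result
  }
  return 42; // converts into a value-state Result
}

Error caller_demo() {
  Result<int> header = parse_header_demo(true);
  if (!header.ok()) {
    return header.error(); // propagate the failure unchanged
  }
  int value = *header; // dereference reaches the wrapped value
  (void)value;
  return Error::Ok;
}
```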
4 changes: 2 additions & 2 deletions runtime/executor/method_meta.h
@@ -23,7 +23,7 @@ namespace torch {
namespace executor {

/**
- * Metadata about a specific tensor of an Executorch Program.
+ * Metadata about a specific tensor of an ExecuTorch Program.
*
* The program used to create the MethodMeta object that created this
* TensorInfo must outlive this TensorInfo.
@@ -90,7 +90,7 @@ class TensorInfo final {
};

/**
- * Describes a method in an Executorch program.
+ * Describes a method in an ExecuTorch program.
*
* The program used to create a MethodMeta object must outlive the MethodMeta.
* It is separate from Method so that this information can be accessed without
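
A sketch of the inspection workflow MethodMeta enables: querying a method's tensors without instantiating the Method itself. Accessor names follow the MethodMeta/TensorInfo surface this header describes, but the exact signatures are assumptions here:

```cpp
#include <cstddef>

#include <executorch/runtime/executor/program.h>

using torch::executor::MethodMeta;
using torch::executor::Program;
using torch::executor::Result;
using torch::executor::TensorInfo;

void describe_method_demo(const Program& program) {
  Result<MethodMeta> meta = program.method_meta("forward");
  if (!meta.ok()) {
    return; // no such method, or the program is malformed
  }
  // Size caller-owned input buffers up front, before loading the Method.
  for (size_t i = 0; i < meta->num_inputs(); ++i) {
    Result<TensorInfo> info = meta->input_tensor_meta(i);
    if (info.ok()) {
      size_t input_bytes = info->nbytes();
      (void)input_bytes;
    }
  }
}
```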
2 changes: 1 addition & 1 deletion runtime/executor/program.h
@@ -36,7 +36,7 @@ class ProgramTestFriend;
} // namespace testing

/**
- * A deserialized Executorch program binary.
+ * A deserialized ExecuTorch program binary.
*/
class Program final {
public:
2 changes: 1 addition & 1 deletion runtime/platform/abort.cpp
@@ -13,7 +13,7 @@ namespace torch {
namespace executor {

/**
- * Trigger the Executorch global runtime to immediately exit without cleaning
+ * Trigger the ExecuTorch global runtime to immediately exit without cleaning
* up, and set an abnormal exit status (platform-defined).
*/
__ET_NORETURN void runtime_abort() {
4 changes: 2 additions & 2 deletions runtime/platform/abort.h
@@ -8,7 +8,7 @@

/**
* @file
- * Executorch global abort wrapper function.
+ * ExecuTorch global abort wrapper function.
*/

#pragma once
@@ -19,7 +19,7 @@ namespace torch {
namespace executor {

/**
- * Trigger the Executorch global runtime to immediately exit without cleaning
+ * Trigger the ExecuTorch global runtime to immediately exit without cleaning
* up, and set an abnormal exit status (platform-defined).
*/
__ET_NORETURN void runtime_abort();
6 changes: 3 additions & 3 deletions runtime/platform/compiler.h
@@ -16,13 +16,13 @@
// Compiler support checks.

#if !defined(__cplusplus)
- #error Executorch must be compiled using a C++ compiler.
+ #error ExecuTorch must be compiled using a C++ compiler.
#endif

#if __cplusplus < 201103L && (!defined(_MSC_VER) || _MSC_VER < 1600) && \
(!defined(__GNUC__) || \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400))
- #error Executorch must use a compiler supporting at least the C++11 standard.
+ #error ExecuTorch must use a compiler supporting at least the C++11 standard.
#error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
#endif

@@ -31,7 +31,7 @@
* See all C++ declaration attributes here:
* https://en.cppreference.com/w/cpp/language/attributes
*
- * Note that Executorch supports a lower C++ standard version than all standard
+ * Note that ExecuTorch supports a lower C++ standard version than all standard
* attributes. Therefore, some annotations are defined using their Clang/GNU
* counterparts.
*
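
What "defined using their Clang/GNU counterparts" means in practice: where the supported language level predates a standard attribute, the annotation falls back to `__attribute__` syntax. A representative shape under that assumption, not copied from compiler.h:

```cpp
// Fall back to GNU-style attributes when standard [[noreturn]] may not be
// available at the supported language level.
#if defined(__GNUC__) || defined(__clang__)
#define DEMO_ET_NORETURN __attribute__((noreturn))
#else
#define DEMO_ET_NORETURN [[noreturn]]
#endif

DEMO_ET_NORETURN void demo_abort_wrapper();
```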
6 changes: 3 additions & 3 deletions runtime/platform/log.h
@@ -8,7 +8,7 @@

/**
* @file
- * Executorch logging API.
+ * ExecuTorch logging API.
*/

#pragma once
@@ -27,7 +27,7 @@

/*
* Enable logging by default if compiler option is not provided.
- * This should facilitate less confusion for those developing Executorch.
+ * This should facilitate less confusion for those developing ExecuTorch.
*/
#ifndef ET_LOG_ENABLED
#define ET_LOG_ENABLED 1
@@ -56,7 +56,7 @@ enum class LogLevel : uint8_t {
Info,

/**
- * Log messages about errors within Executorch during runtime.
+ * Log messages about errors within ExecuTorch during runtime.
*/
Error,

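
A sketch of the API this header declares, using the ET_LOG macro and the LogLevel names visible in the hunks above; the format strings are illustrative:

```cpp
#include <executorch/runtime/platform/log.h>

void log_demo(int num_ops) {
  // Severity tags mirror the LogLevel enum entries (Info, Error, ...).
  ET_LOG(Info, "Loaded program with %d operators", num_ops);
  if (num_ops == 0) {
    ET_LOG(Error, "Program contains no operators");
  }
  // Building with ET_LOG_ENABLED defined to 0 compiles these calls out.
}
```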
2 changes: 1 addition & 1 deletion runtime/platform/platform.h
@@ -9,7 +9,7 @@
/**
* @file
* Platform abstraction layer to allow individual platform libraries to override
- * symbols in Executorch. PAL functions are defined as C functions so a platform
+ * symbols in ExecuTorch. PAL functions are defined as C functions so a platform
* library implementer can use C in lieu of C++.
*/

4 changes: 2 additions & 2 deletions runtime/platform/runtime.h
@@ -8,7 +8,7 @@

/**
* @file
- * Executorch global runtime wrapper functions.
+ * ExecuTorch global runtime wrapper functions.
*/

#pragma once
@@ -19,7 +19,7 @@ namespace torch {
namespace executor {

/**
- * Initialize the Executorch global runtime.
+ * Initialize the ExecuTorch global runtime.
*/
void runtime_init();

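
The Posix.cpp hunk further down aborts if the PAL is used before initialization, so host programs call runtime_init() once at startup. A minimal sketch:

```cpp
#include <executorch/runtime/platform/runtime.h>

int main() {
  // Must run before any other runtime facility (logging, program loading).
  torch::executor::runtime_init();
  // ... load a Program and execute a Method ...
  return 0;
}
```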
2 changes: 1 addition & 1 deletion runtime/platform/system.h
@@ -9,7 +9,7 @@
/**
* @file
* Platform abstraction layer to allow individual host OS to override
- * symbols in Executorch. PAL functions are defined as C functions so an
+ * symbols in ExecuTorch. PAL functions are defined as C functions so an
* implementer can use C in lieu of C++.
*/
#pragma once
2 changes: 1 addition & 1 deletion runtime/platform/target/Posix.cpp
@@ -54,7 +54,7 @@
if (!initialized) { \
fprintf( \
ET_LOG_OUTPUT_FILE, \
- "Executorch PAL must be initialized before call to %s()", \
+ "ExecuTorch PAL must be initialized before call to %s()", \
__ET_FUNCTION); \
fflush(ET_LOG_OUTPUT_FILE); \
et_pal_abort(); \
2 changes: 1 addition & 1 deletion runtime/platform/types.h
@@ -8,7 +8,7 @@

/**
* @file
- * Public types used by the Executorch Platform Abstraction Layer.
+ * Public types used by the ExecuTorch Platform Abstraction Layer.
*/

#pragma once
2 changes: 1 addition & 1 deletion schema/extended_header.h
@@ -14,7 +14,7 @@ namespace torch {
namespace executor {

/**
- * An extended, Executorch-specific header that may be embedded in the
+ * An extended, ExecuTorch-specific header that may be embedded in the
* serialized Program data header.
*
* For details see
4 changes: 2 additions & 2 deletions sdk/runners/executor_runner.cpp
@@ -62,7 +62,7 @@ DEFINE_string(
DEFINE_string(
prof_result_path,
"prof_result.bin",
- "Executorch profiler output path.");
+ "ExecuTorch profiler output path.");

DEFINE_bool(print_output, false, "Prints output of the model.");

@@ -316,7 +316,7 @@ int main(int argc, char** argv) {
#ifdef USE_ATEN_LIB
// [TLS handling] This is to workaround an assertion failure
// (https://fburl.com/code/302jyn8d) running `gelu` in ATen mode in fbcode
- // (such as bento). The problem is Executorch ATen mode doesn't have Thread
+ // (such as bento). The problem is ExecuTorch ATen mode doesn't have Thread
// Local State, but `torch-cpp` is assuming tls init is done. There are two
// more checks: MKLDNN disabled and C10_MOBILE, if any of them is true we
// won't be hitting this assertion error. However in `torch-cpp` lib both