Skip to content

Commit 15388ce

Browse files
mnachin authored and facebook-github-bot committed
Replace Executorch with ExecuTorch, Part 2/N
Summary: `codemod -m -d ./ --extensions h,cpp 'Executorch' 'ExecuTorch'` and manually picked the ones that are strictly not code (e.g., docblock) Differential Revision: D49566516 fbshipit-source-id: 74e3c5fb131e1f5a1fb3b2d7a9a6bb9000d912b1
1 parent a3e6480 commit 15388ce

File tree

30 files changed

+51
-51
lines changed

30 files changed

+51
-51
lines changed

examples/bundled_executor_runner/bundled_executor_runner.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
/**
1010
* @file
1111
*
12-
* This tool can run Executorch model files that only use operators that
12+
* This tool can run ExecuTorch model files that only use operators that
1313
* are covered by the portable kernels, with possible delegate to the
1414
* test_backend_compiler_lib.
1515
*
@@ -42,7 +42,7 @@ DEFINE_string(
4242
DEFINE_string(
4343
prof_result_path,
4444
"prof_result.bin",
45-
"Executorch profiler output path.");
45+
"ExecuTorch profiler output path.");
4646

4747
DEFINE_bool(
4848
bundled_program,

examples/executor_runner/executor_runner.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
/**
1010
* @file
1111
*
12-
* This tool can run Executorch model files that only use operators that
12+
* This tool can run ExecuTorch model files that only use operators that
1313
* are covered by the portable kernels, with possible delegate to the
1414
* test_backend_compiler_lib.
1515
*
@@ -38,7 +38,7 @@ DEFINE_string(
3838
DEFINE_string(
3939
prof_result_path,
4040
"prof_result.bin",
41-
"Executorch profiler output path.");
41+
"ExecuTorch profiler output path.");
4242

4343
using namespace torch::executor;
4444
using torch::executor::util::FileDataLoader;

extension/pybindings/pybindings.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ class Module final {
146146
#ifdef USE_ATEN_LIB
147147
// [TLS handling] This is to workaround an assertion failure
148148
// (https://fburl.com/code/302jyn8d) running `gelu` in ATen mode in fbcode
149-
// (such as bento). The problem is Executorch ATen mode doesn't have
149+
// (such as bento). The problem is ExecuTorch ATen mode doesn't have
150150
// Thread Local State, but `torch-cpp` is assuming tls init is done. There
151151
// are two more checks: MKLDNN disabled and C10_MOBILE, if any of them is
152152
// true we won't be hitting this assertion error. However in `torch-cpp`

kernels/optimized/cpu/moments_utils.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
#pragma once
1010

1111
// Slightly modified version of caffe2/aten/src/ATen/native/cpu/moments_utils.h
12-
// for use in optimized Executorch ops. Template specializations of BFloat16
12+
// for use in optimized ExecuTorch ops. Template specializations of BFloat16
1313
// are excluded.
1414

1515
#include <executorch/kernels/optimized/vec/vec.h>

kernels/portable/cpu/op_allclose.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -117,13 +117,13 @@ Tensor& allclose_out(
117117
* Note: This custom operator contains two variants: allclose.Tensor (a
118118
* functional variant, no inplace mutating on the arguments) and allclose.out
119119
* (an out variant, mutating out). We need to register both into the PyTorch
120-
* runtime so that they can be visible from Executorch compiler side. Eventually
121-
* only allclose.out will be seen from Executorch runtime. With this setup, the
120+
* runtime so that they can be visible from ExecuTorch compiler side. Eventually
121+
* only allclose.out will be seen from ExecuTorch runtime. With this setup, the
122122
* portable kernel for allclose.Tensor can be implemented as a wrapper of
123123
* allclose.out. We can easily instantiate an at::Tensor for the out argument,
124124
* then pass it into allclose.out. This logic will only need to work out in
125-
* "ATen mode" for Executorch compiler, since we won't expose allclose.Tensor in
126-
* Executorch runtime.
125+
* "ATen mode" for ExecuTorch compiler, since we won't expose allclose.Tensor in
126+
* ExecuTorch runtime.
127127
*/
128128
Tensor allclose_tensor(
129129
__ET_UNUSED const Tensor& self,

runtime/core/error.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
/**
1010
* @file
11-
* Executorch Error declarations.
11+
* ExecuTorch Error declarations.
1212
*/
1313

1414
#pragma once
@@ -24,7 +24,7 @@ namespace executor {
2424
typedef uint32_t error_code_t;
2525

2626
/**
27-
* Executorch Error type.
27+
* ExecuTorch Error type.
2828
*/
2929
enum class Error : error_code_t {
3030
/*

runtime/core/exec_aten/testing_util/tensor_factory.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -554,7 +554,7 @@ inline void validate_strides(
554554

555555
} // namespace
556556

557-
// Note that this !USE_ATEN_LIB section uses Executorch-specific namespaces
557+
// Note that this !USE_ATEN_LIB section uses ExecuTorch-specific namespaces
558558
// instead of exec_aten to make it clear that we're dealing with ETensor, and
559559
// because many of these names aren't mapped into exec_aten::.
560560

runtime/core/exec_aten/util/scalar_type_util.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
* See file comment in ../ScalarType.h.
1616
*
1717
* This file contains all of the non-critical parts of the original ScalarType.h
18-
* that are not required for the core Executorch runtime, but may be helpful for
18+
* that are not required for the core ExecuTorch runtime, but may be helpful for
1919
* code that uses ScalarType.
2020
*/
2121

@@ -577,7 +577,7 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
577577
//
578578
// These macros are not meant to be used directly. They provide an easy way to
579579
// generate a switch statement that can handle subsets of ScalarTypes supported
580-
// by Executorch.
580+
// by ExecuTorch.
581581
//
582582

583583
#define ET_INTERNAL_SWITCH_CASE(enum_type, CTYPE_ALIAS, ...) \
@@ -744,7 +744,7 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
744744
// Switch case macros
745745
//
746746
// These macros provide an easy way to generate switch statements that apply a
747-
// common lambda function to subsets of ScalarTypes supported by Executorch.
747+
// common lambda function to subsets of ScalarTypes supported by ExecuTorch.
748748
// The lambda function can type specialize to the ctype associated with the
749749
// ScalarType being handled through an alias passed as the CTYPE_ALIAS argument.
750750
//

runtime/core/exec_aten/util/tensor_util_portable.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
namespace torch {
1717
namespace executor {
1818
/**
19-
* Implementation for Executorch tensor util, should only be included in
19+
* Implementation for ExecuTorch tensor util, should only be included in
2020
* an target with ATen mode turned off. Explicitly taking
2121
* torch::executor::Tensor (instead of exec_aten::Tensor) to make sure it fails
2222
* at compile time if built incorrectly.

runtime/core/memory_allocator.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ namespace executor {
3333
* MemoryAllocator allocator(100, memory_pool)
3434
* // Pass allocator object in the Executor
3535
*
36-
* Underneath the hood, Executorch will
36+
* Underneath the hood, ExecuTorch will
3737
* allocator.allocate() to keep iterating cur_ pointer
3838
*/
3939
class MemoryAllocator {

runtime/core/portable_type/device.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,11 @@ using DeviceIndex = int8_t;
2424

2525
/**
2626
* An abstraction for the compute device on which a tensor is located.
27-
* Executorch doesn't allow dynamic dispatching based on device, so this type is
27+
* ExecuTorch doesn't allow dynamic dispatching based on device, so this type is
2828
* just a skeleton to allow certain kernels that expect device as an
2929
* argument to still be run.
3030
*
31-
* In Executorch this is always expected to be CPU.
31+
* In ExecuTorch this is always expected to be CPU.
3232
*/
3333
struct Device final {
3434
using Type = DeviceType;

runtime/core/portable_type/scalar_type.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ namespace executor {
5252
*
5353
* The indices and C types must be consistent with
5454
* AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS in the core pytorch file
55-
* c10/core/ScalarType.h. This ensures that Executorch serialization is
55+
* c10/core/ScalarType.h. This ensures that ExecuTorch serialization is
5656
* compatible with ATen serialization.
5757
*
5858
* @param _ A macro that takes two parameters: the name of a C type, and the

runtime/core/portable_type/tensor_impl.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ class TensorImpl {
5757
* This must match the size/signedness of the type used for `Tensor.sizes` in
5858
* //executorch/schema/program.fbs.
5959
*
60-
* Note that at::TensorImpl uses `int64_t` for this type. Executorch uses
60+
* Note that at::TensorImpl uses `int64_t` for this type. ExecuTorch uses
6161
* `int32_t` to save memory, since no single size value will ever be larger
6262
* than 2 billion.
6363
*/
@@ -77,7 +77,7 @@ class TensorImpl {
7777
* This must match the size/signedness of the type used for `Tensor.strides`
7878
* in //executorch/schema/program.fbs.
7979
*
80-
* Note that at::TensorImpl uses `int64_t` for this type. Executorch uses
80+
* Note that at::TensorImpl uses `int64_t` for this type. ExecuTorch uses
8181
* `int32_t` to save memory, since no single stride value will ever be larger
8282
* than 2 billion.
8383
*/

runtime/core/portable_type/tensor_options.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ enum class MemoryFormat : int8_t {
2121
/**
2222
* Row-major contiguous data format.
2323
*
24-
* This is the only format supported by Executorch. Use dim orders to
24+
* This is the only format supported by ExecuTorch. Use dim orders to
2525
* describe other layouts.
2626
*/
2727
Contiguous,
@@ -37,7 +37,7 @@ enum class Layout : int8_t {
3737
* Contrasted with a sparse tensor layout where the memory structure of the
3838
* data blob will be more complicated and indexing requires larger structures.
3939
*
40-
* This is the only layout supported by Executorch.
40+
* This is the only layout supported by ExecuTorch.
4141
*/
4242
Strided,
4343
};

runtime/core/result.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
/**
1010
* @file
11-
* Result type to be used in conjunction with Executorch Error type.
11+
* Result type to be used in conjunction with ExecuTorch Error type.
1212
*/
1313

1414
#pragma once

runtime/executor/method_meta.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ namespace torch {
2323
namespace executor {
2424

2525
/**
26-
* Metadata about a specific tensor of an Executorch Program.
26+
* Metadata about a specific tensor of an ExecuTorch Program.
2727
*
2828
* The program used to create the MethodMeta object that created this
2929
* TensorInfo must outlive this TensorInfo.
@@ -90,7 +90,7 @@ class TensorInfo final {
9090
};
9191

9292
/**
93-
* Describes a a method in an Executorch program.
93+
* Describes a a method in an ExecuTorch program.
9494
*
9595
* The program used to create a MethodMeta object must outlive the MethodMeta.
9696
* It is separate from Method so that this information can be accessed without

runtime/executor/program.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ class ProgramTestFriend;
3636
} // namespace testing
3737

3838
/**
39-
* A deserialized Executorch program binary.
39+
* A deserialized ExecuTorch program binary.
4040
*/
4141
class Program final {
4242
public:

runtime/platform/abort.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ namespace torch {
1313
namespace executor {
1414

1515
/**
16-
* Trigger the Executorch global runtime to immediately exit without cleaning
16+
* Trigger the ExecuTorch global runtime to immediately exit without cleaning
1717
* up, and set an abnormal exit status (platform-defined).
1818
*/
1919
__ET_NORETURN void runtime_abort() {

runtime/platform/abort.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
/**
1010
* @file
11-
* Executorch global abort wrapper function.
11+
* ExecuTorch global abort wrapper function.
1212
*/
1313

1414
#pragma once
@@ -19,7 +19,7 @@ namespace torch {
1919
namespace executor {
2020

2121
/**
22-
* Trigger the Executorch global runtime to immediately exit without cleaning
22+
* Trigger the ExecuTorch global runtime to immediately exit without cleaning
2323
* up, and set an abnormal exit status (platform-defined).
2424
*/
2525
__ET_NORETURN void runtime_abort();

runtime/platform/compiler.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,13 @@
1616
// Compiler support checks.
1717

1818
#if !defined(__cplusplus)
19-
#error Executorch must be compiled using a C++ compiler.
19+
#error ExecuTorch must be compiled using a C++ compiler.
2020
#endif
2121

2222
#if __cplusplus < 201103L && (!defined(_MSC_VER) || _MSC_VER < 1600) && \
2323
(!defined(__GNUC__) || \
2424
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400))
25-
#error Executorch must use a compiler supporting at least the C++11 standard.
25+
#error ExecuTorch must use a compiler supporting at least the C++11 standard.
2626
#error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
2727
#endif
2828

@@ -31,7 +31,7 @@
3131
* See all C++ declaration attributes here:
3232
* https://en.cppreference.com/w/cpp/language/attributes
3333
*
34-
* Note that Executorch supports a lower C++ standard version than all standard
34+
* Note that ExecuTorch supports a lower C++ standard version than all standard
3535
* attributes. Therefore, some annotations are defined using their Clang/GNU
3636
* counterparts.
3737
*

runtime/platform/log.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
/**
1010
* @file
11-
* Executorch logging API.
11+
* ExecuTorch logging API.
1212
*/
1313

1414
#pragma once
@@ -27,7 +27,7 @@
2727

2828
/*
2929
* Enable logging by default if compiler option is not provided.
30-
* This should facilitate less confusion for those developing Executorch.
30+
* This should facilitate less confusion for those developing ExecuTorch.
3131
*/
3232
#ifndef ET_LOG_ENABLED
3333
#define ET_LOG_ENABLED 1
@@ -56,7 +56,7 @@ enum class LogLevel : uint8_t {
5656
Info,
5757

5858
/**
59-
* Log messages about errors within Executorch during runtime.
59+
* Log messages about errors within ExecuTorch during runtime.
6060
*/
6161
Error,
6262

runtime/platform/platform.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
/**
1010
* @file
1111
* Platform abstraction layer to allow individual platform libraries to override
12-
* symbols in Executorch. PAL functions are defined as C functions so a platform
12+
* symbols in ExecuTorch. PAL functions are defined as C functions so a platform
1313
* library implementer can use C in lieu of C++.
1414
*/
1515

runtime/platform/runtime.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
/**
1010
* @file
11-
* Executorch global runtime wrapper functions.
11+
* ExecuTorch global runtime wrapper functions.
1212
*/
1313

1414
#pragma once
@@ -19,7 +19,7 @@ namespace torch {
1919
namespace executor {
2020

2121
/**
22-
* Initialize the Executorch global runtime.
22+
* Initialize the ExecuTorch global runtime.
2323
*/
2424
void runtime_init();
2525

runtime/platform/system.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
/**
1010
* @file
1111
* Platform abstraction layer to allow individual host OS to override
12-
* symbols in Executorch. PAL functions are defined as C functions so an
12+
* symbols in ExecuTorch. PAL functions are defined as C functions so an
1313
* implementer can use C in lieu of C++.
1414
*/
1515
#pragma once

runtime/platform/target/Posix.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@
5454
if (!initialized) { \
5555
fprintf( \
5656
ET_LOG_OUTPUT_FILE, \
57-
"Executorch PAL must be initialized before call to %s()", \
57+
"ExecuTorch PAL must be initialized before call to %s()", \
5858
__ET_FUNCTION); \
5959
fflush(ET_LOG_OUTPUT_FILE); \
6060
et_pal_abort(); \

runtime/platform/types.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
/**
1010
* @file
11-
* Public types used by the Executorch Platform Abstraction Layer.
11+
* Public types used by the ExecuTorch Platform Abstraction Layer.
1212
*/
1313

1414
#pragma once

schema/extended_header.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ namespace torch {
1414
namespace executor {
1515

1616
/**
17-
* An extended, Executorch-specific header that may be embedded in the
17+
* An extended, ExecuTorch-specific header that may be embedded in the
1818
* serialized Program data header.
1919
*
2020
* For details see

sdk/runners/executor_runner.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ DEFINE_string(
6262
DEFINE_string(
6363
prof_result_path,
6464
"prof_result.bin",
65-
"Executorch profiler output path.");
65+
"ExecuTorch profiler output path.");
6666

6767
DEFINE_bool(print_output, false, "Prints output of the model.");
6868

@@ -316,7 +316,7 @@ int main(int argc, char** argv) {
316316
#ifdef USE_ATEN_LIB
317317
// [TLS handling] This is to workaround an assertion failure
318318
// (https://fburl.com/code/302jyn8d) running `gelu` in ATen mode in fbcode
319-
// (such as bento). The problem is Executorch ATen mode doesn't have Thread
319+
// (such as bento). The problem is ExecuTorch ATen mode doesn't have Thread
320320
// Local State, but `torch-cpp` is assuming tls init is done. There are two
321321
// more checks: MKLDNN disabled and C10_MOBILE, if any of them is true we
322322
// won't be hitting this assertion error. However in `torch-cpp` lib both

0 commit comments

Comments (0)