Skip to content

Replace Executorch with ExecuTorch, Part 5/N #469

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion backends/xnnpack/test/tester/tester.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@ def run_method(self, method="forward"):
delegated_module = self.get_artifact(self._stage_name(Partition))
self.reference_output = delegated_module(*self.inputs)

# Executorch
# ExecuTorch
inputs_flattened, _ = tree_flatten(self.inputs)
serialized_buffer = self.get_artifact(self._stage_name(Serialize))
executorch_module = _load_for_executorch_from_buffer(serialized_buffer)
Expand Down
4 changes: 2 additions & 2 deletions bundled_program/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ class BundledExecutionPlanTest:

@dataclass
class BundledProgram:
"""Executorch program bundled with data for verification."""
"""ExecuTorch program bundled with data for verification."""

# Schema version.
version: int
Expand All @@ -94,5 +94,5 @@ class BundledProgram:
# Its length should be equal to the number of execution plans in program.
execution_plan_tests: List[BundledExecutionPlanTest]

# The binary data of a serialized Executorch program.
# The binary data of a serialized ExecuTorch program.
program: bytes
6 changes: 3 additions & 3 deletions codegen/tools/gen_all_oplist.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@
def main(argv: List[Any]) -> None:
"""This binary is a wrapper for //xplat/caffe2/tools/code_analyzer/gen_oplist.py.
This is needed because we intend to error out for the case where `model_file_list_path`
is empty or invalid, so that the Executorch build will fail when no selective build target
is provided as a dependency to Executorch build.
is empty or invalid, so that the ExecuTorch build will fail when no selective build target
is provided as a dependency to ExecuTorch build.
"""
parser = argparse.ArgumentParser(description="Generate operator lists")
parser.add_argument(
Expand Down Expand Up @@ -66,7 +66,7 @@ def main(argv: List[Any]) -> None:
model_file_names = model_list_file.read().split()
assert (
len(model_file_names) > 0
), "BUCK was not able to find any `et_operator_library` in the dependency graph of the current Executorch "
), "BUCK was not able to find any `et_operator_library` in the dependency graph of the current ExecuTorch "
"build. Please refer to Selective Build wiki page to add at least one."
gen_oplist.main(argv)

Expand Down
2 changes: 1 addition & 1 deletion codegen/tools/gen_ops_def.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def dump_yaml(model_file: str, output_file: str) -> None:


def main(args: List[Any]) -> None:
"""This binary generates a template functions.yaml which will be consumed by Executorch codegen.
"""This binary generates a template functions.yaml which will be consumed by ExecuTorch codegen.
It reads the model file, deserialize it and dumps all the operators into a new functions.yaml.
The generated file contains placeholder kernels, it needs to be updated with proper kernel names.
"""
Expand Down
2 changes: 1 addition & 1 deletion docs/source_cpp/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@
############################################################################
# Main library page layout example configuration. #
############################################################################
"afterTitleDescription": "Welcome to the Executorch's documentation.",
"afterTitleDescription": "Welcome to the ExecuTorch's documentation.",
}

# Tell sphinx what the primary language being documented is.
Expand Down
4 changes: 2 additions & 2 deletions examples/bundled_executor_runner/bundled_executor_runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
/**
* @file
*
* This tool can run Executorch model files that only use operators that
* This tool can run ExecuTorch model files that only use operators that
* are covered by the portable kernels, with possible delegate to the
* test_backend_compiler_lib.
*
Expand Down Expand Up @@ -42,7 +42,7 @@ DEFINE_string(
DEFINE_string(
prof_result_path,
"prof_result.bin",
"Executorch profiler output path.");
"ExecuTorch profiler output path.");

DEFINE_bool(
bundled_program,
Expand Down
4 changes: 2 additions & 2 deletions examples/executor_runner/executor_runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
/**
* @file
*
* This tool can run Executorch model files that only use operators that
* This tool can run ExecuTorch model files that only use operators that
* are covered by the portable kernels, with possible delegate to the
* test_backend_compiler_lib.
*
Expand Down Expand Up @@ -38,7 +38,7 @@ DEFINE_string(
DEFINE_string(
prof_result_path,
"prof_result.bin",
"Executorch profiler output path.");
"ExecuTorch profiler output path.");

using namespace torch::executor;
using torch::executor::util::FileDataLoader;
Expand Down
6 changes: 3 additions & 3 deletions exir/backend/test/demos/rpc/test_rpc.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

"""
Server can be an App Call, and will send delegate to client backend like DSP,
DSP will receive the rpc call, calls the ExecuTorch instance (like on DSP),
and return the result.
and return the result.


Expand Down Expand Up @@ -65,12 +65,12 @@

MCU
——
1. MCU instantiate Executorch instance with DSPBackend
1. MCU instantiate ExecuTorch instance with DSPBackend
2. In DSPBackend init/execute, it'll invoke the implemented RPC calls on DSP

DSP
——
3. DSP receives the RPC call and construct the Executorch instance on the DSP
3. DSP receives the RPC call and construct the ExecuTorch instance on the DSP
4. When dsp executor runs, it can call any delegate (e.g. Accelerator) as needed.

There’ll be negligible overhead in binary size on the MCU, as the executor size is small.
Expand Down
2 changes: 1 addition & 1 deletion exir/dialects/backend/_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ class BackendOpOverload(EdgeOpOverload):
2. backend op is stateless while delegation doesn't have to
3. backend op stays in executor standard runtime but delegation doesn't have to

Examples for backend ops including fused ops for a specific backend, Executorch prim ops to handle symbolic shape.
Examples for backend ops including fused ops for a specific backend, ExecuTorch prim ops to handle symbolic shape.

Note that the assumption here is that the backend op and the original callable / equivalent callable is 1 - 1 mapping.

Expand Down
2 changes: 1 addition & 1 deletion exir/dialects/edge/_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@ def __init__(
else:
# Not get the info from edge.yaml
# Create a dtype constraint for this operator that allows any dtype
# combinations as long as any dtype is legal in Executorch.
# combinations as long as any dtype is legal in ExecuTorch.
type_alias = {
f"T{idx}": list(regular_tensor_str_to_dtypes.values())
for idx in range(len(all_tensor_io_names))
Expand Down
2 changes: 1 addition & 1 deletion exir/emit/_emit_program.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ class EmitterOutput:
a mapping of instruction ids to debug handles.
"""

# The Executorch program
# The ExecuTorch program
program: Program

# This dictionary maps the instruction ids to their corresponding
Expand Down
4 changes: 2 additions & 2 deletions exir/emit/_emitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# LICENSE file in the root directory of this source tree.

"""Takes an ExportedArtifact, or a collection of ExportedArtifacts, in execution dialect, and turns
them into a single Executorch Program.
them into a single ExecuTorch Program.

The provided ExportedArtifact's graph modules are in execution dialect and the emitter parses and
converts them into executorch instructions. The emitter walks the provided graphs and as it
Expand Down Expand Up @@ -1015,7 +1015,7 @@ def _emit_delegate(
return delegate_ret

def _get_operator(self, name: str, overload: str) -> Tuple[int, Operator]:
"""Given a fully qualified name, lookups the operator in the Executorch Program, or adds it
"""Given a fully qualified name, lookups the operator in the ExecuTorch Program, or adds it
if it is not already present"""
key = (name, overload)
op_index = self.emitter_state.operator_cache.get(key)
Expand Down
2 changes: 1 addition & 1 deletion exir/passes/memory_format_ops_pass.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
class MemoryFormatOpsPass(ExportPass):
"""
This pass replaces ops which takes torch.memory_format as an argument with
'equivalent' op which takes dim_order. This is towards the larger Executorch
'equivalent' op which takes dim_order. This is towards the larger ExecuTorch
goal to move away from torch.memory_format. There is a 1:1 mapping between
the aten op and the new edge dialect dim_order op.
"""
Expand Down
2 changes: 1 addition & 1 deletion exir/passes/sym_to_tensor_pass.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
class SymToTensorPass(ExportPass):
"""
The dispatcher implicitly converts SymInt/SymFloats to tensors, but
sometimes this doesn't comply with the operator's schema which Executorch
sometimes this doesn't comply with the operator's schema which ExecuTorch
heavily relies on. So this pass inserts a
torch.ops.aten.scalar_tensor.default operator before these SymInts are used
so that it matches the schema of the operator.
Expand Down
16 changes: 8 additions & 8 deletions exir/program/_program.py
Original file line number Diff line number Diff line change
Expand Up @@ -626,12 +626,12 @@ def to_edge(
class EdgeProgramManager:
"""
Package of one or more :class:'ExportedPrograms' in Edge dialect. Designed to simplify
lowering to Executorch.
lowering to ExecuTorch.

Allows easy applications of transforms across a collection of exported programs
including the delegation of subgraphs.

Manages the second link in the lowering chain of ATen -> Edge -> Executorch.
Manages the second link in the lowering chain of ATen -> Edge -> ExecuTorch.
"""

# TODO(T163717152): Link to Edge dialect docs here ^.
Expand Down Expand Up @@ -761,11 +761,11 @@ def to_executorch(
self, config: Optional[ExecutorchBackendConfig] = None
) -> "ExecutorchProgramManager":
"""
Transforms the program to the Executorch backend.
Transforms the program to the ExecuTorch backend.

Args:
config: An optional argument used to provide greater control over
the transformation to the Executorch backend.
the transformation to the ExecuTorch backend.

Returns:
ExecutorchProgramManager: A manager representing the state of the EdgeProgramManager
Expand All @@ -786,13 +786,13 @@ def to_executorch(
class ExecutorchProgramManager:
"""
Package of one or more :class:'ExportedPrograms' in Execution dialect. Designed to simplify
lowering to Executorch.
lowering to ExecuTorch.

When the ExecutorchProgramManager is constructed the ExportedPrograms in execution dialect
are used to form the executorch binary (in a process called emission) and then serialized
to a buffer.

Manages the final link in the lowering chain of ATen -> Edge -> Executorch.
Manages the final link in the lowering chain of ATen -> Edge -> ExecuTorch.
"""

# TODO(T163717152): Link to Execution dialect docs here ^.
Expand Down Expand Up @@ -864,7 +864,7 @@ def exported_program(self, method_name: str = "forward") -> ExportedProgram:

def dump_executorch_program(self, verbose: bool = False) -> None:
"""
Prints the Executorch binary in a human readable format.
Prints the ExecuTorch binary in a human readable format.

Args:
verbose (bool):
Expand All @@ -884,6 +884,6 @@ def debug_handle_map(self) -> Dict[int, Union[int, List[int]]]:
@property
def buffer(self) -> bytes:
"""
Returns a buffer containing the serialized Executorch binary.
Returns a buffer containing the serialized ExecuTorch binary.
"""
return self._buffer
2 changes: 1 addition & 1 deletion exir/serde/serialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -683,7 +683,7 @@ def deserialize(
) -> exir.ExportedProgram:
exported_program_str = exported_program_bytes.decode("utf-8")
exported_program_dict = json.loads(exported_program_str)
# Executorch tests save exported program on disk when we haven't
# ExecuTorch tests save exported program on disk when we haven't
# provided any compatibility guarantees.
if "module_call_graph" not in exported_program_dict["graph_module"]:
exported_program_dict["graph_module"]["module_call_graph"] = []
Expand Down
2 changes: 1 addition & 1 deletion extension/pybindings/pybindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ class Module final {
#ifdef USE_ATEN_LIB
// [TLS handling] This is to workaround an assertion failure
// (https://fburl.com/code/302jyn8d) running `gelu` in ATen mode in fbcode
// (such as bento). The problem is Executorch ATen mode doesn't have
// (such as bento). The problem is ExecuTorch ATen mode doesn't have
// Thread Local State, but `torch-cpp` is assuming tls init is done. There
// are two more checks: MKLDNN disabled and C10_MOBILE, if any of them is
// true we won't be hitting this assertion error. However in `torch-cpp`
Expand Down
6 changes: 3 additions & 3 deletions extension/pybindings/test/make_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def create_program(
) -> Tuple[Program, Tuple[Any, ...]]:
"""Returns an executorch program based on ModuleAdd, along with inputs."""

# Trace the test module and create a serialized Executorch program.
# Trace the test module and create a serialized ExecuTorch program.
inputs = (torch.ones(2, 2), torch.ones(2, 2))
input_map = {}
for method in eager_module.get_methods_to_export():
Expand All @@ -74,14 +74,14 @@ def create_program(
.to_executorch()
)

# Create the Executorch program from the graph.
# Create the ExecuTorch program from the graph.
pretty_print(exec_prog.program)
return (exec_prog, inputs)

######### TEST CASES #########

def test_e2e(tester):
# Create an Executorch program from ModuleAdd.
# Create an ExecuTorch program from ModuleAdd.
exported_program, inputs = create_program(ModuleAdd())

# Use pybindings to load and execute the program.
Expand Down
2 changes: 1 addition & 1 deletion kernels/optimized/cpu/moments_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
#pragma once

// Slightly modified version of caffe2/aten/src/ATen/native/cpu/moments_utils.h
// for use in optimized Executorch ops. Template specializations of BFloat16
// for use in optimized ExecuTorch ops. Template specializations of BFloat16
// are excluded.

#include <executorch/kernels/optimized/vec/vec.h>
Expand Down
8 changes: 4 additions & 4 deletions kernels/portable/cpu/op_allclose.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -117,13 +117,13 @@ Tensor& allclose_out(
* Note: This custom operator contains two variants: allclose.Tensor (a
* functional variant, no inplace mutating on the arguments) and allclose.out
* (an out variant, mutating out). We need to register both into the PyTorch
* runtime so that they can be visible from Executorch compiler side. Eventually
* only allclose.out will be seen from Executorch runtime. With this setup, the
* runtime so that they can be visible from ExecuTorch compiler side. Eventually
* only allclose.out will be seen from ExecuTorch runtime. With this setup, the
* portable kernel for allclose.Tensor can be implemented as a wrapper of
* allclose.out. We can easily instantiate an at::Tensor for the out argument,
* then pass it into allclose.out. This logic will only need to work out in
* "ATen mode" for Executorch compiler, since we won't expose allclose.Tensor in
* Executorch runtime.
* "ATen mode" for ExecuTorch compiler, since we won't expose allclose.Tensor in
* ExecuTorch runtime.
*/
Tensor allclose_tensor(
__ET_UNUSED const Tensor& self,
Expand Down
4 changes: 2 additions & 2 deletions runtime/core/error.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

/**
* @file
* Executorch Error declarations.
* ExecuTorch Error declarations.
*/

#pragma once
Expand All @@ -24,7 +24,7 @@ namespace executor {
typedef uint32_t error_code_t;

/**
* Executorch Error type.
* ExecuTorch Error type.
*/
enum class Error : error_code_t {
/*
Expand Down
2 changes: 1 addition & 1 deletion runtime/core/exec_aten/testing_util/tensor_factory.h
Original file line number Diff line number Diff line change
Expand Up @@ -554,7 +554,7 @@ inline void validate_strides(

} // namespace

// Note that this !USE_ATEN_LIB section uses Executorch-specific namespaces
// Note that this !USE_ATEN_LIB section uses ExecuTorch-specific namespaces
// instead of exec_aten to make it clear that we're dealing with ETensor, and
// because many of these names aren't mapped into exec_aten::.

Expand Down
6 changes: 3 additions & 3 deletions runtime/core/exec_aten/util/scalar_type_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* See file comment in ../ScalarType.h.
*
* This file contains all of the non-critical parts of the original ScalarType.h
* that are not required for the core Executorch runtime, but may be helpful for
* that are not required for the core ExecuTorch runtime, but may be helpful for
* code that uses ScalarType.
*/

Expand Down Expand Up @@ -577,7 +577,7 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
//
// These macros are not meant to be used directly. They provide an easy way to
// generate a switch statement that can handle subsets of ScalarTypes supported
// by Executorch.
// by ExecuTorch.
//

#define ET_INTERNAL_SWITCH_CASE(enum_type, CTYPE_ALIAS, ...) \
Expand Down Expand Up @@ -744,7 +744,7 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
// Switch case macros
//
// These macros provide an easy way to generate switch statements that apply a
// common lambda function to subsets of ScalarTypes supported by Executorch.
// common lambda function to subsets of ScalarTypes supported by ExecuTorch.
// The lambda function can type specialize to the ctype associated with the
// ScalarType being handled through an alias passed as the CTYPE_ALIAS argument.
//
Expand Down
2 changes: 1 addition & 1 deletion runtime/core/exec_aten/util/tensor_util_portable.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
namespace torch {
namespace executor {
/**
* Implementation for Executorch tensor util, should only be included in
* Implementation for ExecuTorch tensor util, should only be included in
* an target with ATen mode turned off. Explicitly taking
* torch::executor::Tensor (instead of exec_aten::Tensor) to make sure it fails
* at compile time if built incorrectly.
Expand Down
2 changes: 1 addition & 1 deletion runtime/core/memory_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ namespace executor {
* MemoryAllocator allocator(100, memory_pool)
* // Pass allocator object in the Executor
*
* Underneath the hood, Executorch will
* Underneath the hood, ExecuTorch will
* allocator.allocate() to keep iterating cur_ pointer
*/
class MemoryAllocator {
Expand Down
Loading