
Commit 4fca28f

dbort authored and facebook-github-bot committed
Update docs to stop referring to old namespaces
Summary: Audit all instances of `\bexec_aten::` and `\btorch::` under `docs/`, updating where appropriate. The only remaining `torch::` instances are for kernels, which I didn't get a chance to migrate before v0.4.0. Also update the LLM Manual code to be consistent between the doc and main.cpp.

Differential Revision: D64152344
1 parent 57e3c81 commit 4fca28f

8 files changed: +78 −67 lines changed
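
For readers applying the same migration to their own code, here is a condensed before/after sketch of the renames this commit propagates through the docs. The pairs are taken from the diffs below; whether the old spellings remain available as compatibility aliases is not covered by this commit, so treat that as an open question rather than a guarantee.

```cpp
#include <executorch/runtime/core/exec_aten/exec_aten.h>  // executorch::aten::*
#include <executorch/runtime/executor/program.h>          // executorch::runtime::*

// Core types: exec_aten:: and torch::executor:: give way to executorch::.
using executorch::aten::ScalarType;  // was: exec_aten::ScalarType
using executorch::aten::Tensor;      // was: exec_aten::Tensor
using executorch::runtime::Program;  // was: torch::executor::Program
using executorch::runtime::Result;   // was: torch::executor::Result

// Bundled-program helpers moved namespaces and became snake_case:
//   torch::executor::bundled_program::GetProgramData
//       -> executorch::bundled_program::get_program_data
//   torch::executor::bundled_program::LoadBundledInput
//       -> executorch::bundled_program::load_bundled_input
//   torch::executor::bundled_program::VerifyResultWithBundledExpectedOutput
//       -> executorch::bundled_program::verify_method_outputs
```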

docs/source/Doxyfile

Lines changed: 2 additions & 1 deletion
@@ -943,7 +943,8 @@ WARN_LOGFILE =
 # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
 # Note: If this tag is empty the current directory is searched.
 
-INPUT = ../runtime/executor/memory_manager.h \
+INPUT = ../devtools/bundled_program/bundled_program.h \
+        ../runtime/executor/memory_manager.h \
         ../runtime/executor/method.h \
         ../runtime/executor/method_meta.h \
         ../runtime/executor/program.h \

docs/source/build-run-coreml.md

Lines changed: 2 additions & 3 deletions
@@ -147,11 +147,10 @@ libsqlite3.tbd
 
 7. Update the code to load the program from the Application's bundle.
 ``` objective-c
-using namespace torch::executor;
-
 NSURL *model_url = [NBundle.mainBundle URLForResource:@"mv3_coreml_all" extension:@"pte"];
 
-Result<util::FileDataLoader> loader = util::FileDataLoader::from(model_url.path.UTF8String);
+Result<executorch::extension::FileDataLoader> loader =
+    executorch::extension::FileDataLoader::from(model_url.path.UTF8String);
 ```
 
 8. Use [Xcode](https://developer.apple.com/documentation/xcode/building-and-running-an-app#Build-run-and-debug-your-app) to deploy the application on the device.
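
Step 7 stops at constructing the loader. As a hedged continuation (mirroring the C++ tutorial later in this commit; the exact wiring inside a CoreML demo app may differ), the loader would then feed `Program::load`:

```cpp
// Sketch only: hand the FileDataLoader result to Program::load, as in
// running-a-model-cpp-tutorial.md below.
using executorch::runtime::Program;
using executorch::runtime::Result;

Result<Program> program = Program::load(&loader.get());
if (!program.ok()) {
  // Handle the failure, e.g. log and abort model setup.
}
```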

docs/source/bundled-io.md

Lines changed: 15 additions & 15 deletions
@@ -201,51 +201,51 @@ This stage mainly focuses on executing the model with the bundled inputs and and
 ### Get ExecuTorch Program Pointer from `BundledProgram` Buffer
 We need the pointer to ExecuTorch program to do the execution. To unify the process of loading and executing `BundledProgram` and Program flatbuffer, we create an API:
 
-:::{dropdown} `GetProgramData`
+:::{dropdown} `get_program_data`
 
 ```{eval-rst}
-.. doxygenfunction:: torch::executor::bundled_program::GetProgramData
+.. doxygenfunction:: executorch::bundled_program::get_program_data
 ```
 :::
 
-Here's an example of how to use the `GetProgramData` API:
+Here's an example of how to use the `get_program_data` API:
 ```c++
 // Assume that the user has read the contents of the file into file_data using
 // whatever method works best for their application. The file could contain
 // either BundledProgram data or Program data.
 void* file_data = ...;
 size_t file_data_len = ...;
 
-// If file_data contains a BundledProgram, GetProgramData() will return a
+// If file_data contains a BundledProgram, get_program_data() will return a
 // pointer to the Program data embedded inside it. Otherwise it will return
 // file_data, which already pointed to Program data.
 const void* program_ptr;
 size_t program_len;
-status = torch::executor::bundled_program::GetProgramData(
+status = executorch::bundled_program::get_program_data(
     file_data, file_data_len, &program_ptr, &program_len);
 ET_CHECK_MSG(
     status == Error::Ok,
-    "GetProgramData() failed with status 0x%" PRIx32,
+    "get_program_data() failed with status 0x%" PRIx32,
     status);
 ```
 
 ### Load Bundled Input to Method
-To execute the program on the bundled input, we need to load the bundled input into the method. Here we provided an API called `torch::executor::bundled_program::LoadBundledInput`:
+To execute the program on the bundled input, we need to load the bundled input into the method. Here we provided an API called `executorch::bundled_program::load_bundled_input`:
 
-:::{dropdown} `LoadBundledInput`
+:::{dropdown} `load_bundled_input`
 
 ```{eval-rst}
-.. doxygenfunction:: torch::executor::bundled_program::LoadBundledInput
+.. doxygenfunction:: executorch::bundled_program::load_bundled_input
 ```
 :::
 
 ### Verify the Method's Output.
-We call `torch::executor::bundled_program::VerifyResultWithBundledExpectedOutput` to verify the method's output with bundled expected outputs. Here's the details of this API:
+We call `executorch::bundled_program::verify_method_outputs` to verify the method's output with bundled expected outputs. Here's the details of this API:
 
-:::{dropdown} `VerifyResultWithBundledExpectedOutput`
+:::{dropdown} `verify_method_outputs`
 
 ```{eval-rst}
-.. doxygenfunction:: torch::executor::bundled_program::VerifyResultWithBundledExpectedOutput
+.. doxygenfunction:: executorch::bundled_program::verify_method_outputs
 ```
 :::
 
@@ -266,13 +266,13 @@ ET_CHECK_MSG(
     method.error());
 
 // Load testset_idx-th input in the buffer to plan
-status = torch::executor::bundled_program::LoadBundledInput(
+status = executorch::bundled_program::load_bundled_input(
     *method,
     program_data.bundled_program_data(),
     FLAGS_testset_idx);
 ET_CHECK_MSG(
     status == Error::Ok,
-    "LoadBundledInput failed with status 0x%" PRIx32,
+    "load_bundled_input failed with status 0x%" PRIx32,
     status);
 
 // Execute the plan
@@ -283,7 +283,7 @@ ET_CHECK_MSG(
     status);
 
 // Verify the result.
-status = torch::executor::bundled_program::VerifyResultWithBundledExpectedOutput(
+status = executorch::bundled_program::verify_method_outputs(
     *method,
     program_data.bundled_program_data(),
     FLAGS_testset_idx,
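
Taken together, the three renamed APIs form one load-run-verify flow. A condensed sketch under the new names, assuming `file_data`/`file_data_len` hold the BundledProgram file, `memory_manager` is already configured, `testset_idx` selects the bundled test case, and pointer casts and error details are elided:

```cpp
#include <cassert>

// 1. Locate the Program data inside the (possibly bundled) file.
const void* program_ptr;
size_t program_len;
Error status = executorch::bundled_program::get_program_data(
    file_data, file_data_len, &program_ptr, &program_len);
assert(status == Error::Ok);

// 2. Load the Program and the method under test.
executorch::extension::BufferDataLoader loader(program_ptr, program_len);
Result<Program> program = Program::load(&loader);
Result<Method> method = program->load_method("forward", &memory_manager);

// 3. Feed the bundled input, execute, and compare against the bundled output.
status = executorch::bundled_program::load_bundled_input(
    *method, file_data, testset_idx);
assert(status == Error::Ok);
status = method->execute();
assert(status == Error::Ok);
status = executorch::bundled_program::verify_method_outputs(
    *method, file_data, testset_idx);
assert(status == Error::Ok);
```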

docs/source/concepts.md

Lines changed: 4 additions & 4 deletions
@@ -26,7 +26,7 @@ The goal of ATen dialect is to capture users’ programs as faithfully as possib
 
 ## ATen mode
 
-ATen mode uses the ATen implementation of Tensor (`at::Tensor`) and related types, such as `ScalarType`, from the PyTorch core. This is in contrast to portable mode, which uses ExecuTorch’s smaller implementation of tensor (`torch::executor::Tensor`) and related types, such as `torch::executor::ScalarType`.
+ATen mode uses the ATen implementation of Tensor (`at::Tensor`) and related types, such as `ScalarType`, from the PyTorch core. This is in contrast to ETensor mode, which uses ExecuTorch’s smaller implementation of tensor (`executorch::runtime::etensor::Tensor`) and related types, such as `executorch::runtime::etensor::ScalarType`.
 - ATen kernels that rely on the full `at::Tensor` API are usable in this configuration.
 - ATen kernels tend to do dynamic memory allocation and often have extra flexibility (and thus overhead) to handle cases not needed by mobile/embedded clients. e.g., CUDA support, sparse tensor support, and dtype promotion.
 - Note: ATen mode is currently a WIP.
@@ -244,10 +244,10 @@ Kernels that support a subset of tensor dtypes and/or dim orders.
 
 Parts of a model may be delegated to run on an optimized backend. The partitioner splits the graph into the appropriate sub-networks and tags them for delegation.
 
-## Portable mode (lean mode)
+## ETensor mode
 
-Portable mode uses ExecuTorch’s smaller implementation of tensor (`torch::executor::Tensor`) along with related types (`torch::executor::ScalarType`, etc.). This is in contrast to ATen mode, which uses the ATen implementation of Tensor (`at::Tensor`) and related types (`ScalarType`, etc.)
-- `torch::executor::Tensor`, also known as ETensor, is a source-compatible subset of `at::Tensor`. Code written against ETensor can build against `at::Tensor`.
+ETensor mode uses ExecuTorch’s smaller implementation of tensor (`executorch::runtime::etensor::Tensor`) along with related types (`executorch::runtime::etensor::ScalarType`, etc.). This is in contrast to ATen mode, which uses the ATen implementation of Tensor (`at::Tensor`) and related types (`ScalarType`, etc.)
+- `executorch::runtime::etensor::Tensor`, also known as ETensor, is a source-compatible subset of `at::Tensor`. Code written against ETensor can build against `at::Tensor`.
 - ETensor does not own or allocate memory on its own. To support dynamic shapes, kernels can allocate Tensor data using the MemoryAllocator provided by the client.
 
 ## Portable kernels
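
The source-compatibility bullet is the practical payoff: code held to the shared subset of the Tensor API compiles in either mode. A minimal sketch, assuming `executorch::aten::Tensor` resolves to `at::Tensor` under ATen mode and to the ETensor implementation otherwise (which is what the `using executorch::aten::Tensor` declarations elsewhere in this commit rely on):

```cpp
#include <executorch/runtime/core/exec_aten/exec_aten.h>

// dim() and size() exist in both tensor implementations, so this helper
// builds unchanged in ATen mode and ETensor mode.
int64_t count_nonempty_dims(const executorch::aten::Tensor& t) {
  int64_t count = 0;
  for (int64_t d = 0; d < t.dim(); ++d) {
    if (t.size(d) > 0) {
      ++count;
    }
  }
  return count;
}
```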

docs/source/etdump.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ Generating an ETDump is a relatively straightforward process. Users can follow t
 2. ***Create*** an Instance of the ETDumpGen class and pass it into the `load_method` call that is invoked in the runtime.
 
 ```C++
-torch::executor::ETDumpGen etdump_gen = torch::executor::ETDumpGen();
+executorch::etdump::ETDumpGen etdump_gen;
 Result<Method> method =
     program->load_method(method_name, &memory_manager, &etdump_gen);
 ```
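
The snippet above only wires the generator into `load_method`; the other half is pulling the serialized dump back out after execution. A hedged sketch, assuming the `get_etdump_data()` accessor and the `buf`/`size` fields of `ETDumpResult` as of v0.4.0 (the buffer is heap-allocated and the caller frees it):

```cpp
#include <cstdio>
#include <cstdlib>

// After method->execute(), write the collected events to disk so the
// Inspector tooling can consume them.
executorch::etdump::ETDumpResult result = etdump_gen.get_etdump_data();
if (result.buf != nullptr && result.size > 0) {
  FILE* f = fopen("model.etdump", "w+");
  fwrite(result.buf, 1, result.size, f);
  fclose(f);
  free(result.buf);
}
```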

docs/source/llm/getting-started.md

Lines changed: 23 additions & 23 deletions
@@ -208,8 +208,8 @@ Create a file called main.cpp with the following contents:
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/result.h>
 
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::extension::from_blob;
 using executorch::extension::Module;
 using executorch::runtime::EValue;
@@ -309,28 +309,28 @@ penalties for repeated tokens, and biases to prioritize or de-prioritize specifi
 ```cpp
 // main.cpp
 
-using namespace torch::executor;
-
 int main() {
-// Set up the prompt. This provides the seed text for the model to elaborate.
-std::cout << "Enter model prompt: ";
-std::string prompt;
-std::getline(std::cin, prompt);
-
-// The tokenizer is used to convert between tokens (used by the model) and
-// human-readable strings.
-BasicTokenizer tokenizer("vocab.json");
-
-// The sampler is used to sample the next token from the logits.
-BasicSampler sampler = BasicSampler();
-
-// Load the exported nanoGPT program, which was generated via the previous steps.
-Module model("nanogpt.pte", Module::LoadMode::MmapUseMlockIgnoreErrors);
-
-const auto max_input_tokens = 1024;
-const auto max_output_tokens = 30;
-std::cout << prompt;
-generate(model, prompt, tokenizer, sampler, max_input_tokens, max_output_tokens);
+  // Set up the prompt. This provides the seed text for the model to elaborate.
+  std::cout << "Enter model prompt: ";
+  std::string prompt;
+  std::getline(std::cin, prompt);
+
+  // The tokenizer is used to convert between tokens (used by the model) and
+  // human-readable strings.
+  BasicTokenizer tokenizer("vocab.json");
+
+  // The sampler is used to sample the next token from the logits.
+  BasicSampler sampler = BasicSampler();
+
+  // Load the exported nanoGPT program, which was generated via the previous
+  // steps.
+  Module model("nanogpt.pte", Module::LoadMode::MmapUseMlockIgnoreErrors);
+
+  const auto max_input_tokens = 1024;
+  const auto max_output_tokens = 30;
+  std::cout << prompt;
+  generate(
+      model, prompt, tokenizer, sampler, max_input_tokens, max_output_tokens);
 }
 ```
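
For context on how the `from_blob` and `Module` declarations at the top of main.cpp get used, this is the shape of the per-step model call inside the manual's `generate()` (a sketch consistent with the nanoGPT export in this manual; variable names are illustrative):

```cpp
// Wrap the running token sequence in a Tensor without copying, then run the
// model's "forward" method and read the logits back out of the first output.
std::vector<int64_t> input_tokens = tokenizer.encode(prompt);
auto inputs = from_blob(
    input_tokens.data(),
    {1, static_cast<int>(input_tokens.size())},
    ScalarType::Long);
auto outputs = model.forward(inputs);
if (outputs.ok()) {
  Tensor logits = outputs.get()[0].toTensor();
}
```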
336336

docs/source/running-a-model-cpp-tutorial.md

Lines changed: 28 additions & 17 deletions
@@ -24,14 +24,25 @@ Users can define their own `DataLoader`s to fit the needs of their particular sy
 For the `FileDataLoader` all we need to do is provide a file path to the constructor.
 
 ``` cpp
-using namespace torch::executor;
-
-Result<util::FileDataLoader> loader =
-    util::FileDataLoader::from("/tmp/model.pte");
+using executorch::aten::Tensor;
+using executorch::aten::TensorImpl;
+using executorch::extension::FileDataLoader;
+using executorch::extension::MallocMemoryAllocator;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::HierarchicalAllocator;
+using executorch::runtime::MemoryManager;
+using executorch::runtime::Method;
+using executorch::runtime::MethodMeta;
+using executorch::runtime::Program;
+using executorch::runtime::Result;
+using executorch::runtime::Span;
+
+Result<FileDataLoader> loader =
+    FileDataLoader::from("/tmp/model.pte");
 assert(loader.ok());
 
-Result<Program> program =
-    torch::executor::Program::load(&loader.get());
+Result<Program> program = Program::load(&loader.get());
 assert(program.ok());
 ```
@@ -48,14 +59,13 @@ One of the principles of ExecuTorch is giving users control over where the memor
 For this example we will retrieve the size of the planned memory arenas dynamically from the `Program`, but for heapless environments users could retrieve this information from the `Program` ahead of time and allocate the arena statically. We will also be using a malloc based allocator for the method allocator.
 
 ``` cpp
-
-// Method names map back to Python nn.Module method names. Most users will only have the singular method "forward".
+// Method names map back to Python nn.Module method names. Most users will only
+// have the singular method "forward".
 const char* method_name = "forward";
 
 // MethodMeta is a lightweight structure that lets us gather metadata
-// information about a specific method. In this case we are looking to
-// get the required size of the memory planned buffers for the method
-// "forward".
+// information about a specific method. In this case we are looking to get the
+// required size of the memory planned buffers for the method "forward".
 Result<MethodMeta> method_meta = program->method_meta(method_name);
 assert(method_meta.ok());
@@ -64,7 +74,8 @@ std::vector<Span<uint8_t>> planned_arenas; // Passed to the allocator
 
 size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
 
-// It is possible to have multiple layers in our memory hierarchy; for example, SRAM and DRAM.
+// It is possible to have multiple layers in our memory hierarchy; for example,
+// SRAM and DRAM.
 for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
   // .get() will always succeed because id < num_memory_planned_buffers.
   size_t buffer_size =
@@ -75,12 +86,12 @@ for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
 HierarchicalAllocator planned_memory(
     {planned_arenas.data(), planned_arenas.size()});
 
-// Version of MemoryAllocator that uses malloc to handle allocations
-// rather then a fixed buffer.
-util::MallocMemoryAllocator method_allocator;
+// Version of MemoryAllocator that uses malloc to handle allocations rather then
+// a fixed buffer.
+MallocMemoryAllocator method_allocator;
 
-// Assemble all of the allocators into the MemoryManager that the Executor
-// will use.
+// Assemble all of the allocators into the MemoryManager that the Executor will
+// use.
 MemoryManager memory_manager(&method_allocator, &planned_memory);
 ```
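
The excerpt ends at assembling the `MemoryManager`; for completeness, a hedged sketch of the steps the same tutorial takes next (loading the method, setting a trivial input, executing, and reading the output; the 1-element float shape is illustrative, and real code should consult `method_meta->input_tensor_meta(0)`):

```cpp
Result<Method> method = program->load_method(method_name, &memory_manager);
assert(method.ok());

// Build a 1-element float tensor on the stack as the method input.
float input_value = 1.0f;
TensorImpl::SizesType sizes[] = {1};
TensorImpl::DimOrderType dim_order[] = {0};
TensorImpl impl(ScalarType::Float, /*dim=*/1, sizes, &input_value, dim_order);
Tensor input(&impl);

Error status = method->set_input(input, 0);
assert(status == Error::Ok);

status = method->execute();
assert(status == Error::Ok);

// Outputs remain owned by the Method; copy out what you need.
EValue output = method->get_output(0);
```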

examples/llm_manual/main.cpp

Lines changed: 3 additions & 3 deletions
@@ -17,8 +17,8 @@
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/result.h>
 
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::extension::from_blob;
 using executorch::extension::Module;
 using executorch::runtime::EValue;
@@ -90,7 +90,7 @@ std::string generate(
 
 int main() {
   // Set up the prompt. This provides the seed text for the model to elaborate.
-  std::cout << "Prompt: ";
+  std::cout << "Enter model prompt: ";
   std::string prompt;
   std::getline(std::cin, prompt);
