Skip to content

Commit 0267b77

Browse files
pytorchbot and dbort authored
Move examples/mediatek out from under the torch namespace (#5556)
Move examples/mediatek out from under the torch namespace (#5478) Summary: The code under examples/... is a proxy for user code, and users should never declare code under the `torch::` or `executorch::` namespaces. Move this code under the `example::` namespace to make it more clear that users should use their own namespaces when writing code like this. Pull Request resolved: #5478 Test Plan: - Built using the instructions at https://github.com/pytorch/executorch/blob/main/examples/mediatek/README.md Reviewed By: JacobSzwejbka, cccclai Differential Revision: D62992974 Pulled By: dbort fbshipit-source-id: b01f1b33d2853a0555ae19d79769a5bb6d0ba853 (cherry picked from commit 182f138) Co-authored-by: Dave Bort <[email protected]>
1 parent e6f7bb1 commit 0267b77

21 files changed

+106
-62
lines changed

examples/mediatek/executor_runner/llama_runner/FileMemMapper.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
#include <sys/stat.h>
1616
#include <unistd.h>
1717

18-
namespace torch::executor {
18+
namespace example {
1919

2020
class FileMemMapper { // Read-only mmap
2121
public:
@@ -97,4 +97,4 @@ class FileMemMapper { // Read-only mmap
9797
size_t mSize = 0;
9898
};
9999

100-
} // namespace torch::executor
100+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaConfig.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
#include "llm_helper/include/llm_types.h"
1515

16-
namespace torch::executor {
16+
namespace example {
1717

1818
using llm_helper::LLMType;
1919

@@ -42,4 +42,4 @@ struct LlamaModelPaths {
4242
std::vector<std::string> gen_model_paths;
4343
};
4444

45-
} // namespace torch::executor
45+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaModelChunk.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
#include "llm_helper/include/mask_builder.h"
2727
#include "llm_helper/include/rotary_embedding.h"
2828

29-
namespace torch::executor {
29+
namespace example {
3030

3131
inline std::vector<size_t> getIndexRange(
3232
const size_t startIndex,
@@ -343,4 +343,4 @@ void LlamaModelChunk::InitCache() {
343343
}
344344
}
345345

346-
} // namespace torch::executor
346+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaModelChunk.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,12 @@
2727
#include "llm_helper/include/mask_builder.h"
2828
#include "llm_helper/include/rotary_embedding.h"
2929

30-
namespace torch::executor {
30+
namespace example {
3131

3232
using llm_helper::MaskBuilder;
3333
using llm_helper::RotaryEmbeddingMasterLut;
3434

35-
using TensorShape = Span<const int32_t>;
35+
using TensorShape = executorch::runtime::Span<const int32_t>;
3636
using ModelIndexMap = std::unordered_map<size_t, size_t>;
3737

3838
// Llama decoder chunk
@@ -135,4 +135,4 @@ class LlamaModelChunk : public ModelChunk {
135135
size_t mCurrentTokenIndex = 0;
136136
};
137137

138-
} // namespace torch::executor
138+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaRuntime.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
#include "llm_helper/include/rotary_embedding.h"
1919
#include "llm_helper/include/token_embedding.h"
2020

21-
namespace torch::executor {
21+
namespace example {
2222

2323
void LlamaRuntime::Initialize(
2424
const LlamaModelOptions& modelOptions,
@@ -201,4 +201,4 @@ const LlamaModelOptions& LlamaRuntime::GetModelOptions() const {
201201
return mModelOptions;
202202
}
203203

204-
} // namespace torch::executor
204+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaRuntime.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
#include "llm_helper/include/rotary_embedding.h"
2121
#include "llm_helper/include/token_embedding.h"
2222

23-
namespace torch::executor {
23+
namespace example {
2424

2525
class LlamaRuntime {
2626
public:
@@ -56,4 +56,4 @@ class LlamaRuntime {
5656
size_t mTokenIndex = 0;
5757
};
5858

59-
} // namespace torch::executor
59+
} // namespace example

examples/mediatek/executor_runner/llama_runner/ModelChunk.cpp

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,21 @@
2323
#define ENSURE_INIT \
2424
ET_CHECK_MSG(Initialized(), "Error: Model chunk not initialized.");
2525

26-
namespace torch::executor {
27-
28-
using util::FileDataLoader;
26+
namespace example {
27+
28+
using executorch::aten::Tensor;
29+
using executorch::aten::TensorImpl;
30+
using executorch::extension::FileDataLoader;
31+
using executorch::runtime::Error;
32+
using executorch::runtime::HierarchicalAllocator;
33+
using executorch::runtime::MemoryAllocator;
34+
using executorch::runtime::MemoryManager;
35+
using executorch::runtime::Method;
36+
using executorch::runtime::MethodMeta;
37+
using executorch::runtime::Program;
38+
using executorch::runtime::Result;
39+
using executorch::runtime::Span;
40+
using executorch::runtime::Tag;
2941

3042
static constexpr size_t kMethodAllocatorPoolSize = 4 * 1024U * 1024U; // 4MB
3143

@@ -572,4 +584,4 @@ void ModelChunk::ReleaseModelInstance(void* modelInstance) {
572584
}
573585
}
574586

575-
} // namespace torch::executor
587+
} // namespace example

examples/mediatek/executor_runner/llama_runner/ModelChunk.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
#include "MultiModelLoader.h"
1818

19-
namespace torch::executor {
19+
namespace example {
2020

2121
struct BufferInfo {
2222
void* data = nullptr;
@@ -91,7 +91,7 @@ class ModelChunk : protected MultiTokenSizeModelLoader {
9191
// Release allocated buffers for model IOs
9292
void ReleaseIoBuffers();
9393

94-
Method& GetModelMethod();
94+
executorch::runtime::Method& GetModelMethod();
9595

9696
private:
9797
// Override the virtual functions
@@ -119,4 +119,4 @@ class ModelChunk : protected MultiTokenSizeModelLoader {
119119
std::unordered_map<size_t, size_t> mModelOutToInIndexLinks;
120120
};
121121

122-
} // namespace torch::executor
122+
} // namespace example

examples/mediatek/executor_runner/llama_runner/MultiModelLoader.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
#include <unordered_map>
1717
#include <vector>
1818

19-
namespace torch::executor {
19+
namespace example {
2020

2121
template <typename IdType>
2222
void MultiModelLoader<IdType>::LoadModels() {
@@ -174,4 +174,4 @@ std::string MultiModelLoader<IdType>::GetIdString(const IdType& id) {
174174
template class MultiModelLoader<int>;
175175
template class MultiModelLoader<size_t>;
176176

177-
} // namespace torch::executor
177+
} // namespace example

examples/mediatek/executor_runner/llama_runner/MultiModelLoader.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
#include <unordered_map>
1313
#include <vector>
1414

15-
namespace torch::executor {
15+
namespace example {
1616

1717
template <typename IdType = size_t>
1818
class MultiModelLoader {
@@ -92,4 +92,4 @@ class MultiModelLoader {
9292
IdType mCurrentModelId = 0;
9393
};
9494

95-
} // namespace torch::executor
95+
} // namespace example

examples/mediatek/executor_runner/llama_runner/Utils.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
#include <string_view>
1919
#include <vector>
2020

21-
namespace torch::executor {
21+
namespace example {
2222
namespace utils {
2323

2424
class Timer {
@@ -113,4 +113,4 @@ static std::string to_string(const std::vector<T> vec) {
113113
}
114114

115115
} // namespace utils
116-
} // namespace torch::executor
116+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/include/llm_types.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
#include <stddef.h>
1212
#include <strings.h>
1313

14-
namespace torch::executor {
14+
namespace example {
1515
namespace llm_helper {
1616

1717
typedef enum { INT4, INT8, INT16, FP16, INT32, FP32, INVALID } LLMType;
@@ -72,4 +72,4 @@ inline const char* getLLMTypeName(const LLMType llm_type) {
7272
}
7373

7474
} // namespace llm_helper
75-
} // namespace torch::executor
75+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/include/mask_builder.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
#include <string>
1414

15-
namespace torch::executor {
15+
namespace example {
1616
namespace llm_helper {
1717

1818
class MaskBuilder {
@@ -76,4 +76,4 @@ class MaskBuilder {
7676
};
7777

7878
} // namespace llm_helper
79-
} // namespace torch::executor
79+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/include/rotary_embedding.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
#include <string>
1414
#include <vector>
1515

16-
namespace torch::executor {
16+
namespace example {
1717
namespace llm_helper {
1818

1919
class RotaryEmbeddingMasterLut {
@@ -77,4 +77,4 @@ class RotaryEmbeddingMasterLut {
7777
};
7878

7979
} // namespace llm_helper
80-
} // namespace torch::executor
80+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/include/token_embedding.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
#include <string>
1414
#include <vector>
1515

16-
namespace torch::executor {
16+
namespace example {
1717

1818
class FileMemMapper;
1919

@@ -49,4 +49,4 @@ class TokenEmbeddingLut {
4949
};
5050

5151
} // namespace llm_helper
52-
} // namespace torch::executor
52+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/mask_builder.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
#include <executorch/runtime/platform/assert.h>
1212
#include <executorch/runtime/platform/log.h>
1313

14-
namespace torch::executor {
14+
namespace example {
1515
namespace llm_helper {
1616

1717
// Define mask values for different types
@@ -260,4 +260,4 @@ bool MaskBuilder::adjustMaskForPadding(const size_t tokenBatchSize) {
260260
}
261261

262262
} // namespace llm_helper
263-
} // namespace torch::executor
263+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/rotary_embedding.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
#include <fstream>
1717
#include <type_traits>
1818

19-
namespace torch::executor {
19+
namespace example {
2020
namespace llm_helper {
2121

2222
RotaryEmbeddingMasterLut::RotaryEmbeddingMasterLut(
@@ -394,4 +394,4 @@ size_t RotaryEmbeddingMasterLut::getRotEmbedLength() const {
394394
}
395395

396396
} // namespace llm_helper
397-
} // namespace torch::executor
397+
} // namespace example

examples/mediatek/executor_runner/llama_runner/llm_helper/token_embedding.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919

2020
namespace fs = std::filesystem;
2121

22-
namespace torch::executor {
22+
namespace example {
2323
namespace llm_helper {
2424

2525
TokenEmbeddingLut::TokenEmbeddingLut(
@@ -90,4 +90,4 @@ void TokenEmbeddingLut::lookupEmbedding(const std::vector<uint64_t>& tokens) {
9090
}
9191

9292
} // namespace llm_helper
93-
} // namespace torch::executor
93+
} // namespace example

examples/mediatek/executor_runner/mtk_executor_runner.cpp

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,11 +41,21 @@ DEFINE_string(
4141
"Model serialized in flatbuffer format.");
4242
DEFINE_int32(iteration, 1, "Iterations of inference.");
4343

44-
using namespace torch::executor;
45-
using torch::executor::util::FileDataLoader;
44+
using executorch::extension::FileDataLoader;
45+
using executorch::extension::prepare_input_tensors;
46+
using executorch::runtime::Error;
47+
using executorch::runtime::EValue;
48+
using executorch::runtime::HierarchicalAllocator;
49+
using executorch::runtime::MemoryAllocator;
50+
using executorch::runtime::MemoryManager;
51+
using executorch::runtime::Method;
52+
using executorch::runtime::MethodMeta;
53+
using executorch::runtime::Program;
54+
using executorch::runtime::Result;
55+
using executorch::runtime::Span;
4656

4757
int main(int argc, char** argv) {
48-
runtime_init();
58+
executorch::runtime::runtime_init();
4959

5060
gflags::ParseCommandLineFlags(&argc, &argv, true);
5161
if (argc != 1) {
@@ -158,7 +168,7 @@ int main(int argc, char** argv) {
158168
// Allocate input tensors and set all of their elements to 1. The `inputs`
159169
// variable owns the allocated memory and must live past the last call to
160170
// `execute()`.
161-
auto inputs = util::prepare_input_tensors(*method);
171+
auto inputs = prepare_input_tensors(*method);
162172
ET_CHECK_MSG(
163173
inputs.ok(),
164174
"Could not prepare inputs: 0x%" PRIx32,
@@ -196,7 +206,7 @@ int main(int argc, char** argv) {
196206
status = method->get_outputs(outputs.data(), outputs.size());
197207
ET_CHECK(status == Error::Ok);
198208
// Print the first and last 100 elements of long lists of scalars.
199-
std::cout << torch::executor::util::evalue_edge_items(100);
209+
std::cout << executorch::extension::evalue_edge_items(100);
200210
for (int i = 0; i < outputs.size(); ++i) {
201211
std::cout << "Output " << i << ": " << outputs[i] << std::endl;
202212
}

0 commit comments

Comments (0)