Skip to content

Commit 3fb03dc

Browse files
Fix llama runner build (#4817)
* Fix llama runner build
* Fix linter error
1 parent 801e1c9 commit 3fb03dc

File tree

2 files changed

+9
-8
lines changed

2 files changed

+9
-8
lines changed

examples/mediatek/CMakeLists.txt

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -105,11 +105,10 @@ if(${ANDROID})
105105
_mtk_llama_executor_runner__srcs
106106
${CMAKE_CURRENT_LIST_DIR}/executor_runner/mtk_llama_executor_runner.cpp
107107
)
108-
109108
# Build ABSL and RE2
110-
set(LLAMA2_EXAMPLE_MODEL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../examples/models/llama2)
111-
set(THIRD_PARTY_ABSL_DIR ${LLAMA2_EXAMPLE_MODEL_DIR}/third-party/abseil-cpp)
112-
set(THIRD_PARTY_RE2_DIR ${LLAMA2_EXAMPLE_MODEL_DIR}/third-party/re2)
109+
set(EXTENSIONS_LLM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../extension/llm)
110+
set(THIRD_PARTY_ABSL_DIR ${EXTENSIONS_LLM_DIR}/third-party/abseil-cpp)
111+
set(THIRD_PARTY_RE2_DIR ${EXTENSIONS_LLM_DIR}/third-party/re2)
113112
set(ABSL_ENABLE_INSTALL ON)
114113
set(ABSL_PROPAGATE_CXX_STD ON)
115114
set(_pic_flag ${CMAKE_POSITION_INDEPENDENT_CODE})
@@ -119,7 +118,7 @@ if(${ANDROID})
119118
set(CMAKE_POSITION_INDEPENDENT_CODE ${_pic_flag})
120119

121120
# Build tokenizers
122-
set(LLAMA2_TOKENIZER_DIR ${LLAMA2_EXAMPLE_MODEL_DIR}/tokenizer)
121+
set(LLAMA2_TOKENIZER_DIR ${EXTENSIONS_LLM_DIR}/tokenizer)
123122
add_library(tokenizer STATIC)
124123
target_include_directories(tokenizer
125124
PUBLIC
@@ -135,6 +134,7 @@ if(${ANDROID})
135134
PRIVATE
136135
${LLAMA2_TOKENIZER_DIR}/tiktoken.cpp
137136
${LLAMA2_TOKENIZER_DIR}/bpe_tokenizer.cpp
137+
${CMAKE_CURRENT_SOURCE_DIR}/../models/llama2/tokenizer/llama_tiktoken.cpp
138138
)
139139

140140
# Include directory for neuron headers

examples/mediatek/executor_runner/mtk_llama_executor_runner.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,9 @@
6868
#include "llama_runner/Utils.h"
6969
#include "llama_runner/llm_helper/include/llm_types.h"
7070

71-
#include <executorch/examples/models/llama2/tokenizer/bpe_tokenizer.h>
72-
#include <executorch/examples/models/llama2/tokenizer/tiktoken.h>
71+
#include <executorch/examples/models/llama2/tokenizer/llama_tiktoken.h>
72+
#include <executorch/extension/llm/tokenizer/bpe_tokenizer.h>
73+
#include <executorch/extension/llm/tokenizer/tiktoken.h>
7374

7475
// Llama model options
7576
DEFINE_uint64(
@@ -316,7 +317,7 @@ std::unique_ptr<Tokenizer> load_tokenizer() {
316317
if (FLAGS_tokenizer_type == "bpe") {
317318
tokenizer = std::make_unique<torch::executor::BPETokenizer>();
318319
} else if (FLAGS_tokenizer_type == "tiktoken") {
319-
tokenizer = std::make_unique<torch::executor::Tiktoken>();
320+
tokenizer = torch::executor::get_tiktoken_for_llama();
320321
}
321322
ET_CHECK_MSG(
322323
tokenizer, "Invalid tokenizer type: %s", FLAGS_tokenizer_type.c_str());

0 commit comments

Comments (0)