
Let models provide their own specific special tokens #4227


Closed
wants to merge 6 commits
2 changes: 1 addition & 1 deletion examples/models/llama2/runner/CMakeLists.txt
@@ -43,7 +43,7 @@ target_include_directories(

if(EXECUTORCH_USE_TIKTOKEN)
list(APPEND _llama_runner__srcs
${CMAKE_CURRENT_SOURCE_DIR}/../tokenizer/tiktoken.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../tokenizer/llama_tiktoken.cpp
)
set(_preprocessor_flag -DET_USE_TIKTOKEN)
endif()
4 changes: 2 additions & 2 deletions examples/models/llama2/runner/runner.cpp
@@ -11,7 +11,7 @@

#include <executorch/examples/models/llama2/runner/runner.h>
#if ET_USE_TIKTOKEN
#include <executorch/examples/models/llama2/tokenizer/tiktoken.h>
#include <executorch/examples/models/llama2/tokenizer/llama_tiktoken.h>
#else /* BPE */
#include <executorch/examples/models/llama2/tokenizer/bpe_tokenizer.h>
#endif /* ET_USE_TIKTOKEN*/
@@ -81,7 +81,7 @@ Error Runner::load() {

// Load tokenizer
#if ET_USE_TIKTOKEN
tokenizer_ = std::make_unique<Tiktoken>();
tokenizer_ = get_tiktoken_for_llama();
#else
tokenizer_ = std::make_unique<BPETokenizer>();
#endif
90 changes: 90 additions & 0 deletions examples/models/llama2/tokenizer/llama_tiktoken.cpp
@@ -0,0 +1,90 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/examples/models/llama2/tokenizer/llama_tiktoken.h>

namespace torch {
namespace executor {
namespace {
static constexpr int32_t kSpecialTokensSize = 256;
static constexpr size_t kBOSTokenIndex = 0;
static constexpr size_t kEOSTokenIndex = 1;

static inline std::unique_ptr<std::vector<std::string>>
_get_default_special_tokens() {
auto special_tokens =
std::make_unique<std::vector<std::string>>(std::vector<std::string>{
"<|begin_of_text|>",
"<|end_of_text|>",
"<|reserved_special_token_0|>",
"<|reserved_special_token_1|>",
"<|reserved_special_token_2|>",
"<|reserved_special_token_3|>",
"<|start_header_id|>",
"<|end_header_id|>",
"<|reserved_special_token_4|>",
"<|eot_id|>"});

// pad the rest of the special tokens with reserved tokens
ssize_t reserved_special_token_num = 5;
while (special_tokens->size() < kSpecialTokensSize) {
special_tokens->emplace_back(
"<|reserved_special_token_" +
std::to_string(reserved_special_token_num++) + "|>");
}
return special_tokens;
}

static inline std::unique_ptr<std::vector<std::string>>
_get_multimodal_special_tokens() {
auto special_tokens =
std::make_unique<std::vector<std::string>>(std::vector<std::string>{
"<|begin_of_text|>",
"<|end_of_text|>",
"<|reserved_special_token_0|>",
"<|reserved_special_token_1|>",
"<|reserved_special_token_2|>",
"<|reserved_special_token_3|>",
"<|start_header_id|>",
"<|end_header_id|>",
"<|eom_id|>",
"<|eot_id|>",
"<|image|>"});

// pad the rest of the special tokens with reserved tokens except the last
// one
ssize_t reserved_special_token_num = 4;
while (special_tokens->size() < kSpecialTokensSize - 1) {
special_tokens->emplace_back(
"<|reserved_special_token_" +
std::to_string(reserved_special_token_num++) + "|>");
}

special_tokens->emplace_back("<|python_tag|>");

return special_tokens;
}

std::unique_ptr<std::vector<std::string>> _get_special_tokens(Version version) {
switch (version) {
case MULTIMODAL:
return _get_multimodal_special_tokens();
default:
return _get_default_special_tokens();
}
}

} // namespace

std::unique_ptr<Tiktoken> get_tiktoken_for_llama(Version version) {
return std::make_unique<Tiktoken>(
_get_special_tokens(version), kBOSTokenIndex, kEOSTokenIndex);
}

} // namespace executor
} // namespace torch
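
Because the special-token list, BOS index, and EOS index are now plain constructor arguments, a model other than Llama could supply its own token set the same way this helper does. A minimal sketch under stated assumptions — the token strings and indices below are illustrative placeholders, not taken from any real model:

// Illustrative only: a hypothetical non-Llama model supplying its own
// special tokens through the updated Tiktoken constructor.
auto my_tokens = std::make_unique<std::vector<std::string>>(
    std::vector<std::string>{"<|bos|>", "<|eos|>", "<|sep|>"});
auto my_tokenizer = std::make_unique<Tiktoken>(
    std::move(my_tokens),
    /*bos_token_index=*/0,
    /*eos_token_index=*/1);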
24 changes: 24 additions & 0 deletions examples/models/llama2/tokenizer/llama_tiktoken.h
@@ -0,0 +1,24 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#pragma once

#include <executorch/examples/models/llama2/tokenizer/tiktoken.h>

namespace torch {
namespace executor {

enum Version {
DEFAULT,
MULTIMODAL,
};

std::unique_ptr<Tiktoken> get_tiktoken_for_llama(Version version = DEFAULT);

} // namespace executor
} // namespace torch
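
A minimal usage sketch of the new factory, mirroring how runner.cpp and the tests call it; the "tokenizer.model" path is a placeholder and error handling is elided:

#include <executorch/examples/models/llama2/tokenizer/llama_tiktoken.h>

using namespace torch::executor;

std::unique_ptr<Tiktoken> make_llama_tokenizer() {
  // DEFAULT is the default argument; pass MULTIMODAL for the multimodal token set.
  auto tokenizer = get_tiktoken_for_llama();
  // Load the serialized tokenizer model; returns Error::Ok on success.
  const Error err = tokenizer->load("tokenizer.model");
  (void)err; // a real caller would check err == Error::Ok
  return tokenizer;
}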
2 changes: 2 additions & 0 deletions examples/models/llama2/tokenizer/targets.bzl
@@ -23,10 +23,12 @@ def define_common_targets():
name = "tiktoken",
srcs = [
"tiktoken.cpp",
"llama_tiktoken.cpp",
],
exported_headers = [
"tokenizer.h",
"tiktoken.h",
"llama_tiktoken.h",
"base64.h",
],
exported_deps = [
1 change: 1 addition & 0 deletions examples/models/llama2/tokenizer/test/CMakeLists.txt
@@ -26,6 +26,7 @@ set(
test_tiktoken.cpp
test_bpe_tokenizer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../tiktoken.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../llama_tiktoken.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../bpe_tokenizer.cpp
)

35 changes: 32 additions & 3 deletions examples/models/llama2/tokenizer/test/test_tiktoken.cpp
@@ -6,7 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/examples/models/llama2/tokenizer/tiktoken.h>
#include <executorch/examples/models/llama2/tokenizer/llama_tiktoken.h>
#include <executorch/examples/models/llama2/tokenizer/tokenizer.h>
#include <executorch/runtime/platform/runtime.h>
#include <gtest/gtest.h>
@@ -21,7 +21,7 @@ class TiktokenExtensionTest : public Test {
public:
void SetUp() override {
torch::executor::runtime_init();
tokenizer_ = std::make_unique<Tiktoken>();
tokenizer_ = get_tiktoken_for_llama();
modelPath_ = std::getenv("RESOURCES_PATH") +
std::string("/test_tiktoken_tokenizer.model");
}
@@ -34,7 +34,7 @@ class MultimodalTiktokenV5ExtensionTest : public Test {
public:
void SetUp() override {
torch::executor::runtime_init();
tokenizer_ = std::make_unique<Tiktoken>(MULTIMODAL);
tokenizer_ = get_tiktoken_for_llama(MULTIMODAL);
modelPath_ = std::getenv("RESOURCES_PATH") +
std::string("/test_tiktoken_tokenizer.model");
}
@@ -144,5 +144,34 @@ TEST_F(TiktokenExtensionTest, TokenizerDecodeOutOfRangeFails) {
EXPECT_EQ(out.error(), Error::NotSupported);
}

TEST_F(TiktokenExtensionTest, ConstructionWithInvalidBOSIndex) {
// gtest death test doesn't work on iOS:
// https://github.com/google/googletest/issues/2834
#if !GTEST_OS_IOS
EXPECT_EXIT(
std::make_unique<Tiktoken>(
std::make_unique<std::vector<std::string>>(
std::vector<std::string>{"<|end_of_text|>"}),
1,
0),
::testing::KilledBySignal(SIGABRT),
"");
#endif
}

TEST_F(TiktokenExtensionTest, ConstructionWithInvalidEOSIndex) {
// gtest death test doesn't work on iOS:
// https://github.com/google/googletest/issues/2834
#if !GTEST_OS_IOS
EXPECT_EXIT(
std::make_unique<Tiktoken>(
std::make_unique<std::vector<std::string>>(
std::vector<std::string>{"<|begin_of_text|>"}),
0,
1),
::testing::KilledBySignal(SIGABRT),
"");
#endif
}
} // namespace executor
} // namespace torch
32 changes: 29 additions & 3 deletions examples/models/llama2/tokenizer/tiktoken.cpp
@@ -329,12 +329,38 @@ std::pair<std::vector<uint64_t>, uint64_t> Tiktoken::_encode_with_special_token(
return std::make_pair(tokens, last_piece_token_len);
}

Encoder Tiktoken::_build_special_token_encoder(ssize_t num_base_tokens) const {
Encoder special_token_encoder;
for (ssize_t i = 0; i < _special_tokens->size(); ++i) {
special_token_encoder.emplace(_special_tokens->at(i), num_base_tokens + i);
}
return special_token_encoder;
}

// -------------------------private method end-------------------------------
// -------------------------public method start-------------------------------

Tiktoken::Tiktoken(
std::unique_ptr<std::vector<std::string>> special_tokens,
size_t bos_token_index,
size_t eos_token_index)
: Tokenizer(),
_special_tokens(std::move(special_tokens)),
_bos_token_index(bos_token_index),
_eos_token_index(eos_token_index) {
ET_CHECK_MSG(
_bos_token_index < _special_tokens->size(),
"invalid bos_token_index %zu",
_bos_token_index);
ET_CHECK_MSG(
_eos_token_index < _special_tokens->size(),
"invalid eos_token_index %zu",
_eos_token_index);
}

Error Tiktoken::load(const std::string& path) {
_encoder = _load_encoder(path);
_special_token_encoder = _get_special_tokens(_encoder.size());
_special_token_encoder = _build_special_token_encoder(_encoder.size());

_decoder = _build_decoder(_encoder);
_special_token_decoder = _build_decoder(_special_token_encoder);
@@ -345,8 +371,8 @@ Error Tiktoken::load(const std::string& path) {

// initialize vocab_size, bos_tok, eos_tok
vocab_size_ = _encoder.size() + _special_token_encoder.size();
bos_tok_ = _special_token_encoder.at("<|begin_of_text|>");
eos_tok_ = _special_token_encoder.at("<|end_of_text|>");
bos_tok_ = _special_token_encoder.at(_special_tokens->at(_bos_token_index));
eos_tok_ = _special_token_encoder.at(_special_tokens->at(_eos_token_index));

initialized_ = true;
return Error::Ok;
Expand Down