
Commit db2ffd5

llama : fix mpt and olmo pre-tokenizer
1 parent 1c5eba6 commit db2ffd5

File tree

1 file changed: +28 -7 lines changed


src/llama.cpp

Lines changed: 28 additions & 7 deletions
@@ -5170,6 +5170,28 @@ static void llm_load_vocab(
         vocab.token_to_id[word] = i;
         vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size());

+        // TODO: properly handle pre-normalized added_tokens and remove this
+        // handle space tokens with dual tokens,
+        // like the pre-normalized added_tokens
+        // of neox-style tokenizers (mpt, olmo, stablelm, etc)
+        if (word.find(' ') != std::string::npos) {
+            // same as in the internal `unicode_byte_encoding_process`
+            // TODO: extract and expose this in some unicode_* function
+            std::string text_utf;
+            auto utf_word = unicode_cpts_from_utf8(word);
+            for (size_t i = 0; i < utf_word.size(); ++i) {
+                text_utf += unicode_cpt_to_utf8(utf_word[i]);
+            }
+
+            std::string encoded_token;
+            for (char & c : text_utf) {
+                encoded_token += unicode_byte_to_utf8(c);
+            }
+
+            // override token id
+            vocab.token_to_id[encoded_token] = i;
+        }
+
         auto & token_data = vocab.id_to_token[i];
         token_data.text = std::move(word);
         token_data.score = scores ? scores[i] : 0.0f;
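
For context, here is a minimal standalone sketch (not llama.cpp code) of what the "dual token" byte-encoding above produces for a pre-normalized added token that contains literal spaces. It assumes the GPT-2 style byte-level convention in which the space byte 0x20 is represented by the codepoint U+0120 ("Ġ"); only the space case is handled here, whereas the real byte-to-UTF-8 table covers all 256 byte values.

#include <iostream>
#include <string>

// Simplified stand-in for the byte-to-UTF-8 mapping: only the space byte is
// translated; printable ASCII is assumed to pass through unchanged.
static std::string byte_encode_spaces(const std::string & word) {
    std::string encoded;
    for (unsigned char c : word) {
        if (c == ' ') {
            encoded += "\xC4\xA0"; // UTF-8 for U+0120 ("Ġ")
        } else {
            encoded += (char) c;
        }
    }
    return encoded;
}

int main() {
    const std::string raw = "  hello"; // added token with two leading spaces
    // prints "ĠĠhello", the byte-encoded spelling that receives the same token id
    std::cout << byte_encode_spaces(raw) << "\n";
}

With the change above, both spellings of such a token map to the same id in vocab.token_to_id, so a lookup succeeds whether the pre-tokenizer hands over the raw or the byte-encoded form.
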
@@ -13890,13 +13912,9 @@ struct llm_tokenizer_bpe {
                 };
                 break;
             case LLAMA_VOCAB_PRE_TYPE_MPT:
-                // TODO: MPT pre-tokenization regexes are unknown
-                // the following are close, but not exact. run the following:
-                // ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf
-                GGML_ASSERT("MPT pre-tokenization regexes are unknown - fixes needed");
+            case LLAMA_VOCAB_PRE_TYPE_OLMO:
                 regex_exprs = {
-                    "\\s?\\p{L}+",
-                    "\\s?\\p{P}+",
+                    "[ ]{2,24}", // the spaces from the added_tokens are split separately
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 };
                 break;
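
To illustrate why the "[ ]{2,24}" rule is listed first, here is a rough sketch of the splitting behavior using std::regex (an assumption for readability; the real code goes through unicode_regex_split, not std::regex): runs of two or more spaces are carved out as standalone pieces, so they can line up with the space-only added_tokens, while the surrounding text falls through to the GPT-2 style expression on the next line.

#include <iostream>
#include <regex>
#include <string>
#include <vector>

int main() {
    const std::string text = "hello    world"; // four spaces in the middle
    const std::regex  spaces("[ ]{2,24}");

    std::vector<std::string> pieces;
    size_t last = 0;
    for (auto it = std::sregex_iterator(text.begin(), text.end(), spaces);
         it != std::sregex_iterator(); ++it) {
        if ((size_t) it->position() > last) {
            pieces.push_back(text.substr(last, it->position() - last)); // text before the space run
        }
        pieces.push_back(it->str());                                    // the space run itself
        last = it->position() + it->length();
    }
    if (last < text.size()) {
        pieces.push_back(text.substr(last));
    }

    for (const auto & p : pieces) {
        std::cout << "[" << p << "]\n"; // [hello] [    ] [world]
    }
}
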
@@ -13909,7 +13927,6 @@ struct llm_tokenizer_bpe {
                 };
                 break;
             case LLAMA_VOCAB_PRE_TYPE_GPT2:
-            case LLAMA_VOCAB_PRE_TYPE_OLMO:
                 regex_exprs = {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 };
@@ -13985,6 +14002,10 @@ struct llm_tokenizer_bpe {
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         int final_prev_index = -1;

+        // FIXME: pre-tokenize added_tokens (user-defined tokens) before other pre-tokenization
+        // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
+        // (useful for neox-style tokenizers)
+
         const auto word_collection = unicode_regex_split(text, regex_exprs);

         symbols_final.clear();
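
The FIXME above refers to splitting the input on added (user-defined) tokens before the regex pre-tokenizer runs, which is what the referenced huggingface/tokenizers code does. Below is a hedged sketch of that idea only; split_on_added_tokens and the example token list are illustrative names, not llama.cpp API.

#include <iostream>
#include <string>
#include <vector>

// Split text around exact occurrences of added tokens, keeping each added
// token as its own piece. A real implementation would also avoid re-splitting
// pieces that are themselves added tokens and would look them up directly
// instead of sending them through BPE.
static std::vector<std::string> split_on_added_tokens(
        const std::string & text,
        const std::vector<std::string> & added_tokens) {
    std::vector<std::string> pieces = { text };
    for (const auto & tok : added_tokens) {
        std::vector<std::string> next;
        for (const auto & piece : pieces) {
            size_t pos = 0, hit;
            while ((hit = piece.find(tok, pos)) != std::string::npos) {
                if (hit > pos) {
                    next.push_back(piece.substr(pos, hit - pos));
                }
                next.push_back(tok);
                pos = hit + tok.size();
            }
            if (pos < piece.size()) {
                next.push_back(piece.substr(pos));
            }
        }
        pieces = std::move(next);
    }
    return pieces;
}

int main() {
    for (const auto & p : split_on_added_tokens("foo<|endoftext|>bar", { "<|endoftext|>" })) {
        std::cout << "[" << p << "]\n"; // [foo] [<|endoftext|>] [bar]
    }
}

Only the regex rules after such a split would then need to approximate the model's pre-tokenizer, which is why the FIXME calls this useful for neox-style tokenizers.
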

0 commit comments
