@@ -5495,28 +5495,6 @@ static void llm_load_vocab(
             vocab.token_to_id[word] = i;
             vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size());
 
-            // TODO: properly handle pre-normalized added_tokens and remove this
-            // handle space tokens with dual tokens,
-            // like the pre-normalized added_tokens
-            // of neox-style tokenizers (mpt, olmo, stablelm, etc)
-            if (word.find(' ') != std::string::npos) {
-                // same as in the internal `unicode_byte_encoding_process`
-                // TODO: extract and expose this in some unicode_* function
-                std::string text_utf;
-                auto utf_word = unicode_cpts_from_utf8(word);
-                for (size_t i = 0; i < utf_word.size(); ++i) {
-                    text_utf += unicode_cpt_to_utf8(utf_word[i]);
-                }
-
-                std::string encoded_token;
-                for (char & c : text_utf) {
-                    encoded_token += unicode_byte_to_utf8(c);
-                }
-
-                // override token id
-                vocab.token_to_id[encoded_token] = i;
-            }
-
             auto & token_data = vocab.id_to_token[i];
             token_data.text = std::move(word);
             token_data.score = scores ? scores[i] : 0.0f;
@@ -5534,6 +5512,13 @@ static void llm_load_vocab(
                     default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
                 }
             }
+
+            if ((token_data.attr & LLAMA_TOKEN_ATTR_USER_DEFINED) && token_data.text.find('<') && token_data.text.rfind('>')) {
+                // Some models mark some added tokens which ought to be control tokens as not special.
+                // (e.g. command-r, command-r-plus, deepseek-coder)
+                // TODO: should this be fixed in the convert script instead?
+                token_data.attr = LLAMA_TOKEN_ATTR_CONTROL;
+            }
         }
         GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
 
@@ -15426,13 +15411,6 @@ struct llm_tokenizer_bpe {
                     "[0-9][0-9][0-9]",
                 };
                 break;
-            case LLAMA_VOCAB_PRE_TYPE_MPT:
-            case LLAMA_VOCAB_PRE_TYPE_OLMO:
-                regex_exprs = {
-                    "[ ]{2,24}", // the spaces from the added_tokens are split separately
-                    "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                };
-                break;
             case LLAMA_VOCAB_PRE_TYPE_STARCODER:
             case LLAMA_VOCAB_PRE_TYPE_REFACT:
             case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
@@ -15442,6 +15420,8 @@ struct llm_tokenizer_bpe {
                 };
                 break;
             case LLAMA_VOCAB_PRE_TYPE_GPT2:
+            case LLAMA_VOCAB_PRE_TYPE_MPT:
+            case LLAMA_VOCAB_PRE_TYPE_OLMO:
             case LLAMA_VOCAB_PRE_TYPE_JAIS:
                 regex_exprs = {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
@@ -15523,10 +15503,6 @@ struct llm_tokenizer_bpe {
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         int final_prev_index = -1;
 
-        // FIXME: pre-tokenize added_tokens (user-defined tokens) before other pre-tokenization
-        // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
-        // (useful for neox-style tokenizers)
-
         const auto word_collection = unicode_regex_split(text, regex_exprs);
 
         symbols_final.clear();
@@ -16192,12 +16168,20 @@ struct fragment_buffer_variant {
 
 // #define PRETOKENIZERDEBUG
 
-static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
+static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
     // for each special token
     for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
         const auto & data = vocab.id_to_token[special_id];
         const auto & special_token = data.text;
 
+        if (!parse_special && (data.attr & LLAMA_TOKEN_ATTR_CONTROL)) {
+            // Only ignore control tokens when parse_special == false
+            continue;
+            // User-defined tokens are still pre-tokenized before everything else
+            // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
+            // This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
+        }
+
         // for each text fragment
         std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
         while (it != buffer.end()) {
@@ -16310,7 +16294,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
 
     if (!raw_text.empty()) {
         fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
-        if (parse_special) tokenizer_st_partition(vocab, fragment_buffer);
+        tokenizer_st_partition(vocab, fragment_buffer, parse_special);
     }
 
     switch (vocab.type) {
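
To observe the resulting behaviour end to end, a minimal usage sketch against the public llama.h C API of this period follows, assuming llama_tokenize with the add_special/parse_special flags, llama_model_default_params, llama_load_model_from_file and a vocab_only load; the "<|im_end|>" string is a stand-in for whatever control token the loaded vocab actually defines, and the printed counts depend on the model:

#include "llama.h"

#include <cstdio>
#include <string>
#include <vector>

// Tokenize `text` with the given parse_special setting and return the token ids.
static std::vector<llama_token> tokenize(const llama_model * model, const std::string & text, bool parse_special) {
    std::vector<llama_token> tokens(text.size() + 8); // generous upper bound
    int n = llama_tokenize(model, text.c_str(), (int) text.size(),
                           tokens.data(), (int) tokens.size(),
                           /*add_special*/ false, parse_special);
    if (n < 0) {
        // a negative return value means the buffer was too small; -n is the required size
        tokens.resize(-n);
        n = llama_tokenize(model, text.c_str(), (int) text.size(),
                           tokens.data(), (int) tokens.size(),
                           /*add_special*/ false, parse_special);
    }
    tokens.resize(n > 0 ? n : 0);
    return tokens;
}

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    mparams.vocab_only = true; // only the tokenizer is needed for this comparison

    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (model == NULL) {
        llama_backend_free();
        return 1;
    }

    // "<|im_end|>" is a placeholder control-token text; multi-space runs exercise
    // the added-token handling for neox-style vocabs.
    const std::string text = "<|im_end|>  hello   world";

    const auto with_special    = tokenize(model, text, /*parse_special*/ true);
    const auto without_special = tokenize(model, text, /*parse_special*/ false);

    // With parse_special == true, control tokens in the text are matched as single ids.
    // With parse_special == false, their text is tokenized like ordinary input, while
    // user-defined (non-control) added tokens are still matched in both cases.
    printf("parse_special = true : %d tokens\n", (int) with_special.size());
    printf("parse_special = false: %d tokens\n", (int) without_special.size());

    llama_free_model(model);
    llama_backend_free();
    return 0;
}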