Skip to content

Commit a95ae75 — "llama.cpp : fix bpe tokenizer" (authored; 1 parent: ffa5099). [Browse files]

File tree: 1 file changed (+8 additions, −4 deletions).

llama.cpp — 8 additions & 4 deletions:
```diff
@@ -2839,10 +2839,14 @@ static bool llama_is_pad_token(const llama_vocab & vocab, llama_token id ) {
 }
 
 static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
     GGML_ASSERT(llama_is_byte_token(vocab, id));
     const auto& token_data = vocab.id_to_token.at(id);
     auto buf = token_data.text.substr(3, 2);
     return strtol(buf.c_str(), NULL, 16);
 }
 
 static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
-    char buf[7];
-    int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch);
-    GGML_ASSERT(0 <= result && result < 7);
-    return vocab.token_to_id.at(buf);
+    if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
+        char buf[7];
+        int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch);
+        GGML_ASSERT(0 <= result && result < 7);
+        return vocab.token_to_id.at(buf);
+    }
+    // vocab.type == LLAMA_VOCAB_TYPE_BPE
+    return vocab.token_to_id.at(std::string(1, ch));
 }
```
(Note: the scraped diff mis-attributed the changed lines to `llama_token_to_byte`; the added lines reference `ch`, which is a parameter of `llama_byte_to_token` only, and the +8/−4 line counts match a change to `llama_byte_to_token`'s 4-line body. Reconstructed accordingly.)

Comments (0)