Skip to content

Commit 41b9260

Browse files
ezosa and ggerganov authored
convert : add Poro-34B-chat tokenizer support (#7713)
* support for Poro chat pre-tokenizer
* add support for Poro pre-tokenizer
* Update convert-hf-to-gguf-update.py

Co-authored-by: Georgi Gerganov <[email protected]>

* Change Poro-34B-chat to poro-chat
* Change Poro-34B-chat to poro-chat
* Update convert-hf-to-gguf-update.py
* Update llama.cpp

---------

Co-authored-by: Georgi Gerganov <[email protected]>
1 parent 172c825 commit 41b9260

File tree

4 files changed

+13
-0
lines changed

4 files changed

+13
-0
lines changed

convert-hf-to-gguf-update.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@ class TOKENIZER_TYPE(IntEnum):
8383
{"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
8484
{"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
8585
{"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
86+
{"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", },
8687
{"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", },
8788
]
8889

convert-hf-to-gguf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -477,6 +477,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
477477
if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
478478
# ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
479479
res = "smaug-bpe"
480+
if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
481+
# ref: https://huggingface.co/LumiOpen/Poro-34B-chat
482+
res = "poro-chat"
480483
if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
481484
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
482485
res = "jina-v2-code"

llama.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4713,6 +4713,9 @@ static void llm_load_vocab(
47134713
} else if (
47144714
tokenizer_pre == "smaug-bpe") {
47154715
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
4716+
} else if (
4717+
tokenizer_pre == "poro-chat") {
4718+
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
47164719
} else {
47174720
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
47184721
}
@@ -13028,6 +13031,11 @@ struct llm_tokenizer_bpe {
1302813031
"(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
1302913032
});
1303013033
break;
13034+
case LLAMA_VOCAB_PRE_TYPE_PORO:
13035+
word_collection = unicode_regex_split(text, {
13036+
" ?[^(\\s|.,!?…。,、।۔،)]+",
13037+
});
13038+
break;
1303113039
default:
1303213040
// default regex for BPE tokenization pre-processing
1303313041
word_collection = unicode_regex_split(text, {

llama.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@ extern "C" {
8686
LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
8787
LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
8888
LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
89+
LLAMA_VOCAB_PRE_TYPE_PORO = 15,
8990
};
9091

9192
// note: these values should be synchronized with ggml_rope

0 commit comments

Comments (0)