Skip to content

Commit d5d6731

Browse files
committed
Add BPE pre-tokenization for Command-R/R+.
1 parent 03fb8a0 commit d5d6731

File tree

5 files changed: +22 lines added, -0 lines removed

5 files changed: +22 lines added, -0 lines removed

convert-hf-to-gguf-update.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ class TOKENIZER_TYPE(IntEnum):
6464
{"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
6565
{"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
6666
{"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
67+
{"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
6768
]
6869

6970
# make directory "models/tokenizers" if it doesn't exist
@@ -104,6 +105,14 @@ def download_file_with_auth(url, token, save_path):
104105
save_path = f"models/tokenizers/{name}/tokenizer.json"
105106
download_file_with_auth(url, token, save_path)
106107

108+
# if downloaded file is less than 1KB, we likely need to download an LFS instead
109+
if os.path.getsize(save_path) < 1024:
110+
# remove the file
111+
os.remove(save_path)
112+
url = f"{repo}/resolve/main/tokenizer.json"
113+
save_path = f"models/tokenizers/{name}/tokenizer.json"
114+
download_file_with_auth(url, token, save_path)
115+
107116
if tokt == TOKENIZER_TYPE.SPM:
108117
url = f"{repo}/resolve/main/tokenizer.model"
109118
save_path = f"models/tokenizers/{name}/tokenizer.model"

convert-hf-to-gguf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -311,6 +311,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
311311
if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
312312
# ref: https://huggingface.co/smallcloudai/Refact-1_6-base
313313
res = "refact"
314+
if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
315+
# ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
316+
res = "command-r"
314317

315318
if res is None:
316319
logger.warning("\n")

llama.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4386,6 +4386,9 @@ static void llm_load_vocab(
43864386
} else if (
43874387
tokenizer_pre == "refact") {
43884388
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
4389+
} else if (
4390+
tokenizer_pre == "command-r") {
4391+
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
43894392
} else {
43904393
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
43914394
}
@@ -12248,6 +12251,11 @@ struct llm_tokenizer_bpe {
1224812251
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
1224912252
});
1225012253
break;
12254+
case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
12255+
word_collection = unicode_regex_split(text, {
12256+
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
12257+
});
12258+
break;
1225112259
default:
1225212260
// default regex for BPE tokenization pre-processing
1225312261
word_collection = unicode_regex_split(text, {

llama.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ extern "C" {
8080
LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
8181
LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
8282
LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
83+
LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
8384
};
8485

8586
// note: these values should be synchronized with ggml_rope

tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE
8383
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
8484
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
8585
llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
86+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
8687

8788
# build test-tokenizer-1-bpe target once and add many tests
8889
add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)

0 commit comments

Comments (0)