Skip to content

Commit b6aa670

Browse files
authored
Fix OLMo HF to GGUF conversion (ggml-org#6910)
1 parent 260b7c6 commit b6aa670

File tree

4 files changed

+12
-2
lines changed

4 files changed

+12
-2
lines changed

convert-hf-to-gguf-update.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ class TOKENIZER_TYPE(IntEnum):
6767
{"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
6868
{"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
6969
{"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
70+
{"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
7071
]
7172

7273
# make directory "models/tokenizers" if it doesn't exist

convert-hf-to-gguf.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -314,6 +314,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
314314
if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
315315
# ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
316316
res = "command-r"
317+
if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
318+
# ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
319+
res = "olmo"
317320

318321
if res is None:
319322
logger.warning("\n")
@@ -2831,8 +2834,9 @@ class OlmoModel(Model):
28312834
def set_gguf_parameters(self):
28322835
super().set_gguf_parameters()
28332836
self.gguf_writer.add_layer_norm_eps(1e-5)
2834-
if "clip_qkv" in self.hparams is not None:
2835-
self.gguf_writer.add_clamp_kqv(self.hparams["clip_qkv"])
2837+
clip_qkv = self.hparams.get("clip_qkv")
2838+
if clip_qkv is not None:
2839+
self.gguf_writer.add_clamp_kqv(clip_qkv)
28362840

28372841
# Same as super class, but permuting q_proj, k_proj
28382842
# Copied from: LlamaModel

llama.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4389,6 +4389,9 @@ static void llm_load_vocab(
43894389
} else if (
43904390
tokenizer_pre == "command-r") {
43914391
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
4392+
} else if (
4393+
tokenizer_pre == "olmo") {
4394+
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
43924395
} else {
43934396
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
43944397
}
@@ -12248,6 +12251,7 @@ struct llm_tokenizer_bpe {
1224812251
});
1224912252
break;
1225012253
case LLAMA_VOCAB_PRE_TYPE_GPT2:
12254+
case LLAMA_VOCAB_PRE_TYPE_OLMO:
1225112255
word_collection = unicode_regex_split(text, {
1225212256
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
1225312257
});

llama.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,7 @@ extern "C" {
8181
LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
8282
LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
8383
LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
84+
LLAMA_VOCAB_PRE_TYPE_OLMO = 10,
8485
};
8586

8687
// note: these values should be synchronized with ggml_rope

0 commit comments

Comments (0)