
Commit a8d4afb
Author: fmz
Message: address review comments
Parent: a067ed8

3 files changed, 24 insertions(+), 0 deletions(-)


convert-hf-to-gguf.py

Lines changed: 10 additions & 0 deletions
@@ -427,6 +427,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         # NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
         # or pull the latest version of the model from Huggingface
         # don't edit the hashes manually!
+        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
+            # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
+            res = "llama-bpe"
         if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
             # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
             res = "deepseek-llm"
@@ -454,12 +457,18 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
             # ref: https://huggingface.co/smallcloudai/Refact-1_6-base
             res = "refact"
+        if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
+            # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
+            res = "command-r"
         if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
             # ref: https://huggingface.co/Qwen/Qwen1.5-7B
             res = "qwen2"
         if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
             # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
             res = "olmo"
+        if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
+            # ref: https://huggingface.co/databricks/dbrx-base
+            res = "dbrx"
         if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
             # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
             res = "jina-v2-en"
@@ -2811,6 +2820,7 @@ def write_tensors(self):
         if len(experts) > 0:
             raise ValueError(f"Unprocessed experts: {experts}")
 
+
 @Model.register("T5ForConditionalGeneration")
 @Model.register("T5WithLMHeadModel")
 class T5Model(Model):
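
The chkhsh additions in the first two hunks follow the pattern generated by convert-hf-to-gguf-update.py: each hash is a fingerprint of a model's pre-tokenizer behavior, taken as a SHA-256 over the tokenization of a fixed probe text, which is why the diff's own comment warns not to edit the hashes by hand. A minimal sketch of how such a fingerprint is produced; the model id and probe text here are illustrative stand-ins, not the exact values the update script uses:

# sketch: reproduce a pre-tokenizer fingerprint for one model
from hashlib import sha256
from transformers import AutoTokenizer

def pretokenizer_fingerprint(model_id: str, probe_text: str) -> str:
    # hash the token ids produced for a fixed probe text; two models share
    # a fingerprint only if their pre-tokenizers behave identically on it
    tok = AutoTokenizer.from_pretrained(model_id)
    return sha256(str(tok.encode(probe_text)).encode()).hexdigest()

# e.g. pretokenizer_fingerprint("meta-llama/Meta-Llama-3-8B", probe_text)
# should match "0ef9807a..." when probe_text equals the script's chktxt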

include/llama.h

Lines changed: 5 additions & 0 deletions
@@ -652,6 +652,11 @@ extern "C" {
     // State / sessions
     //
 
+    // hack
+    void llama_set_logits_all(
+        struct llama_context * ctx,
+        bool logits_all);
+
     // Returns the maximum size in bytes of the state (rng, logits, embedding
     // and kv_cache) - will often be smaller after compacting tokens
     LLAMA_API size_t llama_state_get_size(const struct llama_context * ctx);
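
The llama_set_logits_all() declaration above is explicitly labeled a hack: it flips the context's logits_all flag after the context has been created (see the src/llama.cpp hunk below), so a caller can ask for logits for every token in a batch rather than only the last one. A hedged sketch of driving it from Python through ctypes; the shared-library name, whether the symbol is actually exported, and how the context pointer is obtained are assumptions, not part of this commit:

# sketch: toggling logits_all on an existing llama_context via ctypes
import ctypes

lib = ctypes.CDLL("libllama.so")  # assumed library name/path
lib.llama_set_logits_all.restype = None
lib.llama_set_logits_all.argtypes = [ctypes.c_void_p, ctypes.c_bool]

def set_logits_all(ctx_ptr: int, enabled: bool) -> None:
    # ctx_ptr: address of a struct llama_context created elsewhere
    lib.llama_set_logits_all(ctypes.c_void_p(ctx_ptr), enabled)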

src/llama.cpp

Lines changed: 9 additions & 0 deletions
@@ -4281,6 +4281,7 @@ static const char * llama_model_type_name(e_model type) {
         case MODEL_410M: return "410M";
         case MODEL_0_5B: return "0.5B";
         case MODEL_1B:   return "1B";
+        case MODEL_1_3B: return "1.3B";
         case MODEL_1_4B: return "1.4B";
         case MODEL_2B:   return "2B";
         case MODEL_2_8B: return "2.8B";
@@ -13105,6 +13106,13 @@ static void llama_graph_compute(
     // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
 }
 
+void llama_set_logits_all(
+        struct llama_context * ctx,
+        bool logits_all
+) {
+    ctx->logits_all = logits_all;
+}
+
 // decode a batch of tokens by evaluating the transformer
 //
 // - lctx: llama context
@@ -14052,6 +14060,7 @@ struct llm_tokenizer_bpe {
                 break;
             case LLAMA_VOCAB_PRE_TYPE_GPT2:
             case LLAMA_VOCAB_PRE_TYPE_OLMO:
+            case LLAMA_VOCAB_PRE_TYPE_JAIS:
                 regex_exprs = {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 };
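
The last hunk routes LLAMA_VOCAB_PRE_TYPE_JAIS through the same GPT-2 style split regex already used for the GPT2 and OLMO pre-tokenizer types. A small sketch of what that split pattern does, written with the Python `regex` module because, unlike the standard `re`, it supports the \p{L} and \p{N} classes (llama.cpp applies the pattern with its own unicode-aware splitter rather than std::regex):

# sketch: the GPT-2 style pre-tokenizer split that JAIS reuses
import regex

GPT2_SPLIT = r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)"

pieces = regex.findall(GPT2_SPLIT, "Hello world, it's 2024!")
# expected: ['Hello', ' world', ',', ' it', "'s", ' 2024', '!']
print(pieces)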
