Skip to content

Commit 68d3314

Browse files
author
fmz
committed
address review comments
1 parent 1a51b36 commit 68d3314

File tree

3 files changed

+24
-0
lines changed

3 files changed

+24
-0
lines changed

convert-hf-to-gguf.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -427,6 +427,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
427427
# NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
428428
# or pull the latest version of the model from Huggingface
429429
# don't edit the hashes manually!
430+
if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
431+
# ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
432+
res = "llama-bpe"
430433
if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754":
431434
# ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base
432435
res = "deepseek-llm"
@@ -454,12 +457,18 @@ def get_vocab_base_pre(self, tokenizer) -> str:
454457
if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff":
455458
# ref: https://huggingface.co/smallcloudai/Refact-1_6-base
456459
res = "refact"
460+
if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8":
461+
# ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01
462+
res = "command-r"
457463
if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea":
458464
# ref: https://huggingface.co/Qwen/Qwen1.5-7B
459465
res = "qwen2"
460466
if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166":
461467
# ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
462468
res = "olmo"
469+
if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
470+
# ref: https://huggingface.co/databricks/dbrx-base
471+
res = "dbrx"
463472
if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
464473
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
465474
res = "jina-v2-en"
@@ -2768,6 +2777,7 @@ def write_tensors(self):
27682777
if len(experts) > 0:
27692778
raise ValueError(f"Unprocessed experts: {experts}")
27702779

2780+
27712781
@Model.register("T5ForConditionalGeneration")
27722782
@Model.register("T5WithLMHeadModel")
27732783
class T5Model(Model):

include/llama.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -651,6 +651,11 @@ extern "C" {
651651
// State / sessions
652652
//
653653

654+
// hack
655+
void llama_set_logits_all(
656+
struct llama_context * ctx,
657+
bool logits_all);
658+
654659
// Returns the maximum size in bytes of the state (rng, logits, embedding
655660
// and kv_cache) - will often be smaller after compacting tokens
656661
LLAMA_API size_t llama_state_get_size(const struct llama_context * ctx);

src/llama.cpp

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4254,6 +4254,7 @@ static const char * llama_model_type_name(e_model type) {
42544254
case MODEL_410M: return "410M";
42554255
case MODEL_0_5B: return "0.5B";
42564256
case MODEL_1B: return "1B";
4257+
case MODEL_1_3B: return "1.3B";
42574258
case MODEL_1_4B: return "1.4B";
42584259
case MODEL_2B: return "2B";
42594260
case MODEL_2_8B: return "2.8B";
@@ -12904,6 +12905,13 @@ static void llama_graph_compute(
1290412905
// fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
1290512906
}
1290612907

12908+
void llama_set_logits_all(
12909+
struct llama_context * ctx,
12910+
bool logits_all
12911+
) {
12912+
ctx->logits_all = logits_all;
12913+
}
12914+
1290712915
// decode a batch of tokens by evaluating the transformer
1290812916
//
1290912917
// - lctx: llama context
@@ -13851,6 +13859,7 @@ struct llm_tokenizer_bpe {
1385113859
break;
1385213860
case LLAMA_VOCAB_PRE_TYPE_GPT2:
1385313861
case LLAMA_VOCAB_PRE_TYPE_OLMO:
13862+
case LLAMA_VOCAB_PRE_TYPE_JAIS:
1385413863
regex_exprs = {
1385513864
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
1385613865
};

0 commit comments

Comments (0)