Skip to content

Commit 79e5010

Browse files
committed
Update llama.cpp
1 parent ecc93d0 commit 79e5010

File tree

1 file changed

+15
-1
lines changed

1 file changed

+15
-1
lines changed

src/llama.cpp

Lines changed: 15 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1562,6 +1562,7 @@ enum llm_chat_template {
 1562 1562      LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
 1563 1563      LLM_CHAT_TEMPLATE_MISTRAL_V7,
 1564 1564      LLM_CHAT_TEMPLATE_PHI_3,
      1565 +    LLM_CHAT_TEMPLATE_FALCON_3,
 1565 1566      LLM_CHAT_TEMPLATE_ZEPHYR,
 1566 1567      LLM_CHAT_TEMPLATE_MONARCH,
 1567 1568      LLM_CHAT_TEMPLATE_GEMMA,
@@ -1593,6 +1594,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
 1593 1594      { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
 1594 1595      { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 },
 1595 1596      { "phi3", LLM_CHAT_TEMPLATE_PHI_3 },
      1597 +    { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 },
 1596 1598      { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR },
 1597 1599      { "monarch", LLM_CHAT_TEMPLATE_MONARCH },
 1598 1600      { "gemma", LLM_CHAT_TEMPLATE_GEMMA },
@@ -6377,7 +6379,8 @@ static void llm_load_vocab(
 6377 6379          } else if (
 6378 6380                  tokenizer_pre == "llama3" ||
 6379 6381                  tokenizer_pre == "llama-v3" ||
 6380      -                tokenizer_pre == "llama-bpe") {
      6382 +                tokenizer_pre == "llama-bpe" ||
      6383 +                tokenizer_pre == "falcon3") {
 6381 6384              vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
 6382 6385              vocab.tokenizer_ignore_merges = true;
 6383 6386              vocab.tokenizer_add_bos = true;
@@ -21794,6 +21797,8 @@ static llm_chat_template llama_chat_detect_template(const std::string & tmpl) {
 21794 21797          }
 21795 21798      } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
 21796 21799          return LLM_CHAT_TEMPLATE_PHI_3;
       21800 +    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
       21801 +        return LLM_CHAT_TEMPLATE_FALCON_3;
 21797 21802      } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
 21798 21803          return LLM_CHAT_TEMPLATE_ZEPHYR;
 21799 21804      } else if (tmpl_contains("bos_token + message['role']")) {
@@ -21944,6 +21949,15 @@ static int32_t llama_chat_apply_template_internal(
 21944 21949          if (add_ass) {
 21945 21950              ss << "<|assistant|>\n";
 21946 21951          }
       21952 +    } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) {
       21953 +        // Falcon 3
       21954 +        for (auto message : chat) {
       21955 +            std::string role(message->role);
       21956 +            ss << "<|" << role << "|>\n" << message->content << "\n";
       21957 +        }
       21958 +        if (add_ass) {
       21959 +            ss << "<|assistant|>\n";
       21960 +        }
 21947 21961      } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) {
 21948 21962          // zephyr template
 21949 21963          for (auto message : chat) {

0 commit comments

Comments (0)