Skip to content

Commit e5d6c25

Browse files
authored
llama-chat : fix typo GML --> GLM (#13143)
1 parent f0dd6a1 commit e5d6c25

File tree

2 files changed

+8
-8
lines changed

2 files changed

+8
-8
lines changed

src/llama-chat.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
5050
{ "deepseek3", LLM_CHAT_TEMPLATE_DEEPSEEK_3 },
5151
{ "command-r", LLM_CHAT_TEMPLATE_COMMAND_R },
5252
{ "llama3", LLM_CHAT_TEMPLATE_LLAMA_3 },
53-
{ "chatglm3", LLM_CHAT_TEMPLATE_CHATGML_3 },
54-
{ "chatglm4", LLM_CHAT_TEMPLATE_CHATGML_4 },
53+
{ "chatglm3", LLM_CHAT_TEMPLATE_CHATGLM_3 },
54+
{ "chatglm4", LLM_CHAT_TEMPLATE_CHATGLM_4 },
5555
{ "glmedge", LLM_CHAT_TEMPLATE_GLMEDGE },
5656
{ "minicpm", LLM_CHAT_TEMPLATE_MINICPM },
5757
{ "exaone3", LLM_CHAT_TEMPLATE_EXAONE_3 },
@@ -123,7 +123,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
123123
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
124124
return LLM_CHAT_TEMPLATE_PHI_3;
125125
} else if (tmpl_contains("[gMASK]<sop>")) {
126-
return LLM_CHAT_TEMPLATE_CHATGML_4;
126+
return LLM_CHAT_TEMPLATE_CHATGLM_4;
127127
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
128128
return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
129129
} else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) {
@@ -156,7 +156,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
156156
return LLM_CHAT_TEMPLATE_LLAMA_3;
157157
} else if (tmpl_contains("[gMASK]sop")) {
158158
// chatglm3-6b
159-
return LLM_CHAT_TEMPLATE_CHATGML_3;
159+
return LLM_CHAT_TEMPLATE_CHATGLM_3;
160160
} else if (tmpl_contains(LU8("<用户>"))) {
161161
// MiniCPM-3B-OpenHermes-2.5-v2-GGUF
162162
return LLM_CHAT_TEMPLATE_MINICPM;
@@ -437,7 +437,7 @@ int32_t llm_chat_apply_template(
437437
if (add_ass) {
438438
ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
439439
}
440-
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) {
440+
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_3) {
441441
// chatglm3-6b
442442
ss << "[gMASK]" << "sop";
443443
for (auto message : chat) {
@@ -447,7 +447,7 @@ int32_t llm_chat_apply_template(
447447
if (add_ass) {
448448
ss << "<|assistant|>";
449449
}
450-
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
450+
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_4) {
451451
ss << "[gMASK]" << "<sop>";
452452
for (auto message : chat) {
453453
std::string role(message->role);

src/llama-chat.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,8 @@ enum llm_chat_template {
2929
LLM_CHAT_TEMPLATE_DEEPSEEK_3,
3030
LLM_CHAT_TEMPLATE_COMMAND_R,
3131
LLM_CHAT_TEMPLATE_LLAMA_3,
32-
LLM_CHAT_TEMPLATE_CHATGML_3,
33-
LLM_CHAT_TEMPLATE_CHATGML_4,
32+
LLM_CHAT_TEMPLATE_CHATGLM_3,
33+
LLM_CHAT_TEMPLATE_CHATGLM_4,
3434
LLM_CHAT_TEMPLATE_GLMEDGE,
3535
LLM_CHAT_TEMPLATE_MINICPM,
3636
LLM_CHAT_TEMPLATE_EXAONE_3,

0 commit comments

Comments (0)