Skip to content

Commit e49ffeb

Browse files
MollySophia authored and arthw committed
llama : add chat template for RWKV-World + fix EOT (ggml-org#9968)
* Add chat template for RWKV-World

  Signed-off-by: Molly Sophia <[email protected]>

* RWKV: Fix the chat template not being used

  Signed-off-by: Molly Sophia <[email protected]>

* RWKV v6: Set EOT token to ``\n\n``

  Signed-off-by: Molly Sophia <[email protected]>

* readme: add rwkv into supported model list

  Signed-off-by: Molly Sophia <[email protected]>

---------

Signed-off-by: Molly Sophia <[email protected]>
1 parent fe14ce7 commit e49ffeb

File tree

4 files changed

+16
-0
lines changed

4 files changed

+16
-0
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,7 @@ Typically finetunes of the base models below are supported as well.
127127
- [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a)
128128
- [x] [Jais](https://huggingface.co/inceptionai/jais-13b-chat)
129129
- [x] [Bielik-11B-v2.3](https://huggingface.co/collections/speakleash/bielik-11b-v23-66ee813238d9b526a072408a)
130+
- [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM)
130131

131132
(instructions for supporting more models: [HOWTO-add-model.md](./docs/development/HOWTO-add-model.md))
132133

convert_hf_to_gguf.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2864,6 +2864,8 @@ def set_vocab(self):
28642864
self.gguf_writer.add_token_list(tokens)
28652865
self.gguf_writer.add_token_types(toktypes)
28662866
special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
2867+
special_vocab.chat_template = "rwkv-world"
2868+
special_vocab._set_special_token("eot", 261)
28672869
special_vocab.add_to_gguf(self.gguf_writer)
28682870

28692871
def set_gguf_parameters(self):

src/llama.cpp

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21703,6 +21703,15 @@ static int32_t llama_chat_apply_template_internal(
2170321703
if (add_ass) {
2170421704
ss << "[|assistant|]";
2170521705
}
21706+
} else if (tmpl == "rwkv-world" || tmpl_contains("rwkv-world") || tmpl_contains("'User: ' + message['content'] + '\n\nAssistant:'")) {
21707+
for (auto message : chat) {
21708+
std::string role(message->role);
21709+
if (role == "user") {
21710+
ss << "User: " << message->content << "\n\nAssistant:";
21711+
} else {
21712+
ss << message->content << "\n\n";
21713+
}
21714+
}
2170621715
} else {
2170721716
// template not supported
2170821717
return -1;

tests/test-chat-template.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,8 @@ int main(void) {
6565
u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
6666
// DeepSeek-V2
6767
"{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
68+
// RWKV-World
69+
"{% for message in messages %}{% if message['role'] == 'user' %}{{'User: ' + message['content'] + '\n\nAssistant:'}}{% else %}{{message['content'] + '\n\n'}}{% endif %}{% endfor %}",
6870
};
6971
std::vector<std::string> expected_output = {
7072
// teknium/OpenHermes-2.5-Mistral-7B
@@ -109,6 +111,8 @@ int main(void) {
109111
u8"You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>",
110112
// DeepSeek-V2
111113
u8"You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:",
114+
// RWKV-World
115+
"You are a helpful assistant\n\nUser: Hello\n\nAssistant:Hi there\n\nUser: Who are you\n\nAssistant: I am an assistant \n\nUser: Another question\n\nAssistant:",
112116
};
113117
std::vector<char> formatted_chat(1024);
114118
int32_t res;

0 commit comments

Comments (0)