
Commit 95d7593

llama : sync gguf-llama.cpp

1 parent: c35fc0b

2 files changed (+7, -8 lines)

examples/gguf/gguf-llama-simple.cpp (1 addition, 1 deletion)

@@ -54,7 +54,7 @@ int main(int argc, char ** argv) {
     const int max_context_size     = llama_n_ctx(ctx);
     const int max_tokens_list_size = max_context_size - 4;
 
-    if ((int)tokens_list.size() > max_tokens_list_size) {
+    if ((int) tokens_list.size() > max_tokens_list_size) {
         fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) tokens_list.size(), max_tokens_list_size);
         return 1;
     }

gguf-llama.cpp (6 additions, 7 deletions)

@@ -4555,24 +4555,24 @@ int llama_token_to_str_with_model(const struct llama_model * model, llama_token
     if (0 <= token && token < llama_n_vocab_from_model(model)) {
         if (llama_is_normal_token(model->vocab, token)) {
             std::string result = model->vocab.id_to_token[token].tok;
-            if (llama_vocab_type(model->vocab) == "spm") {
+            if(llama_vocab_type(model->vocab) == "spm") {
                 result = llama_unescape_whitespace(result);
             }
             if (length < (int) result.length()) {
                 return -result.length();
             }
-            strcpy(str, result.c_str());
+            strncpy(str, result.c_str(), result.length());
             return result.length();
         } else if (llama_is_unknown_token(model->vocab, token)) {
             if (length < 3) {
                 return -3;
             }
-            strcpy(str, "\xe2\x96\x85");
+            strncpy(str, "\xe2\x96\x85", 3);
             return 3;
         } else if (llama_is_control_token(model->vocab, token)) {
             ;
         } else if (llama_is_byte_token(model->vocab, token)) {
-            if(1 > length) {
+            if (length < 1) {
                 return -1;
             }
             str[0] = llama_byte_to_char(model->vocab, token);
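Note on the strcpy -> strncpy changes in this hunk: the copy is now bounded to the number of bytes the function reports back, but strncpy(dst, src, n) with n == strlen(src) never writes a terminating NUL, so the output buffer must be consumed via the return value, not as a C string. A minimal caller sketch under that assumption (the str/length parameter names come from the function body above; this sketch is not part of the commit):

    #include <string>

    // Hypothetical caller: treat the output as a length-delimited buffer,
    // because the bounded copy above does not null-terminate it.
    static std::string token_piece(const llama_model * model, llama_token token) {
        char buf[16];
        const int n = llama_token_to_str_with_model(model, token, buf, (int) sizeof(buf));
        // n < 0 means the buffer was too small; -n is the required size
        // (the retry pattern is shown in the last hunk below).
        return n > 0 ? std::string(buf, n) : std::string();
    }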
@@ -4607,7 +4607,7 @@ int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token,
         if (length < (int) result.length()) {
             return -result.length();
         }
-        strcpy(str, result.c_str());
+        strncpy(str, result.c_str(), result.length());
         return result.length();
     }
     return 0;
@@ -4618,9 +4618,8 @@ std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token
     const int length = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
     if (length < 0) {
         result.resize(-length);
-        const int check = llama_token_to_str_bpe(ctx, token, (char*)result.data(), result.size());
+        const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
         GGML_ASSERT(check == -length);
-        GGML_UNUSED(check);
     } else {
         result.resize(length);
     }
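The wrapper in this last hunk embodies the retry protocol implied by the negative returns earlier in the diff: call once with a small buffer, and on a negative result resize to exactly -length bytes and call again. Spelled out as a standalone sketch, assuming only the conventions visible in this diff (not part of the commit):

    #include <string>
    #include <vector>

    // Resize-and-retry sketch mirroring the std::string overload above.
    static std::string bpe_piece(const llama_context * ctx, llama_token token) {
        std::vector<char> result(8, 0);
        int n = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
        if (n < 0) {
            result.resize(-n); // -n is the exact number of bytes required
            n = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
        }
        return std::string(result.data(), n); // length-delimited; no NUL guaranteed
    }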
