Skip to content

Commit 13b4249

Browse files
committed
update common.cpp
1 parent cd09771 commit 13b4249

File tree

3 files changed

+8
-8
lines changed

3 files changed

+8
-8
lines changed

common/arg.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1876,7 +1876,7 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
18761876
"if suffix/prefix are specified, template will be disabled\n"
18771877
"only commonly used templates are accepted:\nhttps://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template",
18781878
[](gpt_params & params, const std::string & value) {
1879-
if (!llama_chat_verify_template(value)) {
1879+
if (!common_chat_verify_template(value)) {
18801880
throw std::runtime_error(format(
18811881
"error: the supplied chat template is not supported: %s\n"
18821882
"note: llama.cpp does not use jinja parser, we only support commonly used templates\n",

common/common.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -876,7 +876,7 @@ struct common_init_result llama_init_from_gpt_params(gpt_params & params) {
876876
if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
877877
if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_n_layer(model);
878878

879-
const auto cvec = llama_control_vector_load(params.control_vectors);
879+
const auto cvec = common_control_vector_load(params.control_vectors);
880880
if (cvec.n_embd == -1) {
881881
llama_free(lctx);
882882
llama_free_model(model);
@@ -1545,7 +1545,7 @@ std::string common_detokenize(llama_context * ctx, const std::vector<llama_token
15451545
// Chat template utils
15461546
//
15471547

1548-
bool llama_chat_verify_template(const std::string & tmpl) {
1548+
bool common_chat_verify_template(const std::string & tmpl) {
15491549
llama_chat_message chat[] = {{"user", "test"}};
15501550
int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
15511551
return res >= 0;
@@ -1765,7 +1765,7 @@ float common_embd_similarity_cos(const float * embd1, const float * embd2, int n
17651765
// Control vector utils
17661766
//
17671767

1768-
static common_control_vector_data llama_control_vector_load_one(const common_control_vector_load_info & load_info) {
1768+
static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
17691769
common_control_vector_data result = { -1, {} };
17701770

17711771
ggml_context * ctx = nullptr;
@@ -1850,11 +1850,11 @@ static common_control_vector_data llama_control_vector_load_one(const common_con
18501850
return result;
18511851
}
18521852

1853-
common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
1853+
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
18541854
common_control_vector_data result = { -1, {} };
18551855

18561856
for (const auto & info : load_infos) {
1857-
auto cur = llama_control_vector_load_one(info);
1857+
auto cur = common_control_vector_load_one(info);
18581858

18591859
if (cur.n_embd == -1) {
18601860
result.n_embd = -1;

common/common.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -477,7 +477,7 @@ struct common_chat_msg {
477477
};
478478

479479
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
480-
bool llama_chat_verify_template(const std::string & tmpl);
480+
bool common_chat_verify_template(const std::string & tmpl);
481481

482482
// CPP wrapper for llama_chat_apply_template
483483
// If the built-in template is not supported, we default to chatml
@@ -535,7 +535,7 @@ struct common_control_vector_load_info {
535535

536536
// Load control vectors, scale each by strength, and add them together.
537537
// On error, returns {-1, empty}
538-
common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);
538+
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);
539539

540540
//
541541
// Split utils

0 commit comments

Comments (0)