
Commit 672438d

update common.cpp
1 parent cd09771 commit 672438d

3 files changed: +17, -17 lines

common/arg.cpp

Lines changed: 1 addition & 1 deletion
@@ -1876,7 +1876,7 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             "if suffix/prefix are specified, template will be disabled\n"
             "only commonly used templates are accepted:\nhttps://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template",
             [](gpt_params & params, const std::string & value) {
-                if (!llama_chat_verify_template(value)) {
+                if (!common_chat_verify_template(value)) {
                     throw std::runtime_error(format(
                         "error: the supplied chat template is not supported: %s\n"
                         "note: llama.cpp does not use jinja parser, we only support commonly used templates\n",

common/common.cpp

Lines changed: 14 additions & 14 deletions
@@ -876,7 +876,7 @@ struct common_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
         if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_n_layer(model);
 
-        const auto cvec = llama_control_vector_load(params.control_vectors);
+        const auto cvec = common_control_vector_load(params.control_vectors);
         if (cvec.n_embd == -1) {
             llama_free(lctx);
             llama_free_model(model);
@@ -1112,7 +1112,7 @@ static bool curl_perform_with_retry(const std::string& url, CURL* curl, int max_
     return false;
 }
 
-static bool llama_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
+static bool common_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
 
     // Initialize libcurl
     std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup);
@@ -1182,15 +1182,15 @@ static bool llama_download_file(const std::string & url, const std::string & pat
     }
 
     // Send a HEAD request to retrieve the etag and last-modified headers
-    struct llama_load_model_from_url_headers {
+    struct common_load_model_from_url_headers {
         std::string etag;
         std::string last_modified;
     };
-    llama_load_model_from_url_headers headers;
+    common_load_model_from_url_headers headers;
     {
         typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *);
         auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
-            llama_load_model_from_url_headers *headers = (llama_load_model_from_url_headers *) userdata;
+            common_load_model_from_url_headers *headers = (common_load_model_from_url_headers *) userdata;
 
             static std::regex header_regex("([^:]+): (.*)\r\n");
             static std::regex etag_regex("ETag", std::regex_constants::icase);
@@ -1326,7 +1326,7 @@ static bool llama_download_file(const std::string & url, const std::string & pat
     return true;
 }
 
-struct llama_model * llama_load_model_from_url(
+struct llama_model * common_load_model_from_url(
     const char * model_url,
     const char * path_model,
     const char * hf_token,
@@ -1337,7 +1337,7 @@ struct llama_model * llama_load_model_from_url(
         return NULL;
     }
 
-    if (!llama_download_file(model_url, path_model, hf_token)) {
+    if (!common_download_file(model_url, path_model, hf_token)) {
         return NULL;
     }
 
@@ -1390,7 +1390,7 @@ struct llama_model * llama_load_model_from_url(
             char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
             llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);
 
-            return llama_download_file(split_url, split_path, hf_token);
+            return common_download_file(split_url, split_path, hf_token);
         }, idx));
     }
 
@@ -1405,7 +1405,7 @@ struct llama_model * llama_load_model_from_url(
     return llama_load_model_from_file(path_model, params);
 }
 
-struct llama_model * llama_load_model_from_hf(
+struct llama_model * common_load_model_from_hf(
     const char * repo,
     const char * model,
     const char * path_model,
@@ -1425,7 +1425,7 @@ struct llama_model * llama_load_model_from_hf(
     model_url += "/resolve/main/";
     model_url += model;
 
-    return llama_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
+    return common_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
 }
 
 #else
@@ -1545,7 +1545,7 @@ std::string common_detokenize(llama_context * ctx, const std::vector<llama_token
 // Chat template utils
 //
 
-bool llama_chat_verify_template(const std::string & tmpl) {
+bool common_chat_verify_template(const std::string & tmpl) {
     llama_chat_message chat[] = {{"user", "test"}};
     int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
     return res >= 0;
@@ -1765,7 +1765,7 @@ float common_embd_similarity_cos(const float * embd1, const float * embd2, int n
 // Control vector utils
 //
 
-static common_control_vector_data llama_control_vector_load_one(const common_control_vector_load_info & load_info) {
+static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
     common_control_vector_data result = { -1, {} };
 
     ggml_context * ctx = nullptr;
@@ -1850,11 +1850,11 @@ static common_control_vector_data llama_control_vector_load_one(const common_con
     return result;
 }
 
-common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
+common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
     common_control_vector_data result = { -1, {} };
 
     for (const auto & info : load_infos) {
-        auto cur = llama_control_vector_load_one(info);
+        auto cur = common_control_vector_load_one(info);
 
         if (cur.n_embd == -1) {
             result.n_embd = -1;
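All of these are pure renames (llama_ prefix to common_ prefix for helpers that live in common/, not in the core library), so call sites only need the new names. A hedged sketch of an updated caller, assuming the usual llama.cpp model API; the repo, file, and path strings are placeholders, not values from this commit:

    #include "common.h"   // assumed include for common_load_model_from_hf
    #include "llama.h"

    int main() {
        llama_model_params mparams = llama_model_default_params();

        // Previously llama_load_model_from_hf(); renamed in this commit.
        llama_model * model = common_load_model_from_hf(
            "some-org/some-model-GGUF",   // HF repo (placeholder)
            "model-q4_k_m.gguf",          // file inside the repo (placeholder)
            "/tmp/model.gguf",            // local download path (placeholder)
            "",                           // hf_token: none
            mparams);
        if (model == nullptr) {
            return 1;
        }

        llama_free_model(model);
        return 0;
    }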

common/common.h

Lines changed: 2 additions & 2 deletions
@@ -477,7 +477,7 @@ struct common_chat_msg {
 };
 
 // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
-bool llama_chat_verify_template(const std::string & tmpl);
+bool common_chat_verify_template(const std::string & tmpl);
 
 // CPP wrapper for llama_chat_apply_template
 // If the built-in template is not supported, we default to chatml
@@ -535,7 +535,7 @@ struct common_control_vector_load_info {
 
 // Load control vectors, scale each by strength, and add them together.
 // On error, returns {-1, empty}
-common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);
+common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);
 
 //
 // Split utils
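The control-vector loader keeps its documented contract after the rename: each entry is loaded, scaled by its strength, the results are summed, and n_embd == -1 signals failure. A minimal usage sketch, assuming the common_control_vector_load_info fields are fname and strength as declared in common.h; the file names and strengths are illustrative:

    #include <vector>
    #include "common.h"   // assumed include for the control-vector helpers

    static bool load_my_control_vectors(common_control_vector_data & out) {
        std::vector<common_control_vector_load_info> infos(2);
        infos[0].fname    = "happy.gguf";   // placeholder path
        infos[0].strength = 0.8f;
        infos[1].fname    = "angry.gguf";   // placeholder path
        infos[1].strength = -0.4f;

        out = common_control_vector_load(infos);   // renamed from llama_control_vector_load
        // Per the header comment, any failure is reported as { -1, empty }.
        return out.n_embd != -1;
    }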
