@@ -876,7 +876,7 @@ struct common_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
         if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_n_layer(model);
 
-        const auto cvec = llama_control_vector_load(params.control_vectors);
+        const auto cvec = common_control_vector_load(params.control_vectors);
         if (cvec.n_embd == -1) {
             llama_free(lctx);
             llama_free_model(model);
@@ -1112,7 +1112,7 @@ static bool curl_perform_with_retry(const std::string& url, CURL* curl, int max_
     return false;
 }
 
-static bool llama_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
+static bool common_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
 
     // Initialize libcurl
     std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup);
@@ -1182,15 +1182,15 @@ static bool llama_download_file(const std::string & url, const std::string & pat
     }
 
     // Send a HEAD request to retrieve the etag and last-modified headers
-    struct llama_load_model_from_url_headers {
+    struct common_load_model_from_url_headers {
         std::string etag;
         std::string last_modified;
     };
-    llama_load_model_from_url_headers headers;
+    common_load_model_from_url_headers headers;
     {
         typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *);
         auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
-            llama_load_model_from_url_headers *headers = (llama_load_model_from_url_headers *) userdata;
+            common_load_model_from_url_headers *headers = (common_load_model_from_url_headers *) userdata;
 
             static std::regex header_regex("([^:]+): (.*)\r\n");
             static std::regex etag_regex("ETag", std::regex_constants::icase);
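Aside: the hunk above uses the standard libcurl pattern for capturing response headers, a stateless callback plus a user-data struct passed through CURLOPT_HEADERDATA. A minimal standalone sketch of the same technique, with illustrative names that are not from the patch (real code should match header names case-insensitively, as the patch's icase regexes do):

    #include <curl/curl.h>
    #include <string>

    struct response_headers {
        std::string etag;
    };

    // libcurl invokes this once per header line; on success it must return the full byte count
    static size_t on_header(char * buffer, size_t size, size_t n_items, void * userdata) {
        auto * hdrs = (response_headers *) userdata;
        std::string line(buffer, size * n_items);
        if (line.rfind("ETag:", 0) == 0) {
            hdrs->etag = line.substr(5);
        }
        return size * n_items;
    }

    int main() {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL * curl = curl_easy_init();
        response_headers hdrs;
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/model.gguf"); // placeholder URL
        curl_easy_setopt(curl, CURLOPT_NOBODY, 1L); // HEAD request, as in the patch
        curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, on_header);
        curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hdrs);
        CURLcode res = curl_easy_perform(curl);
        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return res == CURLE_OK ? 0 : 1;
    }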
@@ -1326,7 +1326,7 @@ static bool llama_download_file(const std::string & url, const std::string & pat
     return true;
 }
 
-struct llama_model * llama_load_model_from_url(
+struct llama_model * common_load_model_from_url(
         const char * model_url,
         const char * path_model,
         const char * hf_token,
@@ -1337,7 +1337,7 @@ struct llama_model * llama_load_model_from_url(
         return NULL;
     }
 
-    if (!llama_download_file(model_url, path_model, hf_token)) {
+    if (!common_download_file(model_url, path_model, hf_token)) {
         return NULL;
     }
@@ -1390,7 +1390,7 @@ struct llama_model * llama_load_model_from_url(
                 char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
                 llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);
 
-                return llama_download_file(split_url, split_path, hf_token);
+                return common_download_file(split_url, split_path, hf_token);
             }, idx));
         }
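Aside: the download loop above relies on llama_split_path to generate the canonical split-file names. A small sketch of the naming scheme, assuming the llama.h helper behaves as documented (prefix, index, and count produce names like model-00002-of-00004.gguf):

    char split_path[1024] = {0}; // buffer size is illustrative
    // expected to yield "model-00002-of-00004.gguf"
    llama_split_path(split_path, sizeof(split_path), "model", 2, 4);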
@@ -1405,7 +1405,7 @@ struct llama_model * llama_load_model_from_url(
     return llama_load_model_from_file(path_model, params);
 }
 
-struct llama_model * llama_load_model_from_hf(
+struct llama_model * common_load_model_from_hf(
         const char * repo,
         const char * model,
         const char * path_model,
@@ -1425,7 +1425,7 @@ struct llama_model * llama_load_model_from_hf(
     model_url += "/resolve/main/";
     model_url += model;
 
-    return llama_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
+    return common_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
 }
 
 #else
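Aside: callers that previously went through llama_load_model_from_hf now use the common_ prefix. A minimal usage sketch, assuming the final parameter is still a llama_model_params struct and using illustrative repo/file names:

    #include "common.h"
    #include "llama.h"

    int main() {
        llama_model_params mparams = llama_model_default_params();
        struct llama_model * model = common_load_model_from_hf(
            "ggml-org/models",           // HF repo (example value)
            "tinyllama-1.1b.Q4_0.gguf",  // file within the repo (example value)
            "/tmp/model.gguf",           // local download path
            nullptr,                     // hf_token: none needed for public repos
            mparams);
        if (model == NULL) {
            return 1;
        }
        llama_free_model(model);
        return 0;
    }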
@@ -1545,7 +1545,7 @@ std::string common_detokenize(llama_context * ctx, const std::vector<llama_token
 // Chat template utils
 //
 
-bool llama_chat_verify_template(const std::string & tmpl) {
+bool common_chat_verify_template(const std::string & tmpl) {
     llama_chat_message chat[] = {{"user", "test"}};
     int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
     return res >= 0;
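Aside: the verifier dry-runs llama_chat_apply_template on a one-message conversation and treats a non-negative result as success. A sketch of how a caller might gate a user-supplied template with the renamed helper (error wording is illustrative):

    std::string tmpl = "chatml"; // e.g. a built-in template name
    if (!common_chat_verify_template(tmpl)) {
        fprintf(stderr, "error: unsupported chat template: %s\n", tmpl.c_str());
        return 1;
    }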
@@ -1765,7 +1765,7 @@ float common_embd_similarity_cos(const float * embd1, const float * embd2, int n
 // Control vector utils
 //
 
-static common_control_vector_data llama_control_vector_load_one(const common_control_vector_load_info & load_info) {
+static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
     common_control_vector_data result = { -1, {} };
 
     ggml_context * ctx = nullptr;
@@ -1850,11 +1850,11 @@ static common_control_vector_data llama_control_vector_load_one(const common_con
     return result;
 }
 
-common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
+common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
     common_control_vector_data result = { -1, {} };
 
     for (const auto & info : load_infos) {
-        auto cur = llama_control_vector_load_one(info);
+        auto cur = common_control_vector_load_one(info);
 
         if (cur.n_embd == -1) {
             result.n_embd = -1;
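Aside: note that common_control_vector_load reports failure in-band via n_embd == -1 rather than through a separate error code, so callers must check that field. A caller sketch, assuming the load-info struct carries a strength multiplier and a file name in that order:

    std::vector<common_control_vector_load_info> infos = {
        { 0.8f, "happy.gguf" }, // strength, fname (example values)
    };
    common_control_vector_data cvec = common_control_vector_load(infos);
    if (cvec.n_embd == -1) {
        fprintf(stderr, "failed to load control vector(s)\n");
    }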