@@ -876,7 +876,7 @@ struct common_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
         if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_n_layer(model);

-        const auto cvec = llama_control_vector_load(params.control_vectors);
+        const auto cvec = common_control_vector_load(params.control_vectors);
         if (cvec.n_embd == -1) {
             llama_free(lctx);
             llama_free_model(model);
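For context, the code that follows this hunk in `common.cpp` applies the loaded vector to the context. A minimal sketch of that caller pattern, assuming the `llama_control_vector_apply` entry point exposed by `llama.h` at this revision (data pointer, length, embedding size, layer range); the error handling shown is illustrative:

```cpp
// Sketch only: apply a control vector loaded by the renamed common_control_vector_load().
// Assumes llama_control_vector_apply(ctx, data, len, n_embd, il_start, il_end) from llama.h.
const auto cvec = common_control_vector_load(params.control_vectors);
if (cvec.n_embd != -1) {
    int err = llama_control_vector_apply(lctx,
                                         cvec.data.data(),
                                         cvec.data.size(),
                                         cvec.n_embd,
                                         params.control_vector_layer_start,
                                         params.control_vector_layer_end);
    if (err) {
        // On failure the surrounding code frees lctx and the model, as in the hunk above.
    }
}
```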
@@ -1545,7 +1545,7 @@ std::string common_detokenize(llama_context * ctx, const std::vector<llama_token
 // Chat template utils
 //

-bool llama_chat_verify_template(const std::string & tmpl) {
+bool common_chat_verify_template(const std::string & tmpl) {
     llama_chat_message chat[] = {{"user", "test"}};
     int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
     return res >= 0;
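The renamed helper is a thin probe around `llama_chat_apply_template`: it renders a dummy one-message chat with the given template and treats a negative return value as "unsupported". A hypothetical caller-side check (the variable name and error message below are illustrative, not from this commit):

```cpp
#include <cstdio>
#include <string>

// Hypothetical use of common_chat_verify_template() when validating a user-supplied template.
std::string chat_template = /* e.g. the value passed via --chat-template */ "";
if (!chat_template.empty() && !common_chat_verify_template(chat_template)) {
    fprintf(stderr, "error: chat template is not supported by llama_chat_apply_template\n");
    exit(1);
}
```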
@@ -1765,7 +1765,7 @@ float common_embd_similarity_cos(const float * embd1, const float * embd2, int n
 // Control vector utils
 //

-static common_control_vector_data llama_control_vector_load_one(const common_control_vector_load_info & load_info) {
+static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
     common_control_vector_data result = { -1, {} };

     ggml_context * ctx = nullptr;
@@ -1850,11 +1850,11 @@ static common_control_vector_data llama_control_vector_load_one(const common_con
     return result;
 }

-common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
+common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
     common_control_vector_data result = { -1, {} };

     for (const auto & info : load_infos) {
-        auto cur = llama_control_vector_load_one(info);
+        auto cur = common_control_vector_load_one(info);

         if (cur.n_embd == -1) {
             result.n_embd = -1;
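`common_control_vector_load` accumulates a single `common_control_vector_data` across all requested files and bails out (`n_embd == -1`) if any file fails to load or the embedding sizes disagree. A minimal sketch of feeding it, assuming `common_control_vector_load_info` keeps the pre-rename pair of fields (a `strength` scale and a `fname` path) and that the per-layer directions end up in a flat float vector; the file names and strengths are made up for illustration:

```cpp
#include <cstdio>
#include <vector>

// Sketch: load two control vectors with different strengths and combine them.
// Field order { strength, fname } mirrors the pre-rename llama_control_vector_load_info;
// treat it as an assumption if the header has since changed.
std::vector<common_control_vector_load_info> infos = {
    {  0.8f, "happy.gguf"   },
    { -0.4f, "verbose.gguf" },
};

common_control_vector_data cvec = common_control_vector_load(infos);
if (cvec.n_embd == -1) {
    fprintf(stderr, "failed to load control vectors\n");
} else {
    // cvec.data holds the scaled, per-layer sum of all loaded directions.
}
```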