Commit 5a8d0fa

common : fix name
ggml-ci
1 parent: b75ce0d

File tree: 3 files changed (+4, -4 lines)

common/common.cpp

Lines changed: 2 additions & 2 deletions
@@ -939,7 +939,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     }
 
     if (!params.lora_init_without_apply) {
-        common_adapter_loras_apply(lctx, params.lora_adapters);
+        common_set_adapter_lora(lctx, params.lora_adapters);
     }
 
     if (params.sampling.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
@@ -1006,7 +1006,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     return iparams;
 }
 
-void common_adapter_loras_apply(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
+void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
     llama_clear_adapter_lora(ctx);
     for (auto & la : lora) {
         if (la.scale != 0.0f) {
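
The second hunk cuts off mid-loop. For reference, a minimal sketch of the complete renamed helper, assuming (beyond what the diff shows) that common_adapter_lora_info carries a ptr handle to the loaded adapter and that llama_set_adapter_lora(ctx, adapter, scale) is the per-adapter counterpart of llama_clear_adapter_lora:

    // Sketch only: the loop body is inferred, not shown verbatim by this diff.
    void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
        // detach every adapter currently applied to the context
        llama_clear_adapter_lora(ctx);
        for (auto & la : lora) {
            if (la.scale != 0.0f) {
                // assumption: la.ptr is the llama_adapter_lora handle filled in at load time
                llama_set_adapter_lora(ctx, la.ptr, la.scale);
            }
        }
    }

Under that reading, a scale of 0.0f acts as a soft disable: the entry stays in the list but is skipped after the clear.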

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -503,7 +503,7 @@ struct llama_model * common_load_model_from_hf(
         const struct llama_model_params & params);
 
 // clear LoRA adapters from context, then apply new list of adapters
-void common_adapter_loras_apply(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);
+void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);
 
 //
 // Batch utils
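
Because the helper clears before applying, callers treat the vector as the complete desired adapter set rather than a delta. A hedged usage sketch follows; reweight_adapters is a hypothetical name, and only the .scale field of common_adapter_lora_info is confirmed by this commit:

    #include "common.h" // assumed to declare common_adapter_lora_info and common_set_adapter_lora

    // Hypothetical helper: re-weight the whole adapter list, then swap it in.
    static void reweight_adapters(struct llama_context * ctx,
                                  std::vector<common_adapter_lora_info> & adapters, float scale) {
        for (auto & la : adapters) {
            la.scale = scale;                   // 0.0f would soft-disable an adapter
        }
        common_set_adapter_lora(ctx, adapters); // clears the old set, applies this list
    }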

examples/server/server.cpp

Lines changed: 1 addition & 1 deletion
@@ -2927,7 +2927,7 @@ struct server_context {
             // make sure we're in the right embedding mode
             llama_set_embeddings(ctx, slot_batched->is_non_causal());
             // apply lora, only need to do it once per batch
-            common_adapter_loras_apply(ctx, slot_batched->lora);
+            common_set_adapter_lora(ctx, slot_batched->lora);
         }
 
         // process the created batch of tokens
