@@ -385,6 +385,7 @@ extern "C" {
     } llama_chat_message;
 
     // lora adapter
+    // TODO: rename to llama_adapter_lora
     struct llama_lora_adapter;
 
     // Helpers for getting default parameters
@@ -501,31 +502,40 @@ extern "C" {
             const char * fname_out,
             const llama_model_quantize_params * params);
 
+    //
+    // Adapters
+    //
+
     // Load a LoRA adapter from file
     // The loaded adapter will be associated to the given model, and will be free when the model is deleted
+    // TODO: rename to llama_adapter_lora_init
     LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init(
             struct llama_model * model,
             const char * path_lora);
 
     // Add a loaded LoRA adapter to given context
     // This will not modify model's weight
+    // TODO: rename to llama_set_adapter_lora
     LLAMA_API int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale);
 
     // Remove a specific LoRA adapter from given context
     // Return -1 if the adapter is not present in the context
+    // TODO: rename to llama_rm_adapter_lora
     LLAMA_API int32_t llama_lora_adapter_remove(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter);
 
     // Remove all LoRA adapters from given context
+    // TODO: rename to llama_clear_adapter_lora
     LLAMA_API void llama_lora_adapter_clear(
             struct llama_context * ctx);
 
     // Manually free a LoRA adapter
     // Note: loaded adapters will be free when the associated model is deleted
+    // TODO: rename to llama_adapter_lora_free
     LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter);
 
     // Apply a loaded control vector to a llama_context, or if data is NULL, clear
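To make the LoRA adapter entry points in the hunk above easier to follow, here is a minimal usage sketch. It is not part of the diff: it assumes a `llama_model` and `llama_context` have already been created with the usual llama.h calls, and that `path_lora` points to a GGUF LoRA adapter file; only the functions declared above are used.

// Minimal usage sketch (not part of the diff) for the LoRA adapter API above.
// Assumes `model` and `ctx` were created beforehand with the usual llama.h calls.
#include "llama.h"
#include <stdio.h>

static int run_with_lora(struct llama_model * model, struct llama_context * ctx, const char * path_lora) {
    // Load the adapter; it is associated with the model and freed when the model is deleted.
    struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, path_lora);
    if (adapter == NULL) {
        fprintf(stderr, "failed to load LoRA adapter from '%s'\n", path_lora);
        return 1;
    }

    // Activate the adapter on this context at full strength.
    // The base model weights are not modified.
    if (llama_lora_adapter_set(ctx, adapter, 1.0f) != 0) {
        fprintf(stderr, "failed to apply LoRA adapter\n");
        return 1;
    }

    // ... run decoding here with the adapter active ...

    // Detach the adapter from this context only (returns -1 if it was not attached),
    // or use llama_lora_adapter_clear(ctx) to detach all adapters at once.
    llama_lora_adapter_remove(ctx, adapter);

    // Optionally free it now; otherwise it is freed together with the model.
    llama_lora_adapter_free(adapter);
    return 0;
}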
@@ -534,6 +544,7 @@ extern "C" {
     // to an n_embd x n_layers buffer starting from layer 1.
     // il_start and il_end are the layer range the vector should apply to (both inclusive)
     // See llama_control_vector_load in common to load a control vector.
+    // TODO: rename to llama_adapter_vec_apply
     LLAMA_API int32_t llama_control_vector_apply(
             struct llama_context * lctx,
             const float * data,
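The hunk above cuts off after the `data` parameter, so the trailing parameters used in the sketch below (`len`, `n_embd`, `il_start`, `il_end`) are an assumption based on the header comment rather than something shown in this diff. With that caveat, applying and then clearing a control vector might look roughly like this:

// Rough sketch only: the trailing parameters of llama_control_vector_apply are assumed
// (len, n_embd, il_start, il_end), since the hunk above truncates the declaration.
#include "llama.h"
#include <stdlib.h>

static int apply_zero_control_vector(struct llama_context * lctx, int32_t n_embd, int32_t n_layer) {
    // One n_embd-sized row per layer, starting from layer 1 (see the header comment).
    const size_t len = (size_t) n_embd * (size_t) n_layer;
    float * data = calloc(len, sizeof(float));
    if (data == NULL) {
        return -1;
    }

    // Apply the (all-zero) control vector to layers 1..n_layer inclusive.
    int32_t res = llama_control_vector_apply(lctx, data, len, n_embd, 1, n_layer);

    // Passing NULL for data clears any previously applied control vector.
    llama_control_vector_apply(lctx, NULL, 0, n_embd, 1, n_layer);

    free(data);
    return res;
}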