@@ -467,7 +467,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
     { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" },
 
-    { LLM_KV_TRAINING_TYPE, "training.type" },
+    { LLM_KV_TRAINING_TYPE, "training.type" },
 };
 
 struct LLM_KV {
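For context, `LLM_KV_NAMES` maps the `llm_kv` enum to the literal GGUF metadata keys stored in model and adapter files. A minimal sketch of reading one of these keys back through the public gguf API (the file path and helper name here are hypothetical, not part of this diff):

```cpp
#include <cstdint>
#include <stdexcept>
#include "ggml.h" // gguf_* API lives here in this tree

// Hypothetical helper: read "tokenizer.ggml.eot_token_id" from a GGUF file.
static uint32_t read_eot_id(const char * fname) {
    struct gguf_init_params params = {
        /* .no_alloc = */ true,
        /* .ctx      = */ nullptr, // metadata only, no tensor-data context
    };
    struct gguf_context * ctx = gguf_init_from_file(fname, params);
    if (!ctx) {
        throw std::runtime_error("failed to open gguf file");
    }
    const int kid = gguf_find_key(ctx, "tokenizer.ggml.eot_token_id");
    const uint32_t eot_id = kid >= 0 ? gguf_get_val_u32(ctx, kid) : 0;
    gguf_free(ctx);
    return eot_id;
}
```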
@@ -18521,7 +18521,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 static void llama_lora_adapter_init_internal(struct llama_model * model, const char * path_lora, struct llama_lora_adapter & adapter) {
     static const int n_inp_tensors = 5; // see llama_model
     static const int n_out_tensors = 5; // see llama_model
-    LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
+    LLAMA_LOG_INFO("%s: applying lora adapter from '%s' ...\n", __func__, path_lora);
 
     ggml_context * ctx = nullptr;
     struct gguf_init_params meta_gguf_params = {
@@ -18530,8 +18530,7 @@ static void llama_lora_adapter_init_internal(struct llama_model * model, const c
     };
     struct gguf_context * ctx_gguf = gguf_init_from_file(path_lora, meta_gguf_params);
     if (!ctx_gguf) {
-        LLAMA_LOG_ERROR("%s: failed to load lora adapter file from %s\n", __func__, path_lora);
-        throw std::exception();
+        throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
     }
 
     // check metadata
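Replacing the log-then-`throw std::exception()` pattern with `std::runtime_error` keeps the failure reason in the exception itself, so a single catch block at the public entry point can report it. A hedged sketch of such a wrapper (the `llama_lora_adapter` constructor signature and exact log wording are assumptions, not part of this diff):

```cpp
// Sketch of a public entry point catching the internal throws; the
// constructor signature here is an assumption.
struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) {
    try {
        struct llama_lora_adapter * adapter = new llama_lora_adapter(model);
        llama_lora_adapter_init_internal(model, path_lora, *adapter);
        return adapter;
    } catch (const std::exception & err) {
        // err.what() now carries the message from the throw site
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return nullptr;
    }
}
```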
@@ -18631,11 +18630,21 @@ static void llama_lora_adapter_init_internal(struct llama_model * model, const c
         if (!model_tensor) {
             gguf_free(ctx_gguf);
             ggml_free(ctx);
-            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model\n ");
+            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model");
         }
         struct ggml_context * dev_ctx = ctx_map.at(ggml_backend_buffer_get_type(model_tensor->buffer));
-        // TODO: validate tensor shape
-        // LLAMA_LOG_INFO("%s %p %p\n", cname, w.a, w.b);
+        // validate tensor shape
+        if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
+            gguf_free(ctx_gguf);
+            ggml_free(ctx);
+            throw std::runtime_error("tensor '" + name + "' has incorrect shape");
+        }
+        if (w.a->ne[1] != w.b->ne[0]) {
+            gguf_free(ctx_gguf);
+            ggml_free(ctx);
+            throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
+        }
+        // save tensor to adapter
         struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
         struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
         ggml_set_name(tensor_a, w.a->name);
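The shape checks encode the LoRA contract in ggml's dimension order, where ne[0] is the input (fastest-varying) dimension: for a base weight W with ne = {n_in, n_out}, lora_a must be {n_in, r} and lora_b must be {r, n_out}, so that mul_mat(lora_b, mul_mat(lora_a, x)) has the same shape as mul_mat(W, x). A standalone sketch with hypothetical shapes and a hypothetical helper:

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>

// Stand-in for the first two ggml dimensions of a tensor (hypothetical helper).
struct shape2d { int64_t ne0, ne1; };

// Mirrors the checks above: W = {n_in, n_out}, A = {n_in, r}, B = {r, n_out}.
static void check_lora_shapes(shape2d w, shape2d a, shape2d b, const std::string & name) {
    if (w.ne0 != a.ne0 || w.ne1 != b.ne1) {
        throw std::runtime_error("tensor '" + name + "' has incorrect shape");
    }
    if (a.ne1 != b.ne0) {
        throw std::runtime_error("lora_a tensor is not transposed");
    }
}

int main() {
    // e.g. a 4096 x 11008 projection adapted at rank r = 16
    check_lora_shapes({4096, 11008}, {4096, 16}, {16, 11008}, "blk.0.ffn_up.weight"); // passes
    return 0;
}
```

An adapter written with lora_a in the opposite orientation, as the hint about the old "finetune" example suggests, would fail the second check rather than silently producing a wrong mul_mat.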