
Commit 11d40ea

Export lora A matrix pre-transposed
1 parent 3db2bf9 commit 11d40ea

2 files changed: +12 −2 lines changed


convert-lora-to-ggml.py

Lines changed: 2 additions & 0 deletions
@@ -94,6 +94,8 @@ def write_tensor_header(self, name: str, shape: Sequence[int], data_type: 1) ->
     # since ggml doesn't always support other types for the second operand,
     # the tensors are always converted and exported as f32
     t = v.float().numpy()
+    if "lora_A" in k:
+        t = t.T
     print(f"{k} => {translate_tensor_name(k)} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
     write_tensor_header(fout, translate_tensor_name(k), t.shape, t.dtype)
     t.tofile(fout)
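
For orientation: the converter now writes lora_A with its two axes swapped, so the serialized tensor has shape (in_features, rank) rather than the (rank, in_features) layout found in the adapter checkpoint. Below is a minimal NumPy sketch of that shape change, with hypothetical rank and width values (not the converter's actual variables):

    import numpy as np

    r, n_in = 8, 4096                                    # assumed LoRA rank and input width
    lora_A = np.random.rand(r, n_in).astype(np.float32)  # layout as found in the adapter checkpoint

    exported = lora_A.T                                   # what the converter now writes to the ggml file
    assert exported.shape == (n_in, r)

Doing the swap once at export time is what lets the loader below drop its ggml_transpose call.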

llama.cpp

Lines changed: 10 additions & 2 deletions
@@ -1685,8 +1685,12 @@ int llama_model_quantize(
 
 int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, int n_threads) {
     // TODO: refactor all of this after PR #801
+    fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
+
     auto & model = ctx->model;
 
+    const int64_t t_start_lora_us = ggml_time_us();
+
     auto fin = std::ifstream(path_lora, std::ios::binary);
     if (!fin) {
         fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
@@ -1799,7 +1803,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
 
             ggml_tensor * tensor = model.tensors[base_name];
-            ggml_tensor * loraA = ggml_transpose(lora_ctx, lora_tensors[base_name + ".loraA"]);
+            ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
             ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
 
             if (tensor->ne[0] != loraA->ne[1]) {
@@ -1826,7 +1830,11 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             fprintf(stderr, ".");
         }
     }
-    fprintf(stderr, " done\n");
+
+    ggml_free(lora_ctx);
+
+    const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
+    fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
 
     return 0;
 }
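
The applied update itself is unchanged; only the place where the transpose happens moves from load time to conversion time. A rough NumPy equivalence check under assumed shapes (illustrative only; ggml's mul_mat operand convention is not modelled here):

    import numpy as np

    d_out, r, d_in = 4096, 8, 4096                       # assumed layer and adapter dimensions
    W = np.random.rand(d_out, d_in).astype(np.float32)   # base weight
    A = np.random.rand(r, d_in).astype(np.float32)       # lora_A as stored in the checkpoint
    B = np.random.rand(d_out, r).astype(np.float32)      # lora_B

    # before this commit: the converter wrote A as-is and the loader transposed it
    delta_before = B @ A
    # after this commit: the converter writes A.T and the loader consumes it directly
    A_exported = A.T
    delta_after = B @ A_exported.T

    assert np.allclose(W + delta_before, W + delta_after)

Either way the adapter contributes the same B·A delta to the base weight; the remaining additions in this hunk free the temporary lora context and report how long applying the adapter took.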
