Skip to content

Commit ecb23d4

Browse files
committed
restore progress_callback behavior
1 parent 5834a25 commit ecb23d4

File tree

1 file changed

+5
-4
lines changed

1 file changed

+5
-4
lines changed

llama.cpp

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2372,6 +2372,11 @@ struct llama_model_loader {
23722372
for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
23732373
struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
23742374
GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
2375+
2376+
if (progress_callback) {
2377+
progress_callback((float) size_done / size_data, progress_callback_user_data);
2378+
}
2379+
23752380
const size_t offs = file_offset(ggml_get_name(cur));
23762381

23772382
if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) {
@@ -2422,10 +2427,6 @@ struct llama_model_loader {
24222427
}
24232428

24242429
size_done += ggml_nbytes(cur);
2425-
2426-
if (progress_callback) {
2427-
progress_callback((float) size_done / size_data, progress_callback_user_data);
2428-
}
24292430
}
24302431

24312432
// unmap GPU tensors

0 commit comments

Comments (0)