1 file changed: +5 −4 lines

@@ -2372,6 +2372,11 @@ struct llama_model_loader {
        for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
            struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
            GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
+
+           if (progress_callback) {
+               progress_callback((float) size_done / size_data, progress_callback_user_data);
+           }
+
            const size_t offs = file_offset(ggml_get_name(cur));

            if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) {
@@ -2422,10 +2427,6 @@ struct llama_model_loader {
            }

            size_done += ggml_nbytes(cur);
-
-           if (progress_callback) {
-               progress_callback((float) size_done / size_data, progress_callback_user_data);
-           }
        }

        // unmap GPU tensors
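
With this change the loader invokes the progress callback before it starts copying each tensor, instead of after size_done has been updated, so the first callback fires at 0.0 rather than only after the first tensor has already been loaded. Below is a minimal sketch (not part of this PR) of how a caller receives these callbacks through llama.cpp's public llama_model_params; the field names progress_callback and progress_callback_user_data match those used in the diff, while the path "model.gguf" is a placeholder and the bool-returning callback form is an assumption — older headers declare llama_progress_callback as returning void.

// sketch only: hook the model-loading progress callback via llama_model_params
#include <cstdio>
#include "llama.h"

// called repeatedly by the loader with progress = size_done / size_data
static bool on_progress(float progress, void * user_data) {
    (void) user_data;
    fprintf(stderr, "\rloading model: %3.0f%%", progress * 100.0f);
    return true; // false asks the loader to abort (bool-returning variant only)
}

int main() {
    llama_model_params mparams = llama_model_default_params();
    mparams.progress_callback           = on_progress;
    mparams.progress_callback_user_data = nullptr;

    // "model.gguf" is a placeholder path
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    fprintf(stderr, "\n");
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }
    llama_free_model(model);
    return 0;
}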