Skip to content

Commit afd220d

Browse files
committed
Properly free llama_context on failure
1 parent 481044d commit afd220d

File tree

1 file changed

+6
-4
lines changed

1 file changed

+6
-4
lines changed

llama.cpp

Lines changed: 6 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1432,16 +1432,16 @@ struct llama_context * llama_init_from_file(
14321432
if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
14331433
params.vocab_only)) {
14341434
fprintf(stderr, "%s: failed to load model\n", __func__);
1435-
delete ctx;
1435+
llama_free(ctx);
14361436
return nullptr;
14371437
}
1438-
1438+
14391439
if (params.use_mlock) {
14401440
char *err;
14411441
if (!ggml_mlock(ctx->model.ctx, &err)) {
14421442
fprintf(stderr, "%s\n", err);
14431443
free(err);
1444-
delete ctx;
1444+
llama_free(ctx);
14451445
return nullptr;
14461446
}
14471447
}
@@ -1464,7 +1464,9 @@ struct llama_context * llama_init_from_file(
14641464
}
14651465

14661466
void llama_free(struct llama_context * ctx) {
1467-
ggml_free(ctx->model.ctx);
1467+
if (ctx->model.ctx) {
1468+
ggml_free(ctx->model.ctx);
1469+
}
14681470

14691471
delete ctx;
14701472
}

0 commit comments

Comments (0)