Commit b005191

llama : wrap llama_new_context_with_model in try/catch

This fixes a crash where ggml_vk_allocate fails in llama_kv_cache_init, but the exception is never caught.

Signed-off-by: Jared Van Bortel <[email protected]>
1 parent 8b33d56 commit b005191

1 file changed: +13 -1 lines changed

llama.cpp

Lines changed: 13 additions & 1 deletion
@@ -12127,7 +12127,7 @@ void llama_free_model(struct llama_model * model) {
     delete model;
 }
 
-struct llama_context * llama_new_context_with_model(
+static struct llama_context * llama_new_context_with_model_internal(
              struct llama_model * model,
         struct llama_context_params params) {
 
@@ -12426,6 +12426,18 @@ struct llama_context * llama_new_context_with_model(
     return ctx;
 }
 
+struct llama_context * llama_new_context_with_model(
+        struct llama_model * model,
+        struct llama_context_params params
+) {
+    try {
+        return llama_new_context_with_model_internal(model, params);
+    } catch (const std::exception & err) {
+        LLAMA_LOG_ERROR("%s: failed to init context: %s\n", __func__, err.what());
+        return nullptr;
+    }
+}
+
 void llama_free(struct llama_context * ctx) {
     delete ctx;
 }
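
With this change, a failure during context setup (such as ggml_vk_allocate throwing inside llama_kv_cache_init) is logged via LLAMA_LOG_ERROR and reported to the caller as a nullptr return instead of crashing the process. A minimal caller-side sketch of the resulting error-handling pattern follows; it assumes the llama.h API of this period (llama_backend_init(bool), llama_load_model_from_file, and the *_default_params helpers), and the model path is a placeholder, not part of the commit:

// Caller-side sketch (illustrative, not part of this commit).
#include "llama.h"

#include <cstdio>

int main() {
    llama_backend_init(false);

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        // Before this commit, a throwing allocator here (e.g. ggml_vk_allocate
        // inside llama_kv_cache_init) took the whole process down; now the
        // error is logged and the failure surfaces as a nullptr return.
        fprintf(stderr, "failed to create context\n");
        llama_free_model(model);
        return 1;
    }

    // ... use the context ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}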
