Skip to content

Commit 3425e62

Browse files
committed
llama : Add test for model load cancellation
1 parent 9abe2e4 commit 3425e62

File tree

2 files changed

+18
-0
lines changed

2 files changed

+18
-0
lines changed

tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ llama_build_and_test_executable(test-grad0.cpp)
5050
llama_build_and_test_executable(test-backend-ops.cpp)

llama_build_and_test_executable(test-rope.cpp)
# Registers the model-load cancellation test added by this commit
# (see tests/test-model-load-cancel.cpp in the same diff).
llama_build_and_test_executable(test-model-load-cancel.cpp)

# dummy executable - not installed
get_filename_component(TEST_TARGET test-c.c NAME_WE)

tests/test-model-load-cancel.cpp

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
// Regression test: loading a model must be abortable from the progress
// callback. The callback cancels the load, so llama_load_model_from_file
// is expected to return nullptr and the test exits with EXIT_SUCCESS.
#include "llama.h"

#include <cstdlib>
#include <tuple>

int main(void) {
    // NOTE(review): old-style single-argument init; the bool presumably
    // toggles NUMA support — confirm against llama.h of this revision.
    llama_backend_init(false);
    auto params = llama_model_params{};
    // Disable mmap so the loader actually streams tensor data and invokes
    // the progress callback incrementally rather than mapping in one step.
    // NOTE(review): assumption about callback frequency — verify in loader.
    params.use_mmap = false;
    // Captureless lambda decays to the plain function pointer the C API
    // expects. Returning `progress > 0.50` yields false on the earliest
    // callbacks (progress near 0), which presumably signals the loader to
    // cancel — confirm the return-false-means-cancel convention in llama.h.
    params.progress_callback = [](float progress, void * ctx){
        std::ignore = ctx;  // user-data pointer is unused here
        return progress > 0.50;
    };
    // Hard-coded relative path: assumes the test is run from a directory one
    // level below the repo root and that a converted 7B f16 model exists —
    // TODO confirm the CI environment provides this fixture.
    auto * model = llama_load_model_from_file("../models/7B/ggml-model-f16.gguf", params);
    llama_backend_free();
    // Success iff the load was cancelled (i.e. no model object was returned).
    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
}

0 commit comments

Comments
 (0)