
Commit b0c3a3c

make github CI happy
1 parent 9cba545 commit b0c3a3c

16 files changed: 35 additions & 6 deletions

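Every hunk below makes the same mechanical change: a fourth member, use_hwaccel, is appended to a ggml_init_params initializer (or a trailing false is added to a positional brace initializer). For reference, a minimal sketch of the struct these call sites imply; the first three members match the comment annotations used throughout, while the exact use_hwaccel declaration is an assumption, since the ggml.h hunk in this commit only adds a blank line:

    // sketch only: member order inferred from the /* .field = */ comments
    // in the hunks below; the use_hwaccel declaration itself is assumed,
    // as it does not appear in this diff
    struct ggml_init_params {
        size_t mem_size;    // bytes reserved for the context's memory pool
        void * mem_buffer;  // caller-provided buffer, or NULL to let ggml allocate
        bool   no_alloc;    // if true, create tensor metadata without allocating data
        bool   use_hwaccel; // new member; every call site in this commit passes false
    };

Because the new member is last and every call site passes false (or 0), the change is behavior-neutral at these sites; it only completes the initializer lists.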

common/common.cpp

Lines changed: 2 additions & 0 deletions
@@ -2810,6 +2810,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
         /* .mem_size   = */ ggml_tensor_overhead() * 128 + ggml_graph_overhead(),
         /* .mem_buffer = */ nullptr,
         /* .no_alloc   = */ true,
+        /* .use_hwaccel= */ false
     };
     ggml_context * meta_ctx = ggml_init(meta_params);
     struct gguf_init_params meta_gguf_params = {
@@ -2880,6 +2881,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
         /* .mem_size   = */ ggml_tensor_overhead() * n_tensors + n_bytes,
         /* .mem_buffer = */ nullptr,
         /* .no_alloc   = */ false,
+        /* .use_hwaccel= */ false
     };
     struct ggml_context * ctx = ggml_init(ggml_params);

examples/baby-llama/baby-llama.cpp

Lines changed: 2 additions & 0 deletions
@@ -1522,6 +1522,7 @@ int main(int argc, char ** argv) {
         /*.mem_size   =*/ compute_size,
         /*.mem_buffer =*/ compute_addr,
         /*.no_alloc   =*/ false,
+        /*.use_hwaccel=*/ false
     };

     struct ggml_context * ctx0 = ggml_init(params);
@@ -1598,6 +1599,7 @@ int main(int argc, char ** argv) {
         /*.mem_size   =*/ compute_size,
         /*.mem_buffer =*/ compute_addr,
         /*.no_alloc   =*/ false,
+        /*.use_hwaccel=*/ false
     };
     struct ggml_context * ctx0 = ggml_init(params);

examples/benchmark/benchmark-matmult.cpp

Lines changed: 2 additions & 1 deletion
@@ -143,7 +143,8 @@ int main(int argc, char ** argv) {
     struct ggml_init_params params = {
         /*.mem_size   =*/ ctx_size,
         /*.mem_buffer =*/ NULL,
-        /* no_alloc   =*/ 0
+        /* no_alloc   =*/ 0,
+        /* use_hwaccel=*/ 0
     };

     ctx = ggml_init(params);
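The commit message ("make github CI happy") suggests why every initializer needed the extra value: once the struct gains a fourth member, a three-value initializer triggers -Wmissing-field-initializers, which CI presumably promotes to an error. That is an inference, not something shown on this page; a self-contained, hypothetical reproduction under that assumption:

    // hypothetical reproduction, assuming CI builds with
    // -Wmissing-field-initializers -Werror (flags not shown on this page)
    #include <stddef.h>

    struct demo_params {          // stand-in for ggml_init_params
        size_t mem_size;
        void * mem_buffer;
        int    no_alloc;
        int    use_hwaccel;       // the newly added fourth member
    };

    int main(void) {
        // struct demo_params p = { 1024, NULL, 0 };  // warns: missing
        //                                            // initializer for
        //                                            // 'use_hwaccel'
        struct demo_params p = { 1024, NULL, 0, 0 };  // clean: all four listed
        return (int) p.mem_size - 1024;
    }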

examples/finetune/finetune.cpp

Lines changed: 3 additions & 0 deletions
@@ -1634,6 +1634,7 @@ int main(int argc, char ** argv) {
         ggml_tensor_overhead() * 2, // mem_size
         NULL,                       // mem_buffer
         true,                       // no_alloc
+        false                       // use_hwaccel
     };
     struct ggml_context * ctx_input = ggml_init(ctx_input_params);

@@ -1656,6 +1657,7 @@ int main(int argc, char ** argv) {
         estimated_compute_size_wo_data, // mem_size
         NULL,                           // mem_buffer
         true,                           // no_alloc
+        false                           // use_hwaccel
     };
     struct ggml_context * ctx_compute = NULL;

@@ -1825,6 +1827,7 @@ int main(int argc, char ** argv) {
         max_work_size, // mem_size
         NULL,          // mem_buffer
         false,         // no_alloc
+        false          // use_hwaccel
     };
     struct ggml_context * ctx_work = ggml_init(ctx_work_params);

examples/gguf/gguf.cpp

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@ static bool gguf_ex_write(const std::string & fname) {
         /*.mem_size   =*/ 128ull*1024ull*1024ull,
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ false,
+        /*.use_hwaccel=*/ false
     };

     struct ggml_context * ctx_data = ggml_init(params);

examples/llava/clip.cpp

Lines changed: 4 additions & 2 deletions
@@ -543,6 +543,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         /*.mem_size   =*/ ctx->buf_compute_meta.size(),
         /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };

     struct ggml_context * ctx0 = ggml_init(params);
@@ -1020,9 +1021,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     {
         std::vector<uint8_t> read_buf;
         struct ggml_init_params params = {
-            /*.mem_size   =*/ (n_tensors + 1) * ggml_tensor_overhead(),
+            /*.mem_size   =*/ (n_tensors + 1) * ggml_tensor_overhead(),
             /*.mem_buffer =*/ NULL,
-            /*.no_alloc   =*/ true,
+            /*.no_alloc   =*/ true,
+            /*.use_hwaccel=*/ false
         };

         new_clip->ctx_data = ggml_init(params);

examples/llava/llava.cpp

Lines changed: 1 addition & 0 deletions
@@ -114,6 +114,7 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *>
         /*.mem_size   =*/ ctx_size,
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ false, // NOTE: this should be false when using the legacy API
+        /*.use_hwaccel=*/ false
     };

     // Python reference code for full unpad:

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 3 additions & 0 deletions
@@ -1044,6 +1044,7 @@ int main(int argc, char ** argv) {
         ggml_tensor_overhead() * 2, // mem_size
         NULL,                       // mem_buffer
         true,                       // no_alloc
+        false                       // use_hwaccel
     };
     struct ggml_context * ctx_input = ggml_init(ctx_input_params);

@@ -1066,6 +1067,7 @@ int main(int argc, char ** argv) {
         estimated_compute_size_wo_data, // mem_size
         NULL,                           // mem_buffer
         true,                           // no_alloc
+        false                           // use_hwaccel
     };
     struct ggml_context * ctx_compute = NULL;

@@ -1218,6 +1220,7 @@ int main(int argc, char ** argv) {
         max_work_size, // mem_size
         NULL,          // mem_buffer
         false,         // no_alloc
+        false          // use_hwaccel
     };
     struct ggml_context * ctx_work = ggml_init(ctx_work_params);

ggml-backend.c

Lines changed: 4 additions & 2 deletions
@@ -1238,7 +1238,8 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
     struct ggml_init_params params = {
         /* .mem_size   = */ sizeof(sched->context_buffer),
         /* .mem_buffer = */ sched->context_buffer,
-        /* .no_alloc   = */ true
+        /* .no_alloc   = */ true,
+        /* .use_hwaccel =*/ false
     };

     ggml_free(sched->ctx);
@@ -1980,7 +1981,8 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
     struct ggml_init_params params = {
         /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
         /* .mem_buffer = */ NULL,
-        /* .no_alloc   = */ true
+        /* .no_alloc   = */ true,
+        /* .use_hwaccel =*/ false
     };

     struct ggml_context * ctx_allocated = ggml_init(params);

ggml.h

Lines changed: 1 addition & 0 deletions
@@ -2401,6 +2401,7 @@ extern "C" {

     GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);

+
 #ifdef __cplusplus
 }
 #endif

llama.cpp

Lines changed: 6 additions & 1 deletion
@@ -2378,6 +2378,7 @@ static bool llama_kv_cache_init(
         /*.mem_size   =*/ 2u*n_layers*ggml_tensor_overhead(),
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };
     ggml_context * ctx = ggml_init(params);
     if (!ctx) {
@@ -4664,6 +4665,7 @@ static bool llm_load_tensors(
         /*.mem_size   =*/ ctx_size,
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };
     ggml_context * ctx = ggml_init(params);
     if (!ctx) {
@@ -6535,6 +6537,7 @@ struct llm_build_context {
         /*.mem_size   =*/ buf_compute_meta.size(),
         /*.mem_buffer =*/ buf_compute_meta.data(),
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };

     ctx0 = ggml_init(params);
@@ -14679,6 +14682,7 @@ static int llama_apply_lora_from_file_internal(
         /* .mem_size   */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
         /* .mem_buffer */ nullptr,
         /* .no_alloc   */ true,
+        /* .use_hwaccel*/ false
     };
     ggml_context * lora_ctx = ggml_init(lora_init_params);
     if (lora_ctx == nullptr) {
@@ -14929,7 +14933,7 @@ void llama_backend_init(void) {

     // needed to initialize f16 tables
     {
-        struct ggml_init_params params = { 0, NULL, false };
+        struct ggml_init_params params = { 0, NULL, false, false };
         struct ggml_context * ctx = ggml_init(params);
         ggml_free(ctx);
     }
@@ -15540,6 +15544,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
         /*.mem_size   =*/ n_layers * ggml_tensor_overhead(),
         /*.mem_buffer =*/ NULL,
         /*.no_alloc   =*/ true,
+        /*.use_hwaccel=*/ false
     };
     ggml_context * ctx = ggml_init(params);
     if (!ctx) {

tests/test-backend-ops.cpp

Lines changed: 2 additions & 0 deletions
@@ -359,6 +359,7 @@ struct test_case {
         /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
         /* .mem_base = */ NULL,
         /* .no_alloc = */ true,
+        /* .use_hwaccel=*/false
     };
     ggml_context * ctx = ggml_init(params);

@@ -520,6 +521,7 @@ struct test_case {
         /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
         /* .mem_base = */ NULL,
         /* .no_alloc = */ true,
+        /* .use_hwaccel=*/false
     };
     ggml_context * ctx = ggml_init(params);

tests/test-grad0.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -359,6 +359,7 @@ int main(int argc, const char ** argv) {
359359
/* .mem_size = */ 256*1024*1024,
360360
/* .mem_buffer = */ NULL,
361361
/* .no_alloc = */ false,
362+
/* .use_hwaccel= */ false
362363
};
363364

364365
int64_t ne[4];

tests/test-quantize-fns.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,7 @@ int main(int argc, char * argv[]) {
121121
/* .mem_size = */ 1*1024,
122122
/* .mem_buffer = */ NULL,
123123
/* .no_alloc = */ true,
124+
/* .use_hwaccel= */ false
124125
};
125126
struct ggml_context * ctx = ggml_init(ggml_params);
126127

tests/test-quantize-perf.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -265,6 +265,7 @@ int main(int argc, char * argv[]) {
265265
/* .mem_size = */ 1*1024,
266266
/* .mem_buffer = */ NULL,
267267
/* .no_alloc = */ true,
268+
/* .use_hwaccel= */ false
268269
};
269270
struct ggml_context * ctx = ggml_init(ggml_params);
270271

tests/test-rope.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,7 @@ int main(int /*argc*/, const char ** /*argv*/) {
128128
/* .mem_size = */ 128*1024*1024,
129129
/* .mem_buffer = */ NULL,
130130
/* .no_alloc = */ false,
131+
/* .use_hwaccel= */ false
131132
};
132133

133134
std::vector<uint8_t> work_buffer;
