
Commit 07ba2bb

Introduce GGML_CALL function annotation

This change makes it possible to build ggml-cuda.cu and ggml-metal.m as independent dynamic shared objects that may be conditionally linked at runtime in a multiplatform binary. It introduces a GGML_CALL annotation that documents which functions have a cyclic call relationship between the application code and the GPU modules. The change does nothing unless the build defines -DGGML_MULTIPLATFORM, which causes back-references and function pointers to conform to the MS ABI, a convention supported by NVCC, ROCm, XCode, GCC, and Clang across platforms.

1 parent 36e5a08
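
For reference, the commit message implies a macro definition along these lines. This is a sketch, not one of the hunks shown on this page (the actual definition lives elsewhere in the commit, presumably in ggml.h): GGML_CALL expands to nothing by default, and under -DGGML_MULTIPLATFORM it still expands to nothing on Windows, where the MS ABI is already the native convention, while forcing the MS ABI everywhere else.

    #ifdef GGML_MULTIPLATFORM
    #    if defined(_WIN32)
    #        define GGML_CALL
    #    else
    #        define GGML_CALL __attribute__((__ms_abi__))
    #    endif
    #else
    #    define GGML_CALL
    #endif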

File tree

10 files changed: +209 -199 lines


ggml-backend-impl.h

Lines changed: 29 additions & 29 deletions
@@ -16,13 +16,13 @@ extern "C" {
     typedef void * ggml_backend_buffer_type_context_t;
 
     struct ggml_backend_buffer_type_i {
-        ggml_backend_buffer_t (*alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
-        size_t (*get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
-        size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
-        bool (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
+        ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
+        size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
+        size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+        bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
         // check if tensor data is in host memory
         // should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
-        bool (*is_host) (ggml_backend_buffer_type_t buft);
+        bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft);
     };
 
     struct ggml_backend_buffer_type {
@@ -34,16 +34,16 @@ extern "C" {
     typedef void * ggml_backend_buffer_context_t;
 
     struct ggml_backend_buffer_i {
-        void (*free_buffer) (ggml_backend_buffer_t buffer);
-        //void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
-        void * (*get_base) (ggml_backend_buffer_t buffer);
-        void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
-        void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-        void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        void (*GGML_CALL free_buffer) (ggml_backend_buffer_t buffer);
+        //void (*GGML_CALL reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+        void * (*GGML_CALL get_base) (ggml_backend_buffer_t buffer);
+        void (*GGML_CALL init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+        void (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
         // (optional) copy tensor between different buffer-type, allow for single-copy tranfers
-        void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*clear) (ggml_backend_buffer_t buffer, uint8_t value);
+        void (*GGML_CALL cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*GGML_CALL cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*GGML_CALL clear) (ggml_backend_buffer_t buffer, uint8_t value);
     };
 
     struct ggml_backend_buffer {
@@ -57,7 +57,7 @@ extern "C" {
             ggml_backend_buffer_type_t buft,
             struct ggml_backend_buffer_i iface,
             ggml_backend_buffer_context_t context,
-            size_t size);
+            size_t size) GGML_CALL;
 
 
     //
@@ -67,33 +67,33 @@ extern "C" {
     typedef void * ggml_backend_context_t;
 
     struct ggml_backend_i {
-        const char * (*get_name)(ggml_backend_t backend);
+        const char * (*GGML_CALL get_name)(ggml_backend_t backend);
 
-        void (*free)(ggml_backend_t backend);
+        void (*GGML_CALL free)(ggml_backend_t backend);
 
         // buffer allocation
-        ggml_backend_buffer_type_t (*get_default_buffer_type)(ggml_backend_t backend);
+        ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend);
 
         // (optional) asynchroneous tensor data access
-        void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-        void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
 
         // (optional) asynchroneous tensor copy
-        void (*cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*cpy_tensor_to_async) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*GGML_CALL cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+        void (*GGML_CALL cpy_tensor_to_async) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
 
-        void (*synchronize)(ggml_backend_t backend);
+        void (*GGML_CALL synchronize)(ggml_backend_t backend);
 
         // compute graph with a plan
-        ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
-        void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-        void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+        ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+        void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+        void (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
 
         // compute graph without a plan
-        bool (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+        bool (*GGML_CALL graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
 
         // check if the backend supports an operation
-        bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+        bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
     };
 
     struct ggml_backend {
@@ -107,7 +107,7 @@ extern "C" {
     // Backend registry
     //
 
-    typedef ggml_backend_t (*ggml_backend_init_fn)(const char * params, void * user_data);
+    typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data);
 
     void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
 
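A note on placement in ggml-backend-impl.h above: the annotation sits inside the pointer declarator, as in (*GGML_CALL alloc_buffer), so that when GGML_CALL expands to an ABI attribute it attaches to the pointed-to function type. The indirect call is only well-defined if the function stored in the field is declared with the same convention, which is why the definitions in ggml-backend.c below gain a matching GGML_CALL on the definition side. A minimal standalone sketch of the pattern, using hypothetical demo_* names and a local DEMO_CALL macro standing in for GGML_CALL:

    #include <stdio.h>

    #if defined(_WIN32)
    #define DEMO_CALL                              // MS ABI is already the native convention
    #else
    #define DEMO_CALL __attribute__((__ms_abi__))  // force the MS ABI on SysV platforms
    #endif

    // Interface struct in the style of ggml_backend_buffer_type_i: the attribute
    // inside the declarator annotates the pointed-to function type.
    struct demo_iface {
        size_t (*DEMO_CALL get_alignment)(void);
    };

    // The definition carries the same calling convention as the pointer it fills.
    DEMO_CALL static size_t demo_get_alignment(void) {
        return 64;
    }

    int main(void) {
        struct demo_iface iface = { demo_get_alignment };
        printf("alignment: %zu\n", iface.get_alignment()); // indirect call via the MS ABI
        return 0;
    }
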
ggml-backend.c

Lines changed: 31 additions & 31 deletions
@@ -15,15 +15,15 @@
 
 // backend buffer type
 
-ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     return buft->iface.alloc_buffer(buft, size);
 }
 
 size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
     return buft->iface.get_alignment(buft);
 }
 
-size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
+GGML_CALL size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
     // get_alloc_size is optional, defaults to ggml_nbytes
     if (buft->iface.get_alloc_size) {
         return buft->iface.get_alloc_size(buft, tensor);
@@ -44,7 +44,7 @@ bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
 
 // backend buffer
 
-ggml_backend_buffer_t ggml_backend_buffer_init(
+GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
         ggml_backend_buffer_type_t buft,
         struct ggml_backend_buffer_i iface,
         ggml_backend_buffer_context_t context,
@@ -86,7 +86,7 @@ void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
     return base;
 }
 
-void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+GGML_CALL void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
     // init_tensor is optional
     if (buffer->iface.init_tensor) {
         buffer->iface.init_tensor(buffer, tensor);
@@ -156,15 +156,15 @@ void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_ten
     backend->iface.get_tensor_async(backend, tensor, data, offset, size);
 }
 
-void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+GGML_CALL void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
     GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
     GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set");
     GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
 
     tensor->buffer->iface.set_tensor(tensor->buffer, tensor, data, offset, size);
 }
 
-void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
     GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
     GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set");
     GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
@@ -271,9 +271,9 @@ struct ggml_backend_reg {
 static struct ggml_backend_reg ggml_backend_registry[GGML_MAX_BACKENDS_REG];
 static size_t ggml_backend_registry_count = 0;
 
-static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data);
+GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data);
 
-static void ggml_backend_registry_init(void) {
+GGML_CALL static void ggml_backend_registry_init(void) {
     static bool initialized = false;
 
     if (initialized) {
@@ -392,39 +392,39 @@ ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size) {
 
 // backend CPU
 
-static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
+GGML_CALL static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
     return (void *)buffer->context;
 }
 
-static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+GGML_CALL static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     free(buffer->context);
 }
 
-static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+GGML_CALL static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
     memcpy((char *)tensor->data + offset, data, size);
 
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+GGML_CALL static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
     memcpy(data, (const char *)tensor->data + offset, size);
 
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
+GGML_CALL static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
     ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
 
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
+GGML_CALL static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) {
     ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
 
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+GGML_CALL static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
     memset(buffer->context, value, buffer->size);
 }
 
@@ -453,7 +453,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
 
 static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512
 
-static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+GGML_CALL static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
     void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?
 
@@ -462,25 +462,25 @@ static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_back
     return ggml_backend_buffer_init(buft, cpu_backend_buffer_i, data, size);
 }
 
-static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+GGML_CALL static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
     return TENSOR_ALIGNMENT;
 
     GGML_UNUSED(buft);
 }
 
-static bool ggml_backend_cpu_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
+GGML_CALL static bool ggml_backend_cpu_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    return ggml_backend_is_cpu(backend);
 
     GGML_UNUSED(buft);
 }
 
-static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
+GGML_CALL static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
     return true;
 
     GGML_UNUSED(buft);
 }
 
-ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
+GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
     static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
         /* .iface = */ {
             /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer,
@@ -544,20 +544,20 @@ struct ggml_backend_cpu_context {
     size_t work_size;
 };
 
-static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
+GGML_CALL static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
     return "CPU";
 
     GGML_UNUSED(backend);
 }
 
-static void ggml_backend_cpu_free(ggml_backend_t backend) {
+GGML_CALL static void ggml_backend_cpu_free(ggml_backend_t backend) {
     struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
     free(cpu_ctx->work_data);
     free(cpu_ctx);
     free(backend);
 }
 
-static ggml_backend_buffer_type_t ggml_backend_cpu_get_default_buffer_type(ggml_backend_t backend) {
+GGML_CALL static ggml_backend_buffer_type_t ggml_backend_cpu_get_default_buffer_type(ggml_backend_t backend) {
     return ggml_backend_cpu_buffer_type();
 
     GGML_UNUSED(backend);
@@ -568,7 +568,7 @@ struct ggml_backend_plan_cpu {
     struct ggml_cgraph cgraph;
 };
 
-static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+GGML_CALL static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
     struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
 
     struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));
@@ -583,7 +583,7 @@ static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend
     return cpu_plan;
 }
 
-static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+GGML_CALL static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
     struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
 
     free(cpu_plan->cplan.work_data);
@@ -592,15 +592,15 @@ static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backen
     GGML_UNUSED(backend);
 }
 
-static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+GGML_CALL static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
     struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
 
     ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);
 
     GGML_UNUSED(backend);
 }
 
-static bool ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+GGML_CALL static bool ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
     struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
 
     struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
@@ -617,7 +617,7 @@ static bool ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_c
     return true;
 }
 
-static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+GGML_CALL static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
     switch (op->op) {
         case GGML_OP_MUL_MAT:
             return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type).vec_dot_type;
@@ -660,7 +660,7 @@ ggml_backend_t ggml_backend_cpu_init(void) {
     return cpu_backend;
 }
 
-bool ggml_backend_is_cpu(ggml_backend_t backend) {
+GGML_CALL bool ggml_backend_is_cpu(ggml_backend_t backend) {
     return backend->iface.get_name == ggml_backend_cpu_name;
 }
 
@@ -671,11 +671,11 @@ void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
     ctx->n_threads = n_threads;
 }
 
-ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
+GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
     return ggml_backend_buffer_init(ggml_backend_cpu_buffer_type(), cpu_backend_buffer_i_from_ptr, ptr, size);
 }
 
-static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data) {
+GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user_data) {
     return ggml_backend_cpu_init();
 
     GGML_UNUSED(params);