Skip to content

Commit b88957e

Browse files
committed
rename GGML_USE_OPENBLAS to GGML_USE_BLAS
1 parent 7f58793 commit b88957e

File tree

5 files changed

+10
-13
lines changed

5 files changed

+10
-13
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -374,7 +374,7 @@ if (LLAMA_BLAS)
374374

375375
add_compile_options(${BLAS_LINKER_FLAGS})
376376

377-
add_compile_definitions(GGML_USE_OPENBLAS)
377+
add_compile_definitions(GGML_USE_BLAS)
378378

379379
if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
380380
add_compile_definitions(GGML_BLAS_USE_MKL)

Makefile

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -419,21 +419,21 @@ ifndef LLAMA_NO_OPENMP
419419
endif # LLAMA_NO_OPENMP
420420

421421
ifdef LLAMA_OPENBLAS
422-
MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas)
422+
MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas)
423423
MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas)
424424
MK_LDFLAGS += $(shell pkg-config --libs openblas)
425425
OBJS += ggml-blas.o
426426
endif # LLAMA_OPENBLAS
427427

428428
ifdef LLAMA_OPENBLAS64
429-
MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas64)
429+
MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64)
430430
MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas64)
431431
MK_LDFLAGS += $(shell pkg-config --libs openblas64)
432432
OBJS += ggml-blas.o
433433
endif # LLAMA_OPENBLAS64
434434

435435
ifdef LLAMA_BLIS
436-
MK_CPPFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
436+
MK_CPPFLAGS += -DGGML_USE_BLAS -I/usr/local/include/blis -I/usr/include/blis
437437
MK_LDFLAGS += -lblis -L/usr/local/lib
438438
OBJS += ggml-blas.o
439439
endif # LLAMA_BLIS

ggml-blas.c

Lines changed: 1 addition & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,7 @@
55

66
#if defined(GGML_USE_ACCELERATE)
77
# include <Accelerate/Accelerate.h>
8-
#elif defined(GGML_USE_OPENBLAS)
8+
#elif defined(GGML_USE_BLAS)
99
# if defined(GGML_BLAS_USE_MKL)
1010
# include <mkl.h>
1111
# else
@@ -25,9 +25,6 @@ static bool ggml_compute_forward_mul_mat_use_blas(const struct ggml_tensor * dst
2525
const struct ggml_tensor * src0 = dst->src[0];
2626
const struct ggml_tensor * src1 = dst->src[1];
2727

28-
//const int64_t ne00 = src0->ne[0];
29-
//const int64_t ne01 = src0->ne[1];
30-
3128
const int64_t ne10 = src1->ne[0];
3229

3330
const int64_t ne0 = dst->ne[0];

ggml.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -22645,7 +22645,7 @@ int ggml_cpu_has_wasm_simd(void) {
2264522645
}
2264622646

2264722647
int ggml_cpu_has_blas(void) {
22648-
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL)
22648+
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_BLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL)
2264922649
return 1;
2265022650
#else
2265122651
return 0;

llama.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -21,7 +21,7 @@
2121
# include "ggml-kompute.h"
2222
#endif
2323

24-
#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
24+
#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
2525
# include "ggml-blas.h"
2626
#endif
2727

@@ -2303,7 +2303,7 @@ struct llama_context {
23032303
#ifdef GGML_USE_METAL
23042304
ggml_backend_t backend_metal = nullptr;
23052305
#endif
2306-
#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
2306+
#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
23072307
ggml_backend_t backend_blas = nullptr;
23082308
#endif
23092309
ggml_backend_t backend_cpu = nullptr;
@@ -12025,7 +12025,7 @@ static void llama_graph_compute(
1202512025
ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
1202612026
ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data);
1202712027
}
12028-
#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
12028+
#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
1202912029
if (lctx.backend_blas != nullptr) {
1203012030
ggml_backend_blas_set_n_threads(lctx.backend_blas, n_threads);
1203112031
}
@@ -16240,7 +16240,7 @@ struct llama_context * llama_new_context_with_model(
1624016240
}
1624116241
#endif
1624216242

16243-
#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
16243+
#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
1624416244
ctx->backend_blas = ggml_backend_blas_init();
1624516245
if (ctx->backend_blas == nullptr) {
1624616246
LLAMA_LOG_WARN("%s: failed to initialize BLAS backend\n", __func__);

0 commit comments

Comments (0)