2 files changed (+22, −3)

**Makefile**

````diff
@@ -235,13 +235,15 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
 endif # LLAMA_CUBLAS
 
 ifdef LLAMA_CLBLAST
-    CFLAGS   += -DGGML_USE_CLBLAST
-    CXXFLAGS += -DGGML_USE_CLBLAST
+
+    CFLAGS   += -DGGML_USE_CLBLAST $(shell pkg-config --cflags clblast OpenCL)
+    CXXFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags clblast OpenCL)
+
     # Mac provides OpenCL as a framework
     ifeq ($(UNAME_S),Darwin)
         LDFLAGS += -lclblast -framework OpenCL
     else
-        LDFLAGS += -lclblast -lOpenCL
+        LDFLAGS += $(shell pkg-config --libs clblast OpenCL)
     endif
     OBJS += ggml-opencl.o
````
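For anyone checking the new flags locally, here is a minimal sketch (assuming `clblast` and `OpenCL` ship `.pc` files, as the packages listed below do) of what the Makefile will now pull in via `pkg-config`:

```bash
# Compile flags the CLBlast path of the Makefile will append to CFLAGS/CXXFLAGS.
pkg-config --cflags clblast OpenCL

# Link flags substituted into LDFLAGS on non-Darwin systems.
pkg-config --libs clblast OpenCL

# Non-zero exit status means a .pc file is missing, which would leave the
# expected include and library paths out of the build.
pkg-config --exists clblast OpenCL && echo "CLBlast + OpenCL found"
```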
**README.md**

````diff
@@ -242,6 +242,23 @@ In order to build llama.cpp you have three different options.
     zig build -Doptimize=ReleaseFast
     ```
 
+- Using `gmake` (FreeBSD):
+
+  1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics)
+  2. Add your user to the **video** group
+  3. Install compilation dependencies:
+
+     ```bash
+     sudo pkg install gmake automake autoconf pkgconf llvm15 clinfo clover \
+         opencl clblast openblas
+
+     gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4
+     ```
+
+  **Notes:** With these packages you can build llama.cpp with OPENBLAS and
+  CLBLAST support, enabling OpenCL GPU acceleration on FreeBSD. Please read
+  the instructions below in this document to use and activate these options.
+
 ### Metal Build
 
 Using Metal allows the computation to be executed on the GPU for Apple devices:
````
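As a follow-up to the FreeBSD note above, a hedged sketch of verifying the OpenCL device and then enabling the back ends at build time; `LLAMA_CLBLAST` and `LLAMA_OPENBLAS` are assumed to be the make options documented further down in this README:

```bash
# Confirm the clover OpenCL ICD exposes a device; clinfo comes from the
# package list above.
clinfo | grep -i 'device name'

# Build with CLBlast (OpenCL) support; pkg-config supplies the CLBlast and
# OpenCL flags via the Makefile change above.
gmake LLAMA_CLBLAST=1 CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4

# Or build with OpenBLAS instead (same caveat about the flag name):
# gmake LLAMA_OPENBLAS=1 CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4
```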