Commit 27432b6

llama : use cmake for swift build

1 parent 249cd93

5 files changed (+29, -75 lines)

.github/workflows/build.yml

Lines changed: 17 additions & 1 deletion

@@ -660,10 +660,26 @@ jobs:
       run: |
         brew update

+    - name: Build llama.cpp with CMake
+      id: cmake_build
+      run: |
+        sysctl -a
+        mkdir build
+        cd build
+        cmake -G Xcode .. \
+          -DGGML_METAL_USE_BF16=ON \
+          -DGGML_METAL_EMBED_LIBRARY=ON \
+          -DLLAMA_BUILD_EXAMPLES=OFF \
+          -DLLAMA_BUILD_TESTS=OFF \
+          -DLLAMA_BUILD_SERVER=OFF \
+          -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
+        cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
+        sudo cmake --install . --config Release
+
     - name: xcodebuild for swift package
       id: xcodebuild
       run: |
-        xcodebuild -scheme llama -destination "${{ matrix.destination }}"
+        xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}"

     - name: Build Swift Example
       id: make_build_swift_example

Package.swift

Lines changed: 2 additions & 73 deletions

@@ -2,57 +2,6 @@

 import PackageDescription

-var sources = [
-    "src/llama.cpp",
-    "src/llama-vocab.cpp",
-    "src/llama-grammar.cpp",
-    "src/llama-sampling.cpp",
-    "src/unicode.cpp",
-    "src/unicode-data.cpp",
-    "ggml/src/ggml.c",
-    "ggml/src/ggml-aarch64.c",
-    "ggml/src/ggml-alloc.c",
-    "ggml/src/ggml-backend.cpp",
-    "ggml/src/ggml-backend-reg.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu.c",
-    "ggml/src/ggml-cpu/ggml-cpu.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-aarch64.c",
-    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
-    "ggml/src/ggml-threading.cpp",
-    "ggml/src/ggml-quants.c",
-]
-
-var resources: [Resource] = []
-var linkerSettings: [LinkerSetting] = []
-var cSettings: [CSetting] = [
-    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
-    .unsafeFlags(["-fno-objc-arc"]),
-    .headerSearchPath("ggml/src"),
-    // NOTE: NEW_LAPACK will required iOS version 16.4+
-    // We should consider add this in the future when we drop support for iOS 14
-    // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
-    // .define("ACCELERATE_NEW_LAPACK"),
-    // .define("ACCELERATE_LAPACK_ILP64")
-]
-
-#if canImport(Darwin)
-sources.append("ggml/src/ggml-common.h")
-sources.append("ggml/src/ggml-metal/ggml-metal.m")
-resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
-linkerSettings.append(.linkedFramework("Accelerate"))
-cSettings.append(
-    contentsOf: [
-        .define("GGML_USE_ACCELERATE"),
-        .define("GGML_USE_METAL"),
-        .define("GGML_USE_CPU")
-    ]
-)
-#endif
-
-#if os(Linux)
-cSettings.append(.define("_GNU_SOURCE"))
-#endif
-
 let package = Package(
     name: "llama",
     platforms: [
@@ -65,26 +14,6 @@ let package = Package(
         .library(name: "llama", targets: ["llama"]),
     ],
     targets: [
-        .target(
-            name: "llama",
-            path: ".",
-            exclude: [
-                "build",
-                "cmake",
-                "examples",
-                "scripts",
-                "models",
-                "tests",
-                "CMakeLists.txt",
-                "Makefile",
-                "ggml/src/ggml-metal-embed.metal"
-            ],
-            sources: sources,
-            resources: resources,
-            publicHeadersPath: "spm-headers",
-            cSettings: cSettings,
-            linkerSettings: linkerSettings
-        )
-    ],
-    cxxLanguageStandard: .cxx11
+        .systemLibrary(name: "llama", pkgConfig: "llama"),
+    ]
 )
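With this change the Swift package no longer compiles the C/C++ sources itself: the llama target becomes a system-library target resolved through pkg-config, so libllama must be built and installed (for example with the CMake steps added to the workflow above) before SwiftPM can resolve it. As a minimal, illustrative sketch of a downstream manifest (the app name, platform, and branch choice are hypothetical; the repository URL and the llama product name come from this diff):

// swift-tools-version:5.5
// Package.swift of a hypothetical consumer of the llama package.
import PackageDescription

let package = Package(
    name: "llama-demo",                 // hypothetical app name
    platforms: [.macOS(.v12)],          // assumption; set to what the app needs
    dependencies: [
        // llama.cpp's Package.swift now exposes "llama" as a system library.
        .package(url: "https://github.com/ggerganov/llama.cpp", branch: "master"),
    ],
    targets: [
        .executableTarget(
            name: "llama-demo",
            dependencies: [.product(name: "llama", package: "llama.cpp")]
        )
    ]
)

SwiftPM locates the headers and link flags for the .systemLibrary target through the installed llama.pc file rather than by compiling sources, which is why the sources/cSettings/linkerSettings lists above could be removed.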

Sources/llama/llama.h

Lines changed: 4 additions & 0 deletions

@@ -0,0 +1,4 @@
+#pragma once
+
+#include <llama.h>
+

Sources/llama/module.modulemap

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+module llama [system] {
+    header "llama.h"
+    link "llama"
+    export *
+}
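The module map wraps the installed C header so Swift code can import the module and call the C API directly. A minimal smoke test, assuming llama.h and libllama have been installed where pkg-config can find them (the functions used here are part of the public C API declared in llama.h):

import llama

// Initialize the backend, print the features the library was built with,
// then release the backend again.
llama_backend_init()
print(String(cString: llama_print_system_info()))
llama_backend_free()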

cmake/llama.pc.in

Lines changed: 1 addition & 1 deletion

@@ -6,5 +6,5 @@ includedir=${prefix}/include
 Name: llama
 Description: Port of Facebook's LLaMA model in C/C++
 Version: @PROJECT_VERSION@
-Libs: -L${libdir} -lllama
+Libs: -L${libdir} -lggml -lggml-base -lllama
 Cflags: -I${includedir}
