
Commit 47844dc

slaren authored and ggerganov committed
llama : use cmake for swift build
1 parent d9c3ba2 commit 47844dc

File tree

5 files changed: +55 −106 lines


.github/workflows/build.yml

Lines changed: 43 additions & 29 deletions
@@ -552,35 +552,49 @@ jobs:
             -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
 
-  # TODO: tmp disabled. see for possible re-enable:
-  #       https://github.com/ggerganov/llama.cpp/pull/10525
-  # macOS-latest-swift:
-  #   runs-on: macos-latest
-  #
-  #   strategy:
-  #     matrix:
-  #       destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
-  #
-  #   steps:
-  #     - name: Clone
-  #       id: checkout
-  #       uses: actions/checkout@v4
-  #
-  #     - name: Dependencies
-  #       id: depends
-  #       continue-on-error: true
-  #       run: |
-  #         brew update
-  #
-  #     - name: xcodebuild for swift package
-  #       id: xcodebuild
-  #       run: |
-  #         xcodebuild -scheme llama -destination "${{ matrix.destination }}"
-  #
-  #     - name: Build Swift Example
-  #       id: make_build_swift_example
-  #       run: |
-  #         make swift
+  macOS-latest-swift:
+    runs-on: macos-latest
+
+    strategy:
+      matrix:
+        destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Dependencies
+        id: depends
+        continue-on-error: true
+        run: |
+          brew update
+
+      - name: Build llama.cpp with CMake
+        id: cmake_build
+        run: |
+          sysctl -a
+          mkdir build
+          cd build
+          cmake -G Xcode .. \
+            -DGGML_METAL_USE_BF16=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
+            -DLLAMA_BUILD_EXAMPLES=OFF \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DLLAMA_BUILD_SERVER=OFF \
+            -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
+          cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
+          sudo cmake --install . --config Release
+
+      - name: xcodebuild for swift package
+        id: xcodebuild
+        run: |
+          xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}"
+
+      - name: Build Swift Example
+        id: make_build_swift_example
+        run: |
+          make swift
 
   windows-msys2:
     runs-on: windows-latest

Package.swift

Lines changed: 2 additions & 76 deletions
@@ -2,60 +2,6 @@
 
 import PackageDescription
 
-var sources = [
-    "src/llama.cpp",
-    "src/llama-vocab.cpp",
-    "src/llama-grammar.cpp",
-    "src/llama-sampling.cpp",
-    "src/unicode.cpp",
-    "src/unicode-data.cpp",
-    "ggml/src/ggml.c",
-    "ggml/src/ggml-alloc.c",
-    "ggml/src/ggml-backend.cpp",
-    "ggml/src/ggml-backend-reg.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu.c",
-    "ggml/src/ggml-cpu/ggml-cpu.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
-    "ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
-    "ggml/src/ggml-threading.cpp",
-    "ggml/src/ggml-quants.c",
-]
-
-var resources: [Resource] = []
-var linkerSettings: [LinkerSetting] = []
-var cSettings: [CSetting] = [
-    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
-    .unsafeFlags(["-fno-objc-arc"]),
-    .headerSearchPath("ggml/src"),
-    .headerSearchPath("ggml/src/ggml-cpu"),
-    // NOTE: NEW_LAPACK will required iOS version 16.4+
-    // We should consider add this in the future when we drop support for iOS 14
-    // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
-    // .define("ACCELERATE_NEW_LAPACK"),
-    // .define("ACCELERATE_LAPACK_ILP64")
-    .define("GGML_USE_CPU"),
-]
-
-
-#if canImport(Darwin)
-sources.append("ggml/src/ggml-common.h")
-sources.append("ggml/src/ggml-metal/ggml-metal.m")
-resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
-linkerSettings.append(.linkedFramework("Accelerate"))
-cSettings.append(
-    contentsOf: [
-        .define("GGML_USE_ACCELERATE"),
-        .define("GGML_USE_METAL"),
-    ]
-)
-#endif
-
-#if os(Linux)
-cSettings.append(.define("_GNU_SOURCE"))
-#endif
-
 let package = Package(
     name: "llama",
     platforms: [
@@ -68,26 +14,6 @@ let package = Package(
         .library(name: "llama", targets: ["llama"]),
     ],
     targets: [
-        .target(
-            name: "llama",
-            path: ".",
-            exclude: [
-                "build",
-                "cmake",
-                "examples",
-                "scripts",
-                "models",
-                "tests",
-                "CMakeLists.txt",
-                "Makefile",
-                "ggml/src/ggml-metal-embed.metal"
-            ],
-            sources: sources,
-            resources: resources,
-            publicHeadersPath: "spm-headers",
-            cSettings: cSettings,
-            linkerSettings: linkerSettings
-        )
-    ],
-    cxxLanguageStandard: .cxx17
+        .systemLibrary(name: "llama", pkgConfig: "llama"),
+    ]
 )
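
Note: the manifest no longer compiles the C/C++ sources itself; it only exposes a .systemLibrary target resolved through pkg-config, so a prebuilt libllama must already be installed (for example via cmake --install, as the workflow above does). Below is a minimal sketch of a downstream manifest that consumes it; the consumer package name, platforms, and branch pin are illustrative assumptions, not part of this commit.

    // swift-tools-version:5.9
    // Hypothetical consumer manifest. Assumes llama.cpp was built and installed
    // with CMake so that pkg-config can locate llama.pc at build time.
    import PackageDescription

    let package = Package(
        name: "MyLlamaApp",                      // illustrative name
        platforms: [.macOS(.v12), .iOS(.v14)],
        dependencies: [
            // Any revision that contains this commit works the same way.
            .package(url: "https://github.com/ggerganov/llama.cpp", branch: "master")
        ],
        targets: [
            .executableTarget(
                name: "MyLlamaApp",
                dependencies: ["llama"]          // the "llama" library product exported above
            )
        ]
    )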

Sources/llama/llama.h

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+#pragma once
+
+#include <llama.h>
+

Sources/llama/module.modulemap

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+module llama [system] {
+    header "llama.h"
+    link "llama"
+    export *
+}
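
Note: this module map is what makes the installed C header usable from Swift as import llama. A minimal smoke test follows, under the same assumption that libllama and llama.pc are installed where pkg-config can find them; llama_backend_init, llama_print_system_info, and llama_backend_free are existing functions from llama.h.

    import llama

    // Initialize the ggml/llama backend, print the detected system features,
    // then release the backend again.
    llama_backend_init()
    if let info = llama_print_system_info() {
        print(String(cString: info))
    }
    llama_backend_free()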

cmake/llama.pc.in

Lines changed: 1 addition & 1 deletion
@@ -6,5 +6,5 @@ includedir=${prefix}/include
 Name: llama
 Description: Port of Facebook's LLaMA model in C/C++
 Version: @PROJECT_VERSION@
-Libs: -L${libdir} -lllama
+Libs: -L${libdir} -lggml -lggml-base -lllama
 Cflags: -I${includedir}

0 commit comments
