Skip to content

Commit 2ad9162

Browse files
committed
Revert "llama : use cmake for swift build (ggml-org#10525)"
This reverts commit 43ed389.
1 parent 6c14ee3 commit 2ad9162

File tree

7 files changed

+131
-81
lines changed

7 files changed

+131
-81
lines changed

.github/workflows/build.yml

Lines changed: 46 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -553,44 +553,35 @@ jobs:
553553
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
554554
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
555555
556-
macOS-latest-swift:
557-
runs-on: macos-latest
558-
559-
strategy:
560-
matrix:
561-
destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
562-
563-
steps:
564-
- name: Clone
565-
id: checkout
566-
uses: actions/checkout@v4
567-
568-
- name: Dependencies
569-
id: depends
570-
continue-on-error: true
571-
run: |
572-
brew update
573-
574-
- name: Build llama.cpp with CMake
575-
id: cmake_build
576-
run: |
577-
sysctl -a
578-
mkdir build
579-
cd build
580-
cmake -G Xcode .. \
581-
-DGGML_METAL_USE_BF16=ON \
582-
-DGGML_METAL_EMBED_LIBRARY=ON \
583-
-DLLAMA_BUILD_EXAMPLES=OFF \
584-
-DLLAMA_BUILD_TESTS=OFF \
585-
-DLLAMA_BUILD_SERVER=OFF \
586-
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
587-
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
588-
sudo cmake --install . --config Release
589-
590-
- name: xcodebuild for swift package
591-
id: xcodebuild
592-
run: |
593-
xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}"
556+
# TODO: tmp disabled. see for possible re-enable:
557+
# https://github.com/ggerganov/llama.cpp/pull/10525
558+
# macOS-latest-swift:
559+
# runs-on: macos-latest
560+
#
561+
# strategy:
562+
# matrix:
563+
# destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
564+
#
565+
# steps:
566+
# - name: Clone
567+
# id: checkout
568+
# uses: actions/checkout@v4
569+
#
570+
# - name: Dependencies
571+
# id: depends
572+
# continue-on-error: true
573+
# run: |
574+
# brew update
575+
#
576+
# - name: xcodebuild for swift package
577+
# id: xcodebuild
578+
# run: |
579+
# xcodebuild -scheme llama -destination "${{ matrix.destination }}"
580+
#
581+
# - name: Build Swift Example
582+
# id: make_build_swift_example
583+
# run: |
584+
# make swift
594585

595586
windows-msys2:
596587
runs-on: windows-latest
@@ -1161,29 +1152,6 @@ jobs:
11611152
- name: Checkout code
11621153
uses: actions/checkout@v4
11631154

1164-
- name: Build
1165-
id: cmake_build
1166-
run: |
1167-
sysctl -a
1168-
mkdir build
1169-
cd build
1170-
cmake -G Xcode .. \
1171-
-DGGML_METAL_USE_BF16=ON \
1172-
-DGGML_METAL_EMBED_LIBRARY=ON \
1173-
-DLLAMA_BUILD_EXAMPLES=OFF \
1174-
-DLLAMA_BUILD_TESTS=OFF \
1175-
-DLLAMA_BUILD_SERVER=OFF \
1176-
-DCMAKE_SYSTEM_NAME=iOS \
1177-
-DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
1178-
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
1179-
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
1180-
sudo cmake --install . --config Release
1181-
1182-
- name: xcodebuild for swift package
1183-
id: xcodebuild
1184-
run: |
1185-
xcodebuild -scheme llama-Package -destination 'generic/platform=iOS'
1186-
11871155
- name: Build Xcode project
11881156
run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
11891157

@@ -1211,6 +1179,23 @@ jobs:
12111179
12121180
./gradlew build --no-daemon
12131181
1182+
# freeBSD-latest:
1183+
# runs-on: macos-12
1184+
# steps:
1185+
# - name: Clone
1186+
# uses: actions/checkout@v4
1187+
#
1188+
# - name: Build
1189+
# uses: cross-platform-actions/[email protected]
1190+
# with:
1191+
# operating_system: freebsd
1192+
# version: '13.2'
1193+
# hypervisor: 'qemu'
1194+
# run: |
1195+
# sudo pkg update
1196+
# sudo pkg install -y gmake automake autoconf pkgconf llvm15 openblas
1197+
# gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j `sysctl -n hw.ncpu`
1198+
12141199
release:
12151200
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
12161201

Package.swift

Lines changed: 76 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,60 @@
22

33
import PackageDescription
44

5+
var sources = [
6+
"src/llama.cpp",
7+
"src/llama-vocab.cpp",
8+
"src/llama-grammar.cpp",
9+
"src/llama-sampling.cpp",
10+
"src/unicode.cpp",
11+
"src/unicode-data.cpp",
12+
"ggml/src/ggml.c",
13+
"ggml/src/ggml-alloc.c",
14+
"ggml/src/ggml-backend.cpp",
15+
"ggml/src/ggml-backend-reg.cpp",
16+
"ggml/src/ggml-cpu/ggml-cpu.c",
17+
"ggml/src/ggml-cpu/ggml-cpu.cpp",
18+
"ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
19+
"ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
20+
"ggml/src/ggml-cpu/ggml-cpu-quants.c",
21+
"ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
22+
"ggml/src/ggml-threading.cpp",
23+
"ggml/src/ggml-quants.c",
24+
]
25+
26+
var resources: [Resource] = []
27+
var linkerSettings: [LinkerSetting] = []
28+
var cSettings: [CSetting] = [
29+
.unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
30+
.unsafeFlags(["-fno-objc-arc"]),
31+
.headerSearchPath("ggml/src"),
32+
.headerSearchPath("ggml/src/ggml-cpu"),
33+
// NOTE: NEW_LAPACK will require iOS version 16.4+
34+
// We should consider adding this in the future when we drop support for iOS 14
35+
// (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
36+
// .define("ACCELERATE_NEW_LAPACK"),
37+
// .define("ACCELERATE_LAPACK_ILP64")
38+
.define("GGML_USE_CPU"),
39+
]
40+
41+
42+
#if canImport(Darwin)
43+
sources.append("ggml/src/ggml-common.h")
44+
sources.append("ggml/src/ggml-metal/ggml-metal.m")
45+
resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
46+
linkerSettings.append(.linkedFramework("Accelerate"))
47+
cSettings.append(
48+
contentsOf: [
49+
.define("GGML_USE_ACCELERATE"),
50+
.define("GGML_USE_METAL"),
51+
]
52+
)
53+
#endif
54+
55+
#if os(Linux)
56+
cSettings.append(.define("_GNU_SOURCE"))
57+
#endif
58+
559
let package = Package(
660
name: "llama",
761
platforms: [
@@ -14,6 +68,26 @@ let package = Package(
1468
.library(name: "llama", targets: ["llama"]),
1569
],
1670
targets: [
17-
.systemLibrary(name: "llama", pkgConfig: "llama"),
18-
]
71+
.target(
72+
name: "llama",
73+
path: ".",
74+
exclude: [
75+
"build",
76+
"cmake",
77+
"examples",
78+
"scripts",
79+
"models",
80+
"tests",
81+
"CMakeLists.txt",
82+
"Makefile",
83+
"ggml/src/ggml-metal-embed.metal"
84+
],
85+
sources: sources,
86+
resources: resources,
87+
publicHeadersPath: "spm-headers",
88+
cSettings: cSettings,
89+
linkerSettings: linkerSettings
90+
)
91+
],
92+
cxxLanguageStandard: .cxx17
1993
)

Sources/llama/llama.h

Lines changed: 0 additions & 4 deletions
This file was deleted.

Sources/llama/module.modulemap

Lines changed: 0 additions & 5 deletions
This file was deleted.

cmake/llama.pc.in

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,5 +6,5 @@ includedir=${prefix}/include
66
Name: llama
77
Description: Port of Facebook's LLaMA model in C/C++
88
Version: @PROJECT_VERSION@
9-
Libs: -L${libdir} -lggml -lggml-base -lllama
9+
Libs: -L${libdir} -lllama
1010
Cflags: -I${includedir}

examples/llama.swiftui/llama.cpp.swift/LibLlama.swift

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -210,20 +210,20 @@ actor LlamaContext {
210210

211211
llama_kv_cache_clear(context)
212212

213-
let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000;
213+
let t_pp_start = ggml_time_us()
214214

215215
if llama_decode(context, batch) != 0 {
216216
print("llama_decode() failed during prompt")
217217
}
218218
llama_synchronize(context)
219219

220-
let t_pp_end = DispatchTime.now().uptimeNanoseconds / 1000;
220+
let t_pp_end = ggml_time_us()
221221

222222
// bench text generation
223223

224224
llama_kv_cache_clear(context)
225225

226-
let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000;
226+
let t_tg_start = ggml_time_us()
227227

228228
for i in 0..<tg {
229229
llama_batch_clear(&batch)
@@ -238,7 +238,7 @@ actor LlamaContext {
238238
llama_synchronize(context)
239239
}
240240

241-
let t_tg_end = DispatchTime.now().uptimeNanoseconds / 1000;
241+
let t_tg_end = ggml_time_us()
242242

243243
llama_kv_cache_clear(context)
244244

examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
objects = {
88

99
/* Begin PBXBuildFile section */
10-
1809696D2D05A39F00400EE8 /* llama in Frameworks */ = {isa = PBXBuildFile; productRef = 1809696C2D05A39F00400EE8 /* llama */; };
1110
549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 549479CA2AC9E16000E0F78B /* Metal.framework */; };
1211
79E1D9CD2B4CD16E005F8E46 /* InputButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 79E1D9CC2B4CD16E005F8E46 /* InputButton.swift */; };
1312
7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */; };
@@ -18,6 +17,7 @@
1817
8A3F84242AC4C891005E2EE8 /* models in Resources */ = {isa = PBXBuildFile; fileRef = 8A3F84232AC4C891005E2EE8 /* models */; };
1918
8A907F332AC7138A006146EA /* LibLlama.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A907F322AC7134E006146EA /* LibLlama.swift */; };
2019
8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */; };
20+
DF810E132B4A5BA200301144 /* llama in Frameworks */ = {isa = PBXBuildFile; productRef = DF810E122B4A5BA200301144 /* llama */; };
2121
F1FE20E22B465ECA00B45541 /* LoadCustomButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */; };
2222
/* End PBXBuildFile section */
2323

@@ -42,7 +42,7 @@
4242
isa = PBXFrameworksBuildPhase;
4343
buildActionMask = 2147483647;
4444
files = (
45-
1809696D2D05A39F00400EE8 /* llama in Frameworks */,
45+
DF810E132B4A5BA200301144 /* llama in Frameworks */,
4646
549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */,
4747
8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */,
4848
);
@@ -151,7 +151,7 @@
151151
);
152152
name = llama.swiftui;
153153
packageProductDependencies = (
154-
1809696C2D05A39F00400EE8 /* llama */,
154+
DF810E122B4A5BA200301144 /* llama */,
155155
);
156156
productName = llama.swiftui;
157157
productReference = 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */;
@@ -429,7 +429,7 @@
429429
/* End XCConfigurationList section */
430430

431431
/* Begin XCSwiftPackageProductDependency section */
432-
1809696C2D05A39F00400EE8 /* llama */ = {
432+
DF810E122B4A5BA200301144 /* llama */ = {
433433
isa = XCSwiftPackageProductDependency;
434434
productName = llama;
435435
};

0 commit comments

Comments
 (0)