
Commit 855b808 (1 parent: 6a29083)

chore(script): migrate towards repo implementations

File tree

8 files changed: +44, -797 lines

.github/workflows/build.yml

Lines changed: 0 additions & 661 deletions
This file was deleted.

.github/workflows/docker.yml

Lines changed: 0 additions & 65 deletions
This file was deleted.

.github/workflows/editorconfig.yml

Lines changed: 0 additions & 17 deletions
This file was deleted.

.github/workflows/tidy-post.yml

Lines changed: 0 additions & 20 deletions
This file was deleted.

.github/workflows/tidy-review.yml

Lines changed: 0 additions & 23 deletions
This file was deleted.

examples/alpaca.sh

Lines changed: 6 additions & 6 deletions
@@ -7,13 +7,13 @@
 cd `dirname $0`
 cd ..
 
-./main -f ./prompts/alpaca.txt \
-    -n 1 \
-    -t 7 \
-    -b 256 \
-    -ins \
+./main -m ./models/alpaca.13b.ggmlv3.q8_0.bin \
     --color \
+    -f ./prompts/alpaca.txt \
     --ctx_size 2048 \
+    -n -1 \
+    -ins -b 256 \
     --top_k 10000 \
     --temp 0.2 \
-    --repeat_penalty 1.1
+    --repeat_penalty 1.1 \
+    -t 7
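
The functional changes to alpaca.sh are the hardcoded 13B GGMLv3 model path and -n 1 becoming -n -1, which llama.cpp treats as "predict until generation stops" rather than a single token. A minimal way to exercise the updated script, assuming the model file named in the diff is in place:

# Run from anywhere; the script cd's to the repository root itself.
# Assumes ./models/alpaca.13b.ggmlv3.q8_0.bin has been downloaded;
# -ins starts an interactive, instruction-following (Alpaca-style) session.
./examples/alpaca.sh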

examples/chat-13B.sh

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ set -e
 
 cd "$(dirname "$0")/.." || exit
 
-MODEL="${MODEL:-../models/13B/ggml-model-q4_0.bin}"
+MODEL="${MODEL:-./models/13B/ggml-model-q8_0.gguf}"
 PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt}
 USER_NAME="${USER_NAME:-USER}"
 AI_NAME="${AI_NAME:-LLaMa}"
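
Because each setting in chat-13B.sh uses bash's ${VAR:-default} expansion, the new q8_0 GGUF default can be overridden per invocation without editing the script. A sketch (the 7B path below is illustrative, not part of this commit):

# One-off override for a single run:
MODEL=./models/7B/ggml-model-q4_0.gguf USER_NAME=Alice ./examples/chat-13B.sh

# Or export once for the whole shell session:
export MODEL=./models/7B/ggml-model-q4_0.gguf
./examples/chat-13B.sh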

llama.sh

Lines changed: 37 additions & 4 deletions
@@ -1,7 +1,40 @@
 #!/bin/bash
-#
-# Temporary script - will be removed in the future
-#
 
-./main -m ../models/13B/ggml-model-q4_0.bin -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat.txt
+set -e
 
+MODEL="${MODEL:-./models/13B/ggml-model-q8_0.gguf}"
+PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt}
+USER_NAME="${USER_NAME:-Matt}"
+AI_NAME="${AI_NAME:-LLaMa}"
+
+# Adjust to the number of CPU cores you want to use.
+N_THREAD="${N_THREAD:-8}"
+
+# Number of tokens to predict (made it larger than default because we want a long interaction)
+N_PREDICTS="${N_PREDICTS:-2048}"
+
+# Note: you can also override the generation options by specifying them on the command line:
+# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024
+GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}"
+
+DATE_TIME=$(date +%H:%M)
+DATE_YEAR=$(date +%Y)
+
+PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt)
+
+sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
+    -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \
+    -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \
+    -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \
+    $PROMPT_TEMPLATE > $PROMPT_FILE
+
+# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
+./main $GEN_OPTIONS \
+    --model "$MODEL" \
+    --threads "$N_THREAD" \
+    --n_predict "$N_PREDICTS" \
+    --color --interactive \
+    --file ${PROMPT_FILE} \
+    --reverse-prompt "${USER_NAME}:" \
+    --in-prefix ' ' \
+    "$@"
