
Commit 075ee61

Merge branch 'master' into sync
ggml-ci
2 parents: 7f8e2a5 + 8f961ab

File tree: 8 files changed (+62, -38 lines)


README.md

Lines changed: 1 addition & 3 deletions
@@ -2,7 +2,6 @@
 
 ![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png)
 
-[![Actions Status](https://github.com/ggerganov/llama.cpp/workflows/CI/badge.svg)](https://github.com/ggerganov/llama.cpp/actions)
 [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 
 [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml)
@@ -11,8 +10,7 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
 
 ### Hot topics
 
-- LLaVA support: https://github.com/ggerganov/llama.cpp/pull/3436
-- ‼️ BPE tokenizer update: existing Falcon and Starcoder `.gguf` models will need to be reconverted: [#3252](https://github.com/ggerganov/llama.cpp/pull/3252)
+- ⚠️ **Upcoming change that might break functionality. Help with testing is needed:** https://github.com/ggerganov/llama.cpp/pull/3912
 
 ----

common/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git")
     if(NOT IS_DIRECTORY "${GIT_DIR}")
         file(READ ${GIT_DIR} REAL_GIT_DIR_LINK)
         string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" REAL_GIT_DIR ${REAL_GIT_DIR_LINK})
-        set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${REAL_GIT_DIR}")
+        set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${REAL_GIT_DIR}")
     endif()
 
     set(GIT_INDEX "${GIT_DIR}/index")
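
Background for the one-line fix: when llama.cpp is checked out as a git submodule, ../.git is a plain file rather than a directory, containing a single "gitdir:" pointer such as (illustrative path)

    gitdir: ../.git/modules/llama.cpp

and that path is relative to the repository root. Resolving it against ${CMAKE_CURRENT_SOURCE_DIR} (the common/ directory) produced a wrong GIT_DIR, so the GIT_INDEX path below pointed at a non-existent location; resolving against ${CMAKE_CURRENT_SOURCE_DIR}/.. fixes it.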

common/common.cpp

Lines changed: 14 additions & 0 deletions
@@ -403,6 +403,18 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.n_sequences = std::stoi(argv[i]);
+        } else if (arg == "--p-accept" || arg == "-pa") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.p_accept = std::stof(argv[i]);
+        } else if (arg == "--p-split" || arg == "-ps") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.p_split = std::stof(argv[i]);
         } else if (arg == "-m" || arg == "--model") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -778,6 +790,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
     printf("  -np N, --parallel N   number of parallel sequences to decode (default: %d)\n", params.n_parallel);
     printf("  -ns N, --sequences N  number of sequences to decode (default: %d)\n", params.n_sequences);
+    printf("  -pa N, --p-accept N   speculative decoding accept probability (default: %.1f)\n", (double)params.p_accept);
+    printf("  -ps N, --p-split N    speculative decoding split probability (default: %.1f)\n", (double)params.p_split);
     printf("  -cb, --cont-batching  enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
     printf("  --mmproj MMPROJ_FILE  path to a multimodal projector file for LLaVA. see examples/llava/README.md\n");
     printf("  --image IMAGE_FILE    path to an image file. use with multimodal models\n");

common/common.h

Lines changed: 28 additions & 24 deletions
@@ -43,30 +43,34 @@ extern char const *LLAMA_BUILD_TARGET;
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    uint32_t seed = -1; // RNG seed
+    uint32_t seed = -1; // RNG seed
+
     int32_t n_threads = get_num_physical_cores();
-    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
-    int32_t n_predict = -1; // new tokens to predict
-    int32_t n_ctx = 512; // context size
-    int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_keep = 0; // number of tokens to keep from initial prompt
-    int32_t n_draft = 16; // number of tokens to draft during speculative decoding
-    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
-    int32_t n_parallel = 1; // number of parallel sequences to decode
-    int32_t n_sequences = 1; // number of sequences to decode
-    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
-    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
-    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
-    float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
-    int32_t n_beams = 0; // if non-zero then use beam search of given width.
-    float rope_freq_base = 0.0f; // RoPE base frequency
-    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
-    float yarn_ext_factor = NAN; // YaRN extrapolation mix factor
-    float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor
-    float yarn_beta_fast = 32.0f;// YaRN low correction dim
-    float yarn_beta_slow = 1.0f; // YaRN high correction dim
-    int32_t yarn_orig_ctx = 0; // YaRN original context length
-    int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED;
+    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
+    int32_t n_predict = -1; // new tokens to predict
+    int32_t n_ctx = 512; // context size
+    int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_keep = 0; // number of tokens to keep from initial prompt
+    int32_t n_draft = 16; // number of tokens to draft during speculative decoding
+    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
+    int32_t n_parallel = 1; // number of parallel sequences to decode
+    int32_t n_sequences = 1; // number of sequences to decode
+    float p_accept = 0.5f; // speculative decoding accept probability
+    float p_split = 0.1f; // speculative decoding split probability
+    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
+    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
+    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
+    float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
+    int32_t n_beams = 0; // if non-zero then use beam search of given width.
+    float rope_freq_base = 0.0f; // RoPE base frequency
+    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
+    float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
+    float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor
+    float yarn_beta_fast = 32.0f; // YaRN low correction dim
+    float yarn_beta_slow = 1.0f; // YaRN high correction dim
+    int32_t yarn_orig_ctx = 0; // YaRN original context length
+    int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; // TODO: better to be int32_t for alignment
+                                                               // pinging @cebtenzzre
 
     // // sampling parameters
     struct llama_sampling_params sparams;
@@ -90,7 +94,7 @@ struct gpt_params {
     int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
                              // (which is more convenient to use for plotting)
                              //
-    bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
+    bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
     size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
 
     bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS

examples/speculative/speculative.cpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,9 +37,11 @@ int main(int argc, char ** argv) {
3737
// max number of parallel drafting sequences (i.e. tree branches)
3838
const int n_seq_dft = params.n_parallel;
3939

40-
// TODO: make this configurable
41-
const float p_accept = 0.80f;
42-
const float p_split = 0.10f;
40+
// probability threshold for accepting a token from the draft model
41+
const float p_accept = params.p_accept;
42+
43+
// probability threshold for splitting a draft branch (only for n_seq_dft > 1)
44+
const float p_split = params.p_split;
4345

4446
#ifndef LOG_DISABLE_LOGS
4547
log_set_target(log_filename_generator("speculative", "log"));
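
For reference, a minimal sketch of how these two thresholds are typically applied inside the drafting loop (identifier names here are illustrative, not the exact variables of speculative.cpp): drafting for a branch stops once the draft model's top candidate falls below p_accept, and a sufficiently probable runner-up candidate forks an additional branch, up to n_seq_dft branches.

    // cand: draft-model candidates for the current draft position, sorted by probability (illustrative)
    if (cand[0].p < p_accept) {
        drafting = false;                  // draft model not confident enough -> stop extending this branch
    } else {
        for (size_t k = 1; k < cand.size() && n_seq_cur < n_seq_dft; ++k) {
            if (cand[k].p > p_split) {
                n_seq_cur++;               // promising runner-up -> fork a new draft branch from token cand[k].id
            }
        }
    }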

ggml-cuda.cu

Lines changed: 6 additions & 0 deletions
@@ -39,6 +39,10 @@
 #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer
 #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess
 #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess
+#define cudaDeviceGetMemPool hipDeviceGetMemPool
+#define cudaMemPoolAttrReleaseThreshold hipMemPoolAttrReleaseThreshold
+#define cudaMemPoolSetAttribute hipMemPoolSetAttribute
+#define cudaMemPool_t hipMemPool_t
 #define cudaDeviceProp hipDeviceProp_t
 #define cudaDeviceSynchronize hipDeviceSynchronize
 #define cudaError_t hipError_t
@@ -48,13 +52,15 @@
 #define cudaEvent_t hipEvent_t
 #define cudaEventDestroy hipEventDestroy
 #define cudaFree hipFree
+#define cudaFreeAsync hipFreeAsync
 #define cudaFreeHost hipHostFree
 #define cudaGetDevice hipGetDevice
 #define cudaGetDeviceCount hipGetDeviceCount
 #define cudaGetDeviceProperties hipGetDeviceProperties
 #define cudaGetErrorString hipGetErrorString
 #define cudaGetLastError hipGetLastError
 #define cudaMalloc hipMalloc
+#define cudaMallocFromPoolAsync hipMallocFromPoolAsync
 #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault)
 #define cudaMemcpy hipMemcpy
 #define cudaMemcpy2DAsync hipMemcpy2DAsync
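
The new aliases cover the CUDA stream-ordered memory-pool API, so the pool-based allocation path in ggml-cuda.cu also builds under HIP/ROCm. A minimal sketch of the call pattern these names map (host-side C++, error handling omitted; device, size and stream are placeholders, not code from this commit):

    cudaMemPool_t pool;
    cudaDeviceGetMemPool(&pool, device);                              // default memory pool of the device

    uint64_t threshold = UINT64_MAX;                                  // keep freed blocks cached in the pool
    cudaMemPoolSetAttribute(pool, cudaMemPoolAttrReleaseThreshold, &threshold);

    void * ptr = nullptr;
    cudaMallocFromPoolAsync(&ptr, size, pool, stream);                // stream-ordered allocation from the pool
    // ... launch kernels using ptr on `stream` ...
    cudaFreeAsync(ptr, stream);                                       // stream-ordered free back into the pool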

llama.cpp

Lines changed: 2 additions & 2 deletions
@@ -7982,7 +7982,7 @@ struct llama_context_params llama_context_default_params() {
         /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
         /*.rope_freq_base =*/ 0.0f,
         /*.rope_freq_scale =*/ 0.0f,
-        /*.yarn_ext_factor =*/ NAN,
+        /*.yarn_ext_factor =*/ -1.0f,
         /*.yarn_attn_factor =*/ 1.0f,
         /*.yarn_beta_fast =*/ 32.0f,
         /*.yarn_beta_slow =*/ 1.0f,
@@ -8125,7 +8125,7 @@ struct llama_context * llama_new_context_with_model(
         cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
     }
 
-    if (std::isnan(cparams.yarn_ext_factor)) { // NaN indicates 'not set'
+    if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
         cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
     }
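
With the sentinel changed from NAN to -1.0f, callers no longer need std::isnan to express "use the default": any negative yarn_ext_factor now requests it. A minimal sketch against the public API (model is assumed to be an already loaded llama_model *; the 0.5f override is just an example value):

    llama_context_params cparams = llama_context_default_params();

    // cparams.yarn_ext_factor is -1.0f here, so llama_new_context_with_model resolves it
    // to 1.0f when the RoPE scaling type is LLAMA_ROPE_SCALING_YARN and to 0.0f otherwise.
    // cparams.yarn_ext_factor = 0.5f;   // any non-negative value is used as-is instead

    llama_context * ctx = llama_new_context_with_model(model, cparams);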

llama.h

Lines changed: 5 additions & 5 deletions
@@ -175,11 +175,11 @@ extern "C" {
     };
 
     struct llama_context_params {
-        uint32_t seed; // RNG seed, -1 for random
-        uint32_t n_ctx; // text context, 0 = from model
-        uint32_t n_batch; // prompt processing maximum batch size
-        uint32_t n_threads; // number of threads to use for generation
-        uint32_t n_threads_batch; // number of threads to use for batch processing
+        uint32_t seed;              // RNG seed, -1 for random
+        uint32_t n_ctx;             // text context, 0 = from model
+        uint32_t n_batch;           // prompt processing maximum batch size
+        uint32_t n_threads;         // number of threads to use for generation
+        uint32_t n_threads_batch;   // number of threads to use for batch processing
         int8_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
 
         // ref: https://github.com/ggerganov/llama.cpp/pull/2054
