@@ -43,30 +43,34 @@ extern char const *LLAMA_BUILD_TARGET;
 
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    uint32_t seed                           = -1;   // RNG seed
+    uint32_t seed                           = -1;    // RNG seed
+
     int32_t n_threads                       = get_num_physical_cores();
-    int32_t n_threads_batch                 = -1;   // number of threads to use for batch processing (-1 = use n_threads)
-    int32_t n_predict                       = -1;   // new tokens to predict
-    int32_t n_ctx                           = 512;  // context size
-    int32_t n_batch                         = 512;  // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_keep                          = 0;    // number of tokens to keep from initial prompt
-    int32_t n_draft                         = 16;   // number of tokens to draft during speculative decoding
-    int32_t n_chunks                        = -1;   // max number of chunks to process (-1 = unlimited)
-    int32_t n_parallel                      = 1;    // number of parallel sequences to decode
-    int32_t n_sequences                     = 1;    // number of sequences to decode
-    int32_t n_gpu_layers                    = -1;   // number of layers to store in VRAM (-1 - use default)
-    int32_t n_gpu_layers_draft              = -1;   // number of layers to store in VRAM for the draft model (-1 - use default)
-    int32_t main_gpu                        = 0;    // the GPU that is used for scratch and small tensors
-    float   tensor_split[LLAMA_MAX_DEVICES] = {0};  // how split tensors should be distributed across GPUs
-    int32_t n_beams                         = 0;    // if non-zero then use beam search of given width.
-    float   rope_freq_base                  = 0.0f; // RoPE base frequency
-    float   rope_freq_scale                 = 0.0f; // RoPE frequency scaling factor
-    float   yarn_ext_factor                 = NAN;  // YaRN extrapolation mix factor
-    float   yarn_attn_factor                = 1.0f; // YaRN magnitude scaling factor
-    float   yarn_beta_fast                  = 32.0f;// YaRN low correction dim
-    float   yarn_beta_slow                  = 1.0f; // YaRN high correction dim
-    int32_t yarn_orig_ctx                   = 0;    // YaRN original context length
-    int8_t  rope_scaling_type               = LLAMA_ROPE_SCALING_UNSPECIFIED;
+    int32_t n_threads_batch                 = -1;    // number of threads to use for batch processing (-1 = use n_threads)
+    int32_t n_predict                       = -1;    // new tokens to predict
+    int32_t n_ctx                           = 512;   // context size
+    int32_t n_batch                         = 512;   // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_keep                          = 0;     // number of tokens to keep from initial prompt
+    int32_t n_draft                         = 16;    // number of tokens to draft during speculative decoding
+    int32_t n_chunks                        = -1;    // max number of chunks to process (-1 = unlimited)
+    int32_t n_parallel                      = 1;     // number of parallel sequences to decode
+    int32_t n_sequences                     = 1;     // number of sequences to decode
+    float   p_accept                        = 0.5f;  // speculative decoding accept probability
+    float   p_split                         = 0.1f;  // speculative decoding split probability
+    int32_t n_gpu_layers                    = -1;    // number of layers to store in VRAM (-1 - use default)
+    int32_t n_gpu_layers_draft              = -1;    // number of layers to store in VRAM for the draft model (-1 - use default)
+    int32_t main_gpu                        = 0;     // the GPU that is used for scratch and small tensors
+    float   tensor_split[LLAMA_MAX_DEVICES] = {0};   // how split tensors should be distributed across GPUs
+    int32_t n_beams                         = 0;     // if non-zero then use beam search of given width.
+    float   rope_freq_base                  = 0.0f;  // RoPE base frequency
+    float   rope_freq_scale                 = 0.0f;  // RoPE frequency scaling factor
+    float   yarn_ext_factor                 = -1.0f; // YaRN extrapolation mix factor
+    float   yarn_attn_factor                = 1.0f;  // YaRN magnitude scaling factor
+    float   yarn_beta_fast                  = 32.0f; // YaRN low correction dim
+    float   yarn_beta_slow                  = 1.0f;  // YaRN high correction dim
+    int32_t yarn_orig_ctx                   = 0;     // YaRN original context length
+    int8_t  rope_scaling_type               = LLAMA_ROPE_SCALING_UNSPECIFIED; // TODO: better to be int32_t for alignment
+                                                                              // pinging @cebtenzzre
 
     // // sampling parameters
     struct llama_sampling_params sparams;
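
The genuinely new members in this hunk are `p_accept` and `p_split`, which parameterize speculative decoding alongside the existing `n_draft`. The header comments only call them "accept probability" and "split probability", so the following is a hedged sketch of one plausible consumer, not code from this PR; `spec_knobs`, `draft_step`, and the probability arrays are invented names for illustration:

```cpp
#include <cstdint>

// Hypothetical illustration only: how a speculative decoding loop could
// consume n_draft / p_accept / p_split from gpt_params.
struct spec_knobs {
    int32_t n_draft  = 16;   // max tokens to draft per verification step
    float   p_accept = 0.5f; // keep drafting while the draft model's top
                             // token probability stays above this threshold
    float   p_split  = 0.1f; // consider forking a second draft branch when
                             // the runner-up token is at least this likely
};

// Decide how many drafted tokens to submit for verification, and whether a
// second branch looks worth splitting off (tree-style drafting).
static int draft_step(const spec_knobs & k, const float * top_p,
                      const float * second_p, int n_avail, bool & want_split) {
    int n = 0;
    want_split = false;
    while (n < k.n_draft && n < n_avail) {
        if (top_p[n] < k.p_accept) {
            break;             // draft model no longer confident enough: stop
        }
        if (second_p[n] >= k.p_split) {
            want_split = true; // a competitive alternative token exists here
        }
        n++;
    }
    return n;
}
```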
@@ -90,7 +94,7 @@ struct gpt_params {
     int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
                              // (which is more convenient to use for plotting)
     //
-    bool   hellaswag       = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
+    bool   hellaswag       = false;  // compute HellaSwag score over random tasks from datafile supplied in prompt
     size_t hellaswag_tasks = 400;   // number of tasks to use when computing the HellaSwag score
 
     bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS