
Commit 24e53a1

add rms_norm_eps to command line
1 parent 9fe47c7 commit 24e53a1

4 files changed, 32 additions and 16 deletions

examples/common.cpp

Lines changed: 8 additions & 0 deletions
@@ -177,6 +177,12 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.n_gqa = std::stoi(argv[i]);
+        } else if (arg == "-eps" || arg == "--rms-norm-eps") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.rms_norm_eps = std::stof(argv[i]);
         } else if (arg == "--rope-freq-base") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -519,6 +525,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
     fprintf(stdout, "  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stdout, "  -gqa N, --gqa N           grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa);
+    fprintf(stdout, "  -eps N, --rms-norm-eps N  rms norm eps (TEMP!!! use 1e-5 for LLaMAv2) (default: %f)\n", params.rms_norm_eps);
     fprintf(stdout, "  --top-k N                 top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
     fprintf(stdout, "  --top-p N                 top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
     fprintf(stdout, "  --tfs N                   tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
@@ -615,6 +622,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     lparams.n_ctx        = params.n_ctx;
     lparams.n_batch      = params.n_batch;
     lparams.n_gqa        = params.n_gqa;
+    lparams.rms_norm_eps = params.rms_norm_eps;
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.main_gpu     = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
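With this change the epsilon can be overridden at launch next to the existing -gqa flag. A hypothetical invocation for a LLaMAv2 70B model (the binary name, model path, and prompt are placeholders, not part of this commit):

    ./main -m models/llama-2-70b.ggmlv3.q4_0.bin -gqa 8 -eps 1e-5 -p "Hello"

Leaving the flag out keeps the previous hard-coded value of 1e-6, so existing setups behave as before.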

examples/common.h

Lines changed: 12 additions & 11 deletions
@@ -22,18 +22,19 @@
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    uint32_t seed = -1; // RNG seed
+    uint32_t seed = -1; // RNG seed
     int32_t n_threads = get_num_physical_cores();
-    int32_t n_predict = -1; // new tokens to predict
-    int32_t n_ctx = 512; // context size
-    int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_gqa = 1; // grouped-query attention factor (TODO: move to hparams)
-    int32_t n_keep = 0; // number of tokens to keep from initial prompt
-    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
-    int32_t n_gpu_layers = 0; // number of layers to store in VRAM
-    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
-    float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
-    int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
+    int32_t n_predict = -1; // new tokens to predict
+    int32_t n_ctx = 512; // context size
+    int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_gqa = 1; // grouped-query attention factor (TODO: move to hparams)
+    int32_t n_keep = 0; // number of tokens to keep from initial prompt
+    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
+    int32_t n_gpu_layers = 0; // number of layers to store in VRAM
+    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
+    float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
+    int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
+    float rms_norm_eps = 1e-6; // rms norm epsilon
     float rope_freq_base = 10000.0f; // RoPE base frequency
     float rope_freq_scale = 1.0f; // RoPE frequency scaling factor
 
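For context on what the new field controls: RMS normalization divides each activation row by sqrt(mean(x^2) + eps), and the epsilon only guards the square root against zero. The sketch below is not the ggml kernel, just an illustration of where rms_norm_eps enters; the learned norm weights are applied separately in the model graph.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Illustrative RMS norm over one row: y[i] = x[i] / sqrt(mean(x^2) + eps).
    static std::vector<float> rms_norm(const std::vector<float> & x, float eps) {
        double sum_sq = 0.0;
        for (float v : x) {
            sum_sq += (double) v * v;
        }
        const float scale = 1.0f / std::sqrt((float) (sum_sq / x.size()) + eps);
        std::vector<float> y(x.size());
        for (size_t i = 0; i < x.size(); ++i) {
            y[i] = x[i] * scale;
        }
        return y;
    }

The default of 1e-6 matches the value that was previously hard-coded, while the usage text above suggests 1e-5 for LLaMAv2 models.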

llama.cpp

Lines changed: 11 additions & 5 deletions
@@ -186,6 +186,7 @@ struct llama_hparams {
     // LLaMAv2
     // TODO: load from model data hparams
     float f_ffn_mult = 1.0f;
+    float f_rms_norm_eps = 1e-6f;
 
     float rope_freq_base = 10000.0f;
     float rope_freq_scale = 1.0f;
@@ -869,6 +870,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_ctx                       =*/ 512,
         /*.n_batch                     =*/ 512,
         /*.n_gqa                       =*/ 1,
+        /*.rms_norm_eps                =*/ 1e-6f,
         /*.gpu_layers                  =*/ 0,
         /*.main_gpu                    =*/ 0,
         /*.tensor_split                =*/ nullptr,
@@ -1000,6 +1002,7 @@ static void llama_model_load_internal(
         int n_ctx,
         int n_batch,
         int n_gqa,
+        float rms_norm_eps,
         int n_gpu_layers,
         int main_gpu,
         const float * tensor_split,
@@ -1024,6 +1027,9 @@ static void llama_model_load_internal(
 
     auto & hparams = model.hparams;
 
+    // TODO: read from file
+    hparams.f_rms_norm_eps = rms_norm_eps;
+
     {
         switch (hparams.n_layer) {
             case 26: model.type = e_model::MODEL_3B; break;
@@ -1072,6 +1078,7 @@
     fprintf(stderr, "%s: n_layer    = %u\n", __func__, hparams.n_layer);
     fprintf(stderr, "%s: n_rot      = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
     fprintf(stderr, "%s: n_gqa      = %u\n", __func__, hparams.n_gqa());
+    fprintf(stderr, "%s: rnorm_eps  = %.1e\n", __func__, hparams.f_rms_norm_eps);
     fprintf(stderr, "%s: n_ff       = %u\n", __func__, n_ff);
     fprintf(stderr, "%s: freq_base  = %.1f\n", __func__, hparams.rope_freq_base);
     fprintf(stderr, "%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
@@ -1330,6 +1337,7 @@ static bool llama_model_load(
         int n_ctx,
         int n_batch,
         int n_gqa,
+        float rms_norm_eps,
         int n_gpu_layers,
         int main_gpu,
         const float * tensor_split,
@@ -1343,7 +1351,7 @@
         llama_progress_callback progress_callback,
         void *progress_callback_user_data) {
     try {
-        llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, n_gpu_layers, main_gpu, tensor_split, rope_freq_base, rope_freq_scale, low_vram, memory_type,
+        llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers, main_gpu, tensor_split, rope_freq_base, rope_freq_scale, low_vram, memory_type,
                 use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::exception & err) {
@@ -1401,9 +1409,7 @@ static bool llama_eval_internal(
 
     const float freq_base  = hparams.rope_freq_base;
     const float freq_scale = hparams.rope_freq_scale;
-
-    // TODO: read from hparams
-    const float rms_norm_eps = 1e-6f;
+    const float rms_norm_eps = hparams.f_rms_norm_eps;
 
     const int n_gpu_layers = model.n_gpu_layers;
 
@@ -3088,7 +3094,7 @@ struct llama_model * llama_load_model_from_file(
 
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
 
-    if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.n_gpu_layers,
+    if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers,
                 params.main_gpu, params.tensor_split, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
                 memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
                 params.progress_callback_user_data)) {
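For API users the new knob is just another field on llama_context_params and is forwarded into hparams.f_rms_norm_eps during model load. A minimal sketch of setting it through the C API, assuming the surrounding calls of this era of the tree (llama_backend_init, llama_new_context_with_model); the model path is a placeholder:

    #include "llama.h"

    int main() {
        llama_backend_init(false /* numa */);

        llama_context_params params = llama_context_default_params();
        params.n_gqa        = 8;     // TEMP flag for LLaMAv2 70B, as in the usage text
        params.rms_norm_eps = 1e-5f; // suggested value for LLaMAv2; the default stays 1e-6f

        llama_model * model = llama_load_model_from_file("models/llama-2-70b.ggmlv3.q4_0.bin", params);
        if (model == NULL) {
            return 1;
        }

        llama_context * ctx = llama_new_context_with_model(model, params);

        // ... tokenize and evaluate as usual ...

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return 0;
    }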

llama.h

Lines changed: 1 addition & 0 deletions
@@ -87,6 +87,7 @@ extern "C" {
         int32_t n_ctx;        // text context
         int32_t n_batch;      // prompt processing batch size
         int32_t n_gqa;        // grouped-query attention (TEMP - will be moved to model hparams)
+        float   rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
         int32_t n_gpu_layers; // number of layers to store in VRAM
         int32_t main_gpu;     // the GPU that is used for scratch and small tensors
 
