Commit cd61ea0
Update llama-run to include temperature option
This commit updates `examples/run/README.md` to document a new option for setting the sampling temperature, and updates `run.cpp` to parse it.

Signed-off-by: Eric Curtin <[email protected]>
1 parent a3c33b1 commit cd61ea0
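
For reference, a hypothetical invocation exercising the new option (the model path and prompt are illustrative, not from the commit; note that the parser added below matches `--temperature`, while the README and help text advertise `--temp`):

    llama-run --temperature 0.4 some-model.gguf "Hello World"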

File tree: 2 files changed (+51, -14 lines)

examples/run/README.md
Lines changed: 2 additions & 0 deletions

@@ -19,6 +19,8 @@ Options:
       Context size (default: 2048)
   -n, --ngl <value>
       Number of GPU layers (default: 0)
+  --temp <value>
+      Temperature (default: 0.8)
   -v, --verbose, --log-verbose
       Set verbosity level to infinity (i.e. log all messages, useful for debugging)
   -h, --help

examples/run/run.cpp
Lines changed: 49 additions & 14 deletions
@@ -55,6 +55,19 @@ static int printe(const char * fmt, ...) {
 class Opt {
   public:
     int init(int argc, const char ** argv) {
+        ctx_params_          = llama_context_default_params();
+        model_params_        = llama_model_default_params();
+        context_size_default = ctx_params_.n_batch;
+        ngl_default          = model_params_.n_gpu_layers;
+        common_params_sampling sampling;
+        temperature_default = sampling.temp;
+
+        if (argc < 2) {
+            printe("Error: No arguments provided.\n");
+            help();
+            return 1;
+        }
+
         // Parse arguments
         if (parse(argc, argv)) {
             printe("Error: Failed to parse arguments.\n");

@@ -68,15 +81,24 @@ class Opt {
             return 2;
         }
 
+        ctx_params_.n_batch        = context_size_ >= 0 ? context_size_ : context_size_default;
+        model_params_.n_gpu_layers = ngl_ >= 0 ? ngl_ : ngl_default;
+        temperature_               = temperature_ >= 0 ? temperature_ : temperature_default;
+
         return 0; // Success
     }
 
+    llama_context_params ctx_params_;
+    llama_model_params model_params_;
     std::string model_;
-    std::string user_;
-    int context_size_ = -1, ngl_ = -1;
+    std::string user_;
+    int context_size_ = -1, ngl_ = -1;
+    float temperature_ = -1;
     bool verbose_ = false;
 
   private:
+    int context_size_default = -1, ngl_default = -1;
+    float temperature_default = -1;
     bool help_ = false;
 
     bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {

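Taken together, the two hunks above implement a capture-then-resolve pattern: library defaults are read once before argument parsing, and any option still holding its -1 sentinel afterwards falls back to them. A minimal standalone sketch of the idea (names are illustrative, not from run.cpp):

    #include <cstdio>

    // Stand-in for llama_context_default_params(); only the field we need.
    struct ContextParams { int n_batch = 2048; };

    // -1 is the "not set on the command line" sentinel used by the Opt class.
    static int resolve(int parsed, int fallback) { return parsed >= 0 ? parsed : fallback; }

    int main() {
        const int n_batch_default = ContextParams().n_batch; // capture default up front
        int context_size_cli = -1;                           // pretend --context-size was absent
        std::printf("n_batch = %d\n", resolve(context_size_cli, n_batch_default)); // 2048
    }
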
@@ -89,6 +111,17 @@ class Opt {
         }
 
         option_value = std::atoi(argv[++i]);
+
+        return 0;
+    }
+
+    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
+        if (i + 1 >= argc) {
+            return 1;
+        }
+
+        option_value = std::atof(argv[++i]);
+
         return 0;
     }

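The new overload above dispatches on the value type: overload resolution routes float members such as temperature_ to the std::atof variant, while int members like ngl_ keep using std::atoi. A self-contained illustration (parse_value is a hypothetical name standing in for handle_option_with_value):

    #include <cstdio>
    #include <cstdlib>

    // Two overloads differing only in the output type; the compiler
    // selects one per call site based on the argument passed.
    static void parse_value(const char * s, int & out)   { out = std::atoi(s); }
    static void parse_value(const char * s, float & out) { out = (float) std::atof(s); }

    int main() {
        int   ngl  = -1;
        float temp = -1;
        parse_value("999", ngl);  // selects the int overload
        parse_value("0.8", temp); // selects the float overload
        std::printf("ngl=%d temp=%.1f\n", ngl, temp); // ngl=999 temp=0.8
    }
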
@@ -103,6 +136,10 @@ class Opt {
             if (handle_option_with_value(argc, argv, i, ngl_) == 1) {
                 return 1;
             }
+        } else if (options_parsing && strcmp(argv[i], "--temperature") == 0) {
+            if (handle_option_with_value(argc, argv, i, temperature_) == 1) {
+                return 1;
+            }
         } else if (options_parsing &&
                    (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
             verbose_ = true;

@@ -142,6 +179,8 @@ class Opt {
             "      Context size (default: %d)\n"
             "  -n, --ngl <value>\n"
             "      Number of GPU layers (default: %d)\n"
+            "  --temp <value>\n"
+            "      Temperature (default: %.1f)\n"
             "  -v, --verbose, --log-verbose\n"
             "      Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
             "  -h, --help\n"

@@ -170,7 +209,7 @@ class Opt {
             "  llama-run file://some-file3.gguf\n"
             "  llama-run --ngl 999 some-file4.gguf\n"
             "  llama-run --ngl 999 some-file5.gguf Hello World\n",
-            llama_context_default_params().n_batch, llama_model_default_params().n_gpu_layers);
+            context_size_default, ngl_default, temperature_default);
     }
 };

@@ -495,12 +534,12 @@ class LlamaData {
             return 1;
         }
 
-        context = initialize_context(model, opt.context_size_);
+        context = initialize_context(model, opt);
         if (!context) {
             return 1;
         }
 
-        sampler = initialize_sampler();
+        sampler = initialize_sampler(opt);
         return 0;
     }

@@ -619,14 +658,12 @@ class LlamaData {
     // Initializes the model and returns a unique pointer to it
     llama_model_ptr initialize_model(Opt & opt) {
         ggml_backend_load_all();
-        llama_model_params model_params = llama_model_default_params();
-        model_params.n_gpu_layers       = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers;
         resolve_model(opt.model_);
         printe(
             "\r%*s"
             "\rLoading model",
             get_terminal_width(), " ");
-        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params));
+        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), opt.model_params_));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
         }

@@ -636,10 +673,8 @@ class LlamaData {
     }
 
     // Initializes the context with the specified parameters
-    llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) {
-        llama_context_params ctx_params = llama_context_default_params();
-        ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? n_ctx : ctx_params.n_batch;
-        llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params));
+    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
+        llama_context_ptr context(llama_new_context_with_model(model.get(), opt.ctx_params_));
         if (!context) {
             printe("%s: error: failed to create the llama_context\n", __func__);
         }

@@ -648,10 +683,10 @@ class LlamaData {
     }
 
     // Initializes and configures the sampler
-    llama_sampler_ptr initialize_sampler() {
+    llama_sampler_ptr initialize_sampler(const Opt & opt) {
         llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
         llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
-        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(0.8f));
+        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature_));
         llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
 
         return sampler;
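
With this hunk, the temperature stage of the sampler chain is driven by the parsed option instead of the hard-coded 0.8f. A minimal sketch of assembling the same min-p, temperature, dist chain against llama.h (based on the calls visible in this diff; error handling omitted):

    #include "llama.h"

    // Build the sampler chain llama-run uses, with the temperature
    // supplied by the caller rather than hard-coded.
    static llama_sampler * make_sampler(float temperature) {
        llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());
        llama_sampler_chain_add(chain, llama_sampler_init_min_p(0.05f, 1));
        llama_sampler_chain_add(chain, llama_sampler_init_temp(temperature));
        llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
        return chain; // release with llama_sampler_free() when done
    }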
