
Commit 2afe0a0

examples : move gpt_init() after parsing the cli args
1 parent 078be07 commit 2afe0a0

20 files changed (+39, −38 lines)
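
All 20 files receive the same mechanical change: the call to gpt_init() (or, in a couple of files, a stray gpt_log_init()) moves from the top of main() to just after gpt_params_parse() succeeds, presumably so that CLI options affecting initialization (e.g. logging settings) are parsed before init runs. A minimal sketch of the resulting pattern, using the common helpers that appear in the diffs below (the LLAMA_EXAMPLE_COMMON tag and the placeholder body are illustrative, not taken from any one file):

#include "common.h" // gpt_params, gpt_params_parse(), gpt_init()

int main(int argc, char ** argv) {
    gpt_params params;

    // parse the command-line arguments first ...
    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    // ... then run common initialization, so the options parsed above
    // can take effect before anything else happens
    gpt_init();

    // example-specific code follows

    return 0;
}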

examples/batched-bench/batched-bench.cpp

Lines changed: 2 additions & 2 deletions
@@ -15,14 +15,14 @@ static void print_usage(int, char ** argv) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
         return 1;
     }

+    gpt_init();
+
     int is_pp_shared = params.is_pp_shared;

     std::vector<int> n_pp = params.n_pp;

examples/batched/batched.cpp

Lines changed: 1 addition & 2 deletions
@@ -15,8 +15,6 @@ static void print_usage(int, char ** argv) {
 }

 int main(int argc, char ** argv) {
-    gpt_log_init();
-
     gpt_params params;

     params.prompt = "Hello my name is";
@@ -26,6 +24,7 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();

     // number of parallel batches
     int n_parallel = params.n_parallel;

examples/embedding/embedding.cpp

Lines changed: 2 additions & 2 deletions
@@ -79,14 +79,14 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
 }

 int main(int argc, char ** argv) {
-    gpt_log_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_EMBEDDING)) {
         return 1;
     }

+    gpt_init();
+
     params.embedding = true;
     // For non-causal models, batch size must be equal to ubatch size
     params.n_ubatch = params.n_batch;

examples/eval-callback/eval-callback.cpp

Lines changed: 2 additions & 2 deletions
@@ -140,8 +140,6 @@ static bool run(llama_context * ctx, const gpt_params & params) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     callback_data cb_data;

     gpt_params params;
@@ -150,6 +148,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     llama_backend_init();
     llama_numa_init(params.numa);

examples/gritlm/gritlm.cpp

Lines changed: 2 additions & 0 deletions
@@ -158,6 +158,8 @@ int main(int argc, char * argv[]) {
         return 1;
     }

+    gpt_init();
+
     llama_model_params mparams = llama_model_params_from_gpt_params(params);
     llama_context_params cparams = llama_context_params_from_gpt_params(params);

examples/imatrix/imatrix.cpp

Lines changed: 2 additions & 2 deletions
@@ -573,8 +573,6 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     params.n_ctx = 512;
@@ -585,6 +583,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     params.n_batch = std::min(params.n_batch, params.n_ctx);

     g_collector.set_params(params);

examples/infill/infill.cpp

Lines changed: 2 additions & 2 deletions
@@ -104,15 +104,15 @@ static void sigint_handler(int signo) {
 #endif

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;
     g_params = &params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) {
         return 1;
     }

+    gpt_init();
+
     auto & sparams = params.sparams;

     console::init(params.simple_io, params.use_color);

examples/llava/llava-cli.cpp

Lines changed: 2 additions & 2 deletions
@@ -270,8 +270,6 @@ static void llava_free(struct llava_context * ctx_llava) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     ggml_time_init();

     gpt_params params;
@@ -280,6 +278,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
         print_usage(argc, argv);
         return 1;

examples/llava/minicpmv-cli.cpp

Lines changed: 2 additions & 2 deletions
@@ -248,8 +248,6 @@ static const char * llama_loop(struct llava_context * ctx_llava,struct gpt_sampl
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     ggml_time_init();

     gpt_params params;
@@ -258,6 +256,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     if (params.mmproj.empty() || (params.image.empty())) {
         show_additional_info(argc, argv);
         return 1;

examples/lookahead/lookahead.cpp

Lines changed: 2 additions & 2 deletions
@@ -37,14 +37,14 @@ struct ngram_container {
 };

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
         return 1;
     }

+    gpt_init();
+
     const int W = 15; // lookahead window
     const int N = 5;  // n-gram size
     const int G = 15; // max verification n-grams

examples/lookup/lookup-stats.cpp

Lines changed: 2 additions & 2 deletions
@@ -13,14 +13,14 @@
 #include <vector>

 int main(int argc, char ** argv){
-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
         return 1;
     }

+    gpt_init();
+
     const int n_draft = params.n_draft;

     // init llama.cpp

examples/lookup/lookup.cpp

Lines changed: 2 additions & 2 deletions
@@ -13,14 +13,14 @@
 #include <vector>

 int main(int argc, char ** argv){
-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
         return 1;
     }

+    gpt_init();
+
     // max. number of additional tokens to draft if match is found
     const int n_draft = params.n_draft;

examples/main/main.cpp

Lines changed: 2 additions & 2 deletions
@@ -131,14 +131,14 @@ static std::string chat_add_and_format(struct llama_model * model, std::vector<l
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;
     g_params = &params;
     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
         return 1;
     }

+    gpt_init();
+
     auto & sparams = params.sparams;

     // save choice to use color for later

examples/parallel/parallel.cpp

Lines changed: 2 additions & 2 deletions
@@ -103,14 +103,14 @@ static std::vector<std::string> split_string(const std::string& input, char deli
 int main(int argc, char ** argv) {
     srand(1234);

-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_PARALLEL)) {
         return 1;
     }

+    gpt_init();
+
     // number of simultaneous "clients" to simulate
     const int32_t n_clients = params.n_parallel;

examples/passkey/passkey.cpp

Lines changed: 2 additions & 2 deletions
@@ -15,8 +15,6 @@ static void print_usage(int, char ** argv) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     params.n_junk = 250;
@@ -27,6 +25,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     int n_junk = params.n_junk;
     int n_keep = params.n_keep;
     int n_grp  = params.grp_attn_n;

examples/perplexity/perplexity.cpp

Lines changed: 2 additions & 2 deletions
@@ -1957,8 +1957,6 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     params.n_ctx = 512;
@@ -1968,6 +1966,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     const int32_t n_ctx = params.n_ctx;

     if (n_ctx <= 0) {

examples/retrieval/retrieval.cpp

Lines changed: 2 additions & 2 deletions
@@ -112,14 +112,14 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_RETRIEVAL, print_usage)) {
         return 1;
     }

+    gpt_init();
+
     // For BERT models, batch size must be equal to ubatch size
     params.n_ubatch = params.n_batch;
     params.embedding = true;

examples/server/server.cpp

Lines changed: 2 additions & 2 deletions
@@ -2316,15 +2316,15 @@ inline void signal_handler(int signal) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     // own arguments required by this example
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_SERVER)) {
         return 1;
     }

+    gpt_init();
+
     const bool verbose = params.verbosity > 0;

     // struct that contains llama context and inference

examples/simple/simple.cpp

Lines changed: 2 additions & 2 deletions
@@ -12,8 +12,6 @@ static void print_usage(int, char ** argv) {
 }

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     params.prompt = "Hello my name is";
@@ -23,6 +21,8 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    gpt_init();
+
     // total length of the sequence including the prompt
     const int n_predict = params.n_predict;

examples/speculative/speculative.cpp

Lines changed: 2 additions & 2 deletions
@@ -30,14 +30,14 @@ struct seq_draft {
 };

 int main(int argc, char ** argv) {
-    gpt_init();
-
     gpt_params params;

     if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
         return 1;
     }

+    gpt_init();
+
     if (params.model_draft.empty()) {
         LOG_ERR("%s: --model-draft is required\n", __func__);
         return 1;
