Commit 2dff6f4

log : simplify init

1 parent c426837 commit 2dff6f4

24 files changed: +43 -115 lines changed


common/common.cpp

Lines changed: 11 additions & 0 deletions

@@ -362,6 +362,17 @@ bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREADS])
     return true;
 }
 
+void gpt_init() {
+    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
+        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
+            gpt_log_add(gpt_log_main(), level, "%s", text);
+        }
+    }, NULL);
+
+
+    LOG_INF("build: %d (%s) with %s for %s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET);
+}
+
 std::string gpt_params_get_system_info(const gpt_params & params) {
     std::ostringstream os;

common/common.h

Lines changed: 3 additions & 0 deletions

@@ -339,6 +339,9 @@ struct gpt_params {
     bool batched_bench_output_jsonl = false;
 };
 
+// call once at the start of a program using common
+void gpt_init();
+
 std::string gpt_params_get_system_info(const gpt_params & params);
 
 bool parse_cpu_range(const std::string& range, bool(&boolmask)[GGML_MAX_N_THREADS]);
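
Note: taken together with the common.cpp hunk above, the intended call pattern for a program using common looks like the sketch below. The gpt_params_parse call and the LLAMA_EXAMPLE_COMMON enum value are assumptions about the argument-parsing API at this commit, shown only for illustration:

    #include "common.h"
    #include "log.h"

    int main(int argc, char ** argv) {
        gpt_init(); // installs the llama.cpp -> common log bridge and prints build info

        gpt_params params;
        if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { // assumed signature
            return 1;
        }

        LOG_INF("%s: ready\n", __func__);
        return 0;
    }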

common/log.cpp

Lines changed: 1 addition & 1 deletion

@@ -67,7 +67,7 @@ struct gpt_log_entry {
         if (level != GGML_LOG_LEVEL_NONE) {
             if (timestamp) {
                 // [M.s.ms.us]
-                fprintf(fcur, "" LOG_COL_BLUE "%05d.%02d.%03d.%03d" LOG_COL_DEFAULT " ",
+                fprintf(fcur, "" LOG_COL_BLUE "%d.%02d.%03d.%03d" LOG_COL_DEFAULT " ",
                     (int) (timestamp / 1000000 / 60),
                     (int) (timestamp / 1000000 % 60),
                     (int) (timestamp / 1000 % 1000),
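
Note: the only change here is dropping the zero-padding on the minutes field of the log timestamp. A self-contained sketch of the effect (the timestamp value is made up for illustration):

    #include <cstdio>
    #include <cstdint>

    int main() {
        const int64_t timestamp = 83456789; // microseconds: 1 min, 23.456789 s
        // old format: minutes zero-padded to five digits
        printf("%05d.%02d.%03d.%03d\n",
               (int) (timestamp / 1000000 / 60), (int) (timestamp / 1000000 % 60),
               (int) (timestamp / 1000 % 1000), (int) (timestamp % 1000)); // 00001.23.456.789
        // new format: minutes printed without padding
        printf("%d.%02d.%03d.%03d\n",
               (int) (timestamp / 1000000 / 60), (int) (timestamp / 1000000 % 60),
               (int) (timestamp / 1000 % 1000), (int) (timestamp % 1000)); // 1.23.456.789
        return 0;
    }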

examples/batched-bench/batched-bench.cpp

Lines changed: 1 addition & 5 deletions

@@ -15,11 +15,7 @@ static void print_usage(int, char ** argv) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/batched/batched.cpp

Lines changed: 1 addition & 5 deletions

@@ -15,11 +15,7 @@ static void print_usage(int, char ** argv) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp

Lines changed: 2 additions & 5 deletions

@@ -872,16 +872,13 @@ static std::string basename(const std::string &path) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     struct train_params params = get_default_train_params();
     if (!params_parse(argc, argv, &params)) {
         return 1;
     }
+
     Config config;
     TransformerWeights weights = {};
     {

examples/embedding/embedding.cpp

Lines changed: 1 addition & 7 deletions

@@ -79,11 +79,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * output
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

@@ -95,8 +91,6 @@ int main(int argc, char ** argv) {
     // For non-causal models, batch size must be equal to ubatch size
     params.n_ubatch = params.n_batch;
 
-    print_build_info();
-
     llama_backend_init();
     llama_numa_init(params.numa);

examples/eval-callback/eval-callback.cpp

Lines changed: 1 addition & 7 deletions

@@ -140,11 +140,7 @@ static bool run(llama_context * ctx, const gpt_params & params) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     callback_data cb_data;

@@ -154,8 +150,6 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    print_build_info();
-
     llama_backend_init();
     llama_numa_init(params.numa);

examples/imatrix/imatrix.cpp

Lines changed: 2 additions & 6 deletions

@@ -543,7 +543,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 
     if (params.compute_ppl) {
         const int first = n_ctx/2;
-        const auto all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
+        const auto * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
         process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
                        workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
         count += n_ctx - first - 1;
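
Note: a side effect of this one-character change (background, not part of the diff): with a float * initializer the two spellings deduce different types, as sketched here,

    float * p = nullptr;
    const auto   a = p; // float * const  -- the pointer itself is const
    const auto * b = p; // const float *  -- the pointed-to data is const

so the new form also adds const to the pointee, assuming process_logits accepts a const float *.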
@@ -573,11 +573,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/infill/infill.cpp

Lines changed: 1 addition & 7 deletions

@@ -104,11 +104,7 @@ static void sigint_handler(int signo) {
 #endif
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;
     g_params = &params;

@@ -159,8 +155,6 @@ int main(int argc, char ** argv) {
         LOG_WRN("%s: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
     }
 
-    print_build_info();
-
     LOG_INF("%s: llama backend init\n", __func__);
     llama_backend_init();
     llama_numa_init(params.numa);
llama_numa_init(params.numa);

examples/llava/llava-cli.cpp

Lines changed: 1 addition & 5 deletions

@@ -270,11 +270,7 @@ static void llava_free(struct llava_context * ctx_llava) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     ggml_time_init();

examples/llava/minicpmv-cli.cpp

Lines changed: 1 addition & 5 deletions

@@ -248,11 +248,7 @@ static const char * llama_loop(struct llava_context * ctx_llava, struct gpt_sampler
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     ggml_time_init();

examples/lookahead/lookahead.cpp

Lines changed: 1 addition & 5 deletions

@@ -37,11 +37,7 @@ struct ngram_container {
 };
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/lookup/lookup-stats.cpp

Lines changed: 1 addition & 5 deletions

@@ -13,11 +13,7 @@
 #include <vector>
 
 int main(int argc, char ** argv){
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/lookup/lookup.cpp

Lines changed: 1 addition & 5 deletions

@@ -13,11 +13,7 @@
 #include <vector>
 
 int main(int argc, char ** argv){
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/main/main.cpp

Lines changed: 3 additions & 3 deletions

@@ -126,11 +126,13 @@ static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg>
     llama_chat_msg new_msg{role, content};
     auto formatted = llama_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
     chat_msgs.push_back({role, content});
-    LOG_DBG("formatted: %s\n", formatted.c_str());
+    LOG_DBG("formatted: '%s'\n", formatted.c_str());
     return formatted;
 }
 
 int main(int argc, char ** argv) {
+    gpt_init();
+
     llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
         if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
             gpt_log_add(gpt_log_main(), level, "%s", text);

@@ -179,8 +181,6 @@ int main(int argc, char ** argv) {
         LOG_WRN("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
     }
 
-    print_build_info();
-
     LOG_INF("%s: llama backend init\n", __func__);
 
     llama_backend_init();
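
Note: unlike the other examples, main.cpp keeps its llama_log_set lambda in addition to the new gpt_init() call, so the identical callback is registered twice; llama_log_set stores a single callback, so the second registration simply overwrites the first with the same behavior.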

examples/parallel/parallel.cpp

Lines changed: 1 addition & 5 deletions

@@ -103,11 +103,7 @@ static std::vector<std::string> split_string(const std::string& input, char delimiter) {
 int main(int argc, char ** argv) {
     srand(1234);
 
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/passkey/passkey.cpp

Lines changed: 1 addition & 5 deletions

@@ -15,11 +15,7 @@ static void print_usage(int, char ** argv) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

examples/perplexity/perplexity.cpp

Lines changed: 1 addition & 7 deletions

@@ -1957,11 +1957,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

@@ -2005,8 +2001,6 @@ int main(int argc, char ** argv) {
         params.n_ctx += params.ppl_stride/2;
     }
 
-    print_build_info();
-
     llama_backend_init();
     llama_numa_init(params.numa);

examples/retrieval/retrieval.cpp

Lines changed: 1 addition & 7 deletions

@@ -112,11 +112,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * output
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;

@@ -137,8 +133,6 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    print_build_info();
-
     LOG_INF("processing files:\n");
     for (auto & context_file : params.context_files) {
         LOG_INF("%s\n", context_file.c_str());

examples/server/server.cpp

Lines changed: 2 additions & 8 deletions

@@ -2372,11 +2372,7 @@ inline void signal_handler(int signal) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     // own arguments required by this example
     gpt_params params;

@@ -2401,8 +2397,6 @@ int main(int argc, char ** argv) {
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    LOG_INF("build: %d %s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
-
     LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %d\n", params.cpuparams.n_threads, params.cpuparams_batch.n_threads, std::thread::hardware_concurrency());
     LOG_INF("\n");
     LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());

@@ -3193,7 +3187,7 @@ int main(int argc, char ** argv) {
     }
 
     // print sample chat example to make it clear which template is used
-    LOG_INF("%s: chat template, built_in: %d, chat_example: %s\n", __func__, params.chat_template.empty(), llama_chat_format_example(ctx_server.model, params.chat_template).c_str());
+    LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), llama_chat_format_example(ctx_server.model, params.chat_template).c_str());
 
     ctx_server.queue_tasks.on_new_task(std::bind(
         &server_context::process_single_task, &ctx_server, std::placeholders::_1));

examples/server/utils.hpp

Lines changed: 3 additions & 2 deletions

@@ -85,7 +85,7 @@ inline std::string format_chat(const struct llama_model * model, const std::string & tmpl
     }
 
     const auto formatted_chat = llama_chat_apply_template(model, tmpl, chat, true);
-    LOG_DBG("formatted_chat: %s\n", formatted_chat.c_str());
+    LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());
 
     return formatted_chat;
 }

@@ -295,7 +295,8 @@ static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output>
 static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
     const std::string str =
         std::string(event) + ": " +
-        data.dump(-1, ' ', false, json::error_handler_t::replace) + "\n\n";
+        data.dump(-1, ' ', false, json::error_handler_t::replace) +
+        "\n\n"; // note: these newlines are important (not sure why though, if you know, add a comment to explain)
 
     LOG_DBG("data stream, to_send: %s", str.c_str());
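
Note: a likely answer to the comment above (background knowledge, not from the commit): in the text/event-stream format used for server-sent events, a client only dispatches an event once a blank line terminates it, so each frame must end with two newlines. A minimal sketch of a well-formed frame, with a hypothetical payload:

    #include <iostream>
    #include <string>

    int main() {
        const std::string frame =
            "data: {\"content\":\"hello\"}\n" // one field line of the event
            "\n";                             // blank line: terminates and dispatches the event
        std::cout << frame; // without the trailing blank line, clients keep buffering
        return 0;
    }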

examples/simple/simple.cpp

Lines changed: 1 addition & 5 deletions

@@ -12,11 +12,7 @@ static void print_usage(int, char ** argv) {
 }
 
 int main(int argc, char ** argv) {
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (LOG_DEFAULT_LLAMA <= gpt_log_verbosity_env) {
-            gpt_log_add(gpt_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    gpt_init();
 
     gpt_params params;
