Skip to content

Commit 2948768

Browse files
committed
common : reimplement the logger
ggml-ci
1 parent 0abc6a2 commit 2948768

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

50 files changed

+2000
-2365
lines changed

.github/workflows/build.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ env:
2323
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
2424
GGML_NLOOP: 3
2525
GGML_N_THREADS: 1
26+
LLAMA_LOG_COLORS: 1
27+
LLAMA_LOG_TIMESTAMPS: 1
2628

2729
jobs:
2830
macOS-latest-cmake-arm64:

.github/workflows/server.yml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ on:
2020
types: [opened, synchronize, reopened]
2121
paths: ['.github/workflows/server.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', 'examples/server/**.*']
2222

23+
env:
24+
LLAMA_LOG_COLORS: 1
25+
LLAMA_LOG_TIMESTAMPS: 1
26+
LLAMA_LOG_VERBOSITY: 10
27+
2328
concurrency:
2429
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.run_id }}
2530
cancel-in-progress: true

Makefile

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ TEST_TARGETS = \
5454
tests/test-grammar-parser \
5555
tests/test-json-schema-to-grammar \
5656
tests/test-llama-grammar \
57+
tests/test-log \
5758
tests/test-model-load-cancel \
5859
tests/test-opt \
5960
tests/test-quantize-fns \
@@ -931,6 +932,7 @@ OBJ_LLAMA = \
931932
OBJ_COMMON = \
932933
common/common.o \
933934
common/arg.o \
935+
common/log.o \
934936
common/console.o \
935937
common/ngram-cache.o \
936938
common/sampling.o \
@@ -1168,6 +1170,11 @@ common/arg.o: \
11681170
common/arg.h
11691171
$(CXX) $(CXXFLAGS) -c $< -o $@
11701172

1173+
common/log.o: \
1174+
common/log.cpp \
1175+
common/log.h
1176+
$(CXX) $(CXXFLAGS) -c $< -o $@
1177+
11711178
common/sampling.o: \
11721179
common/sampling.cpp \
11731180
common/sampling.h \
@@ -1346,7 +1353,7 @@ llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp \
13461353
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
13471354

13481355
llama-convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp \
1349-
$(OBJ_GGML) $(OBJ_LLAMA)
1356+
$(OBJ_ALL)
13501357
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
13511358
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
13521359

@@ -1527,6 +1534,11 @@ tests/test-llama-grammar: tests/test-llama-grammar.cpp \
15271534
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
15281535
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
15291536

1537+
tests/test-log: tests/test-log.cpp \
1538+
$(OBJ_ALL)
1539+
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
1540+
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
1541+
15301542
tests/test-grammar-parser: tests/test-grammar-parser.cpp \
15311543
$(OBJ_ALL)
15321544
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)

ci/run.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -737,6 +737,8 @@ function gg_sum_embd_bge_small {
737737

738738
## main
739739

740+
export LLAMA_LOG_TIMESTAMPS=1
741+
740742
if [ -z ${GG_BUILD_LOW_PERF} ]; then
741743
# Create symlink: ./llama.cpp/models-mnt -> $MNT/models/models-mnt
742744
rm -rf ${SRC}/models-mnt

common/CMakeLists.txt

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -51,21 +51,23 @@ endif()
5151
set(TARGET common)
5252

5353
add_library(${TARGET} STATIC
54+
arg.cpp
55+
arg.h
5456
base64.hpp
55-
common.h
5657
common.cpp
57-
arg.h
58-
arg.cpp
59-
sampling.h
60-
sampling.cpp
61-
console.h
58+
common.h
6259
console.cpp
63-
json.hpp
60+
console.h
6461
json-schema-to-grammar.cpp
65-
train.h
66-
train.cpp
67-
ngram-cache.h
62+
json.hpp
63+
log.cpp
64+
log.h
6865
ngram-cache.cpp
66+
ngram-cache.h
67+
sampling.cpp
68+
sampling.h
69+
train.cpp
70+
train.h
6971
)
7072

7173
if (BUILD_SHARED_LIBS)

common/arg.cpp

Lines changed: 42 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,17 @@
11
#include "arg.h"
22

3+
#include "log.h"
34
#include "sampling.h"
45

56
#include <algorithm>
6-
#include <string>
7-
#include <vector>
8-
#include <set>
7+
#include <climits>
8+
#include <cstdarg>
99
#include <fstream>
1010
#include <regex>
11-
#include <cstdarg>
12-
#include <climits>
11+
#include <set>
12+
#include <string>
13+
#include <thread>
14+
#include <vector>
1315

1416
#include "json-schema-to-grammar.h"
1517

@@ -417,7 +419,7 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
417419
[](gpt_params & params) {
418420
params.use_color = true;
419421
}
420-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
422+
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
421423
add_opt(llama_arg(
422424
{"-t", "--threads"}, "N",
423425
format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
@@ -876,15 +878,15 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
876878
params.input_prefix = value;
877879
params.enable_chat_template = false;
878880
}
879-
).set_examples({LLAMA_EXAMPLE_MAIN}));
881+
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
880882
add_opt(llama_arg(
881883
{"--in-suffix"}, "STRING",
882884
"string to suffix after user inputs with (default: empty)",
883885
[](gpt_params & params, const std::string & value) {
884886
params.input_suffix = value;
885887
params.enable_chat_template = false;
886888
}
887-
).set_examples({LLAMA_EXAMPLE_MAIN}));
889+
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
888890
add_opt(llama_arg(
889891
{"--no-warmup"},
890892
"skip warming up the model with an empty run",
@@ -1824,19 +1826,6 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
18241826
params.system_prompt = system_prompt;
18251827
}
18261828
).set_examples({LLAMA_EXAMPLE_SERVER}));
1827-
add_opt(llama_arg(
1828-
{"--log-format"}, "{text, json}",
1829-
"log output format: json or text (default: json)",
1830-
[](gpt_params & params, const std::string & value) {
1831-
if (value == "json") {
1832-
params.log_json = true;
1833-
} else if (value == "text") {
1834-
params.log_json = false;
1835-
} else {
1836-
throw std::invalid_argument("invalid value");
1837-
}
1838-
}
1839-
).set_examples({LLAMA_EXAMPLE_SERVER}));
18401829
add_opt(llama_arg(
18411830
{"--metrics"},
18421831
format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),
@@ -1956,39 +1945,48 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
19561945
else { std::invalid_argument("invalid value"); }
19571946
}
19581947
).set_examples({LLAMA_EXAMPLE_BENCH}));
1959-
#ifndef LOG_DISABLE_LOGS
1960-
// TODO: make this looks less weird
1961-
add_opt(llama_arg(
1962-
{"--log-test"},
1963-
"Log test",
1964-
[](gpt_params &) { log_param_single_parse("--log-test"); }
1965-
));
19661948
add_opt(llama_arg(
19671949
{"--log-disable"},
19681950
"Log disable",
1969-
[](gpt_params &) { log_param_single_parse("--log-disable"); }
1951+
[](gpt_params &) {
1952+
gpt_log_pause(gpt_log_main());
1953+
}
19701954
));
19711955
add_opt(llama_arg(
1972-
{"--log-enable"},
1973-
"Log enable",
1974-
[](gpt_params &) { log_param_single_parse("--log-enable"); }
1956+
{"--log-file"}, "FNAME",
1957+
"Log to file",
1958+
[](gpt_params &, const std::string & value) {
1959+
gpt_log_set_file(gpt_log_main(), value.c_str());
1960+
}
19751961
));
19761962
add_opt(llama_arg(
1977-
{"--log-new"},
1978-
"Log new",
1979-
[](gpt_params &) { log_param_single_parse("--log-new"); }
1980-
));
1963+
{"--log-colors"},
1964+
"Enable colored logging",
1965+
[](gpt_params &) {
1966+
gpt_log_set_colors(gpt_log_main(), true);
1967+
}
1968+
).set_env("LLAMA_LOG_COLORS"));
19811969
add_opt(llama_arg(
1982-
{"--log-append"},
1983-
"Log append",
1984-
[](gpt_params &) { log_param_single_parse("--log-append"); }
1970+
{"-lv", "--log-verbose"},
1971+
"Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
1972+
[](gpt_params &) {
1973+
gpt_log_set_verbosity_thold(INT_MAX);
1974+
}
19851975
));
19861976
add_opt(llama_arg(
1987-
{"--log-file"}, "FNAME",
1988-
"Log file",
1989-
[](gpt_params &, const std::string & value) { log_param_pair_parse(false, "--log-file", value); }
1990-
));
1991-
#endif // LOG_DISABLE_LOGS
1977+
{"--log-verbosity"}, "THOLD",
1978+
"Set the verbosity threshold. Messages with a higher verbosity will be ignored.",
1979+
[](gpt_params &, int value) {
1980+
gpt_log_set_verbosity_thold(value);
1981+
}
1982+
).set_env("LLAMA_LOG_VERBOSITY"));
1983+
add_opt(llama_arg(
1984+
{"--log-timestamps"},
1985+
"Enable timestamps in log messages",
1986+
[](gpt_params &) {
1987+
gpt_log_set_timestamps(gpt_log_main(), true);
1988+
}
1989+
).set_env("LLAMA_LOG_TIMESTAMPS"));
19921990

19931991
return ctx_arg;
19941992
}

0 commit comments

Comments
 (0)