Commit 5e07844

Remove direct access to std streams from llama_main

The goal is to allow running llama_main while connected to other streams, such as TCP sockets.

Signed-off-by: Thiago Padilha <[email protected]>

1 parent a7f9931 · commit 5e07844
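The commit message names TCP sockets as the motivating use case. Below is a minimal, hypothetical sketch (not part of this commit) of how a caller might wire an accepted POSIX socket into the new signature: a small std::streambuf adapter provides the instream argument, and fdopen() turns the descriptor into the FILE* expected for outstream and errstream. The fd_streambuf class and serve_client() helper are illustrative names only, and the argument order follows the llama.h declaration added by this commit.

// Hypothetical caller, sketched against the signature added by this commit.
// Assumes a POSIX environment and an already-accepted socket `client_fd`;
// params/vocab/model/t_load_us/t_main_start_us are set up as in main.cpp.
#include <cstdint>
#include <cstdio>
#include <istream>
#include <streambuf>
#include <unistd.h>

#include "utils.h"   // gpt_params
#include "llama.h"   // llama_main, llama_vocab, llama_model

// Minimal read-only streambuf over a file descriptor, so the socket can be
// passed as the `instream` parameter.
class fd_streambuf : public std::streambuf {
public:
    explicit fd_streambuf(int fd) : fd_(fd) {}
protected:
    int_type underflow() override {
        ssize_t n = read(fd_, buf_, sizeof(buf_));
        if (n <= 0) return traits_type::eof();
        setg(buf_, buf_, buf_ + n);
        return traits_type::to_int_type(*gptr());
    }
private:
    int fd_;
    char buf_[4096];
};

int serve_client(int client_fd, gpt_params params, llama_vocab vocab,
                 llama_model model, int64_t t_load_us, int64_t t_main_start_us) {
    fd_streambuf inbuf(client_fd);
    std::istream instream(&inbuf);

    // Duplicate the fd so the FILE* can be closed independently of the socket.
    FILE * outstream = fdopen(dup(client_fd), "w");
    FILE * errstream = outstream;  // or point this at a local log instead

    int ret = llama_main(params, vocab, model, t_load_us, t_main_start_us,
                         instream, outstream, errstream);
    fclose(outstream);
    return ret;
}

In a real server one would probably keep errstream on a local log file rather than the client socket, so diagnostics do not leak into the generated output.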

File tree: llama.cpp, llama.h, main.cpp

3 files changed: +39 −31 lines changed

llama.cpp

Lines changed: 32 additions & 29 deletions

@@ -834,13 +834,16 @@ int llama_main(
     llama_vocab vocab,
     llama_model model,
     int64_t t_load_us,
-    int64_t t_main_start_us) {
+    int64_t t_main_start_us,
+    std::istream & instream,
+    FILE *outstream,
+    FILE *errstream) {
 
     if (params.seed < 0) {
         params.seed = time(NULL);
     }
 
-    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
+    fprintf(errstream, "%s: seed = %d\n", __func__, params.seed);
 
     std::mt19937 rng(params.seed);
     if (params.random_prompt) {
@@ -888,13 +891,13 @@ int llama_main(
         params.interactive = true;
     }
 
-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
-    fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
+    fprintf(errstream, "\n");
+    fprintf(errstream, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
+    fprintf(errstream, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
     for (int i = 0; i < (int) embd_inp.size(); i++) {
-        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
+        fprintf(errstream, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
     }
-    fprintf(stderr, "\n");
+    fprintf(errstream, "\n");
     if (params.interactive) {
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
         struct sigaction sigint_action;
@@ -906,16 +909,16 @@ int llama_main(
         signal(SIGINT, sigint_handler);
 #endif
 
-        fprintf(stderr, "%s: interactive mode on.\n", __func__);
+        fprintf(errstream, "%s: interactive mode on.\n", __func__);
 
         if(params.antiprompt.size()) {
             for (auto antiprompt : params.antiprompt) {
-                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
+                fprintf(errstream, "Reverse prompt: '%s'\n", antiprompt.c_str());
             }
         }
     }
-    fprintf(stderr, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
-    fprintf(stderr, "\n\n");
+    fprintf(errstream, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
+    fprintf(errstream, "\n\n");
 
     std::vector<llama_vocab::id> embd;
 
@@ -924,7 +927,7 @@ int llama_main(
     std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
 
     if (params.interactive) {
-        fprintf(stderr, "== Running in interactive mode. ==\n"
+        fprintf(errstream, "== Running in interactive mode. ==\n"
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
                " - Press Ctrl+C to interject at any time.\n"
 #endif
@@ -948,7 +951,7 @@ int llama_main(
             SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4)
         }
 #endif
-        printf(ANSI_COLOR_YELLOW);
+        fprintf(outstream, ANSI_COLOR_YELLOW);
     }
 
     while (remaining_tokens > 0 || params.interactive) {
@@ -957,7 +960,7 @@ int llama_main(
            const int64_t t_start_us = ggml_time_us();
 
            if (!llama_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
-               fprintf(stderr, "Failed to predict\n");
+               fprintf(errstream, "Failed to predict\n");
                return 1;
            }
 
@@ -1018,13 +1021,13 @@ int llama_main(
        // display text
        if (!input_noecho) {
            for (auto id : embd) {
-               printf("%s", vocab.id_to_token[id].c_str());
+               fprintf(outstream, "%s", vocab.id_to_token[id].c_str());
            }
-           fflush(stdout);
+           fflush(outstream);
        }
        // reset color to default if we there is no pending user input
        if (!input_noecho && params.use_color && (int)embd_inp.size() == input_consumed) {
-           printf(ANSI_COLOR_RESET);
+           fprintf(outstream, ANSI_COLOR_RESET);
        }
 
        // in interactive mode, and not currently processing queued inputs;
@@ -1048,24 +1051,24 @@ int llama_main(
                input_consumed = embd_inp.size();
                embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
 
-               printf("\n> ");
+               fprintf(outstream, "\n> ");
            }
 
            // currently being interactive
-           if (params.use_color) printf(ANSI_BOLD ANSI_COLOR_GREEN);
+           if (params.use_color) fprintf(outstream, ANSI_BOLD ANSI_COLOR_GREEN);
            std::string buffer;
            std::string line;
            bool another_line = true;
            do {
-               std::getline(std::cin, line);
+               std::getline(instream, line);
                if (line.empty() || line.back() != '\\') {
                    another_line = false;
                } else {
                    line.pop_back(); // Remove the continue character
                }
                buffer += line + '\n'; // Append the line to the result
            } while (another_line);
-           if (params.use_color) printf(ANSI_COLOR_RESET);
+           if (params.use_color) fprintf(outstream, ANSI_COLOR_RESET);
 
            std::vector<llama_vocab::id> line_inp = ::llama_tokenize(vocab, buffer, false);
            embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
@@ -1086,7 +1089,7 @@ int llama_main(
            if (params.interactive) {
                is_interacting = true;
            } else {
-               fprintf(stderr, " [end of text]\n");
+               fprintf(errstream, " [end of text]\n");
                break;
            }
        }
@@ -1106,18 +1109,18 @@ int llama_main(
     {
        const int64_t t_main_end_us = ggml_time_us();
 
-       fprintf(stderr, "\n\n");
-       fprintf(stderr, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
-       fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
-       fprintf(stderr, "%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
-       fprintf(stderr, "%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
-       fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
+       fprintf(errstream, "\n\n");
+       fprintf(errstream, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
+       fprintf(errstream, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
+       fprintf(errstream, "%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
+       fprintf(errstream, "%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
+       fprintf(errstream, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
     }
 
     ggml_free(model.ctx);
 
     if (params.use_color) {
-        printf(ANSI_COLOR_RESET);
+        fprintf(outstream, ANSI_COLOR_RESET);
     }
 
     return 0;
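With std::cin, stdout, and stderr no longer hard-coded inside llama_main, nothing in the code above requires a terminal. As a quick illustration (hypothetical, reusing the same set-up variables as main.cpp and an assumed output file name), generation could be captured to a file while the "interactive" input is scripted from an in-memory buffer:

// Hypothetical helper: run one scripted session, writing tokens to a file
// while diagnostics stay on the real stderr.
#include <cstdint>
#include <cstdio>
#include <sstream>

#include "utils.h"   // gpt_params
#include "llama.h"   // llama_main, llama_vocab, llama_model

int run_scripted(gpt_params params, llama_vocab vocab, llama_model model,
                 int64_t t_load_us, int64_t t_main_start_us) {
    std::istringstream scripted_input("Please continue the story.\n");
    FILE * out = fopen("generation.txt", "w");
    if (!out) return 1;

    int ret = llama_main(params, vocab, model, t_load_us, t_main_start_us,
                         scripted_input, out, stderr);
    fclose(out);
    return ret;
}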

llama.h

Lines changed: 4 additions & 1 deletion

@@ -64,5 +64,8 @@ int llama_main(
     llama_vocab vocab,
     llama_model model,
     int64_t t_load_us,
-    int64_t t_main_start_us);
+    int64_t t_main_start_us,
+    std::istream & instream,
+    FILE *outstream,
+    FILE *errstream);
 bool llama_model_load(const std::string & fname, llama_model & model, llama_vocab & vocab, int n_ctx, int n_parts, ggml_type memory_type);
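Because the public declaration now names std::istream and FILE, any translation unit that includes llama.h needs those types in scope; that is presumably why the main.cpp hunk below adds <iostream>. Whether llama.h itself gains the includes is not visible in this commit, so the minimal set is sketched here as an assumption:

// Assumed prerequisites for the new llama_main declaration:
#include <istream>   // std::istream
#include <cstdio>    // FILE
#include "llama.h"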

main.cpp

Lines changed: 3 additions & 1 deletion

@@ -2,6 +2,8 @@
 #include "utils.h"
 #include "llama.h"
 
+#include <iostream>
+
 const char * llama_print_system_info(void) {
     static std::string s;
 
@@ -63,5 +65,5 @@ int main(int argc, char ** argv) {
            params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
     }
 
-    return llama_main(params, vocab, model, t_main_start_us, t_load_us);
+    return llama_main(params, vocab, model, t_main_start_us, t_load_us, std::cin, stdout, stderr);
 }
