Commit 407c1b8

Apply suggestions to main.cpp
1 parent 63506ec · commit 407c1b8

1 file changed: +6, -9 lines changed

main.cpp

Lines changed: 6 additions & 9 deletions
@@ -88,8 +88,6 @@ int main(int argc, char ** argv) {
 
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');
-    // tokenize the prompt
-    std::vector<gpt_vocab::id> embd_inp = llama_tokenize_text(ctx, params.prompt);
 
     // tokenize the reverse prompt
     std::vector<gpt_vocab::id> antiprompt_inp = llama_tokenize_text(ctx, params.prompt);
@@ -140,15 +138,15 @@ int main(int argc, char ** argv) {
         printf(ANSI_COLOR_YELLOW);
     }
 
-    if(!llama_injest_input(ctx, params.prompt))
+    if(!llama_ingest_input(ctx, params.prompt))
     {
-        fprintf(stderr, "Failed to injest prompt\n");
+        fprintf(stderr, "Failed to ingest prompt\n");
         return 1;
     };
 
     // display text
     input_noecho = false;
-    const std::vector<gpt_vocab::id>& embd = llama_context_get_embd(ctx);
+    const std::vector<gpt_vocab::id>& embd = llama_context_get_embedding(ctx);
     if (!input_noecho) {
         for (auto id : embd) {
             printf("%s", vocab.id_to_token[id].c_str());
@@ -162,15 +160,14 @@ int main(int argc, char ** argv) {
 
     const std::vector<gpt_vocab::id>& last_n_tokens = llama_context_get_last_n_tokens(ctx);
 
-    while (llama_context_not_finished(ctx) > 0) {
+    while (llama_context_is_finished(ctx) != true) {
         gpt_vocab::id model_output = 0;
-        bool response = llama_inference(ctx, model_output);
+        bool response = llama_infer(ctx, model_output);
         if (response) {
            printf("%s", vocab.id_to_token[model_output].c_str());
            fflush(stdout);
         }
 
-
         // in interactive mode, and not currently processing queued inputs;
         // check if we should prompt the user for more
         if (params.interactive) {
@@ -204,7 +201,7 @@ int main(int argc, char ** argv) {
                 buf[n_read+1] = 0;
             }
             // Do not clear existing context in interactive mode
-            llama_init_context_with_prompt(ctx, buf, false);
+            llama_update_context_with_prompt(ctx, buf, false);
         }
 
         is_interacting = false;
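
Taken together, the hunks rename five calls in the fork's context API: llama_injest_input becomes llama_ingest_input, llama_context_get_embd becomes llama_context_get_embedding, llama_context_not_finished(ctx) > 0 becomes llama_context_is_finished(ctx) != true (the predicate's sense is inverted), llama_inference becomes llama_infer, and llama_init_context_with_prompt becomes llama_update_context_with_prompt when appending interactive input without clearing the context. Below is a minimal sketch of how the renamed calls fit together. It assumes only the signatures implied by the call sites in this diff; the header names, the llama_context type name, and the surrounding generate() wrapper are assumptions, not part of the commit.

// Minimal sketch of the post-commit generation flow. Only the llama_* calls
// and gpt_vocab usage visible in the diff are taken from the commit; the
// headers, the llama_context type name, and this function are assumptions.
#include <cstdio>
#include <string>

#include "utils.h"  // assumed: defines gpt_vocab
#include "llama.h"  // assumed: declares llama_context and the llama_* calls

bool generate(llama_context & ctx, gpt_vocab & vocab, std::string prompt) {
    // Add a space in front of the first character to match OG llama
    // tokenizer behavior, as in the hunk at old line 90.
    prompt.insert(0, 1, ' ');

    // Feed the prompt into the context (renamed from llama_injest_input).
    if (!llama_ingest_input(ctx, prompt)) {
        fprintf(stderr, "Failed to ingest prompt\n");
        return false;
    }

    // Echo the ingested tokens (renamed from llama_context_get_embd).
    for (auto id : llama_context_get_embedding(ctx)) {
        printf("%s", vocab.id_to_token[id].c_str());
    }

    // Sample until the context reports completion; the loop condition is now
    // a positive predicate (renamed from llama_context_not_finished(ctx) > 0).
    while (!llama_context_is_finished(ctx)) {
        gpt_vocab::id model_output = 0;
        if (llama_infer(ctx, model_output)) {  // renamed from llama_inference
            printf("%s", vocab.id_to_token[model_output].c_str());
            fflush(stdout);
        }
    }
    return true;
}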
