
Commit aee57d4

no longer necessary to disambiguate common functions with ::
1 parent e58d3b1 commit aee57d4
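
For context: the :: qualifier dates from when the convenience helpers in common.h shared their names with functions in the llama C API (the wrapper itself was called llama_tokenize), so call sites spelled out the global-scope overload explicitly. After the rename to common_tokenize the name is unique and the qualifier is redundant, which is all this commit removes. Below is a minimal, self-contained sketch of the old situation; the types and stub bodies are illustrative stand-ins, not the actual llama.cpp declarations.

#include <cstdio>
#include <string>
#include <vector>

struct llama_context { };   // stand-in for the opaque C API context type
using llama_token = int;    // stand-in for the token id type

// C-API-style tokenizer: writes into a caller-provided buffer (stub body)
static int llama_tokenize(llama_context * /*ctx*/, const char * text,
                          llama_token * out, int n_max) {
    int n = 0;
    for (const char * p = text; *p != '\0' && n < n_max; ++p) {
        out[n++] = (llama_token) *p;
    }
    return n;
}

// convenience overload, as the common.h helper was named before the rename
static std::vector<llama_token> llama_tokenize(llama_context * ctx,
                                               const std::string & text,
                                               bool /*add_special*/) {
    std::vector<llama_token> tokens(text.size());
    const int n = llama_tokenize(ctx, text.c_str(), tokens.data(), (int) tokens.size());
    tokens.resize(n);
    return tokens;
}

int main() {
    llama_context ctx;
    // old call sites qualified with :: to make the global-scope helper explicit;
    // with a unique name (common_tokenize) the plain call is unambiguous
    const auto tokens = ::llama_tokenize(&ctx, std::string("hello"), true);
    std::printf("%zu tokens\n", tokens.size());
    return 0;
}

Either form resolves to the same overload; once the names no longer collide, dropping :: is purely cosmetic, which is what every hunk below does.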

File tree

21 files changed: +48 additions, -48 deletions


examples/batched/batched.cpp

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ int main(int argc, char ** argv) {
     // tokenize the prompt
 
     std::vector<llama_token> tokens_list;
-    tokens_list = ::common_tokenize(model, params.prompt, true);
+    tokens_list = common_tokenize(model, params.prompt, true);
 
     const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
 
examples/cvector-generator/cvector-generator.cpp

Lines changed: 3 additions & 3 deletions
@@ -272,16 +272,16 @@ struct tokenized_prompt {
 
     tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
         const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
-        tokens_pos = ::common_tokenize(ctx, pos, add_bos, true);
-        tokens_neg = ::common_tokenize(ctx, neg, add_bos, true);
+        tokens_pos = common_tokenize(ctx, pos, add_bos, true);
+        tokens_neg = common_tokenize(ctx, neg, add_bos, true);
         max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
         padding_seq(ctx, tokens_pos, max_seq_len);
         padding_seq(ctx, tokens_neg, max_seq_len);
     }
 
     void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
         // TODO: customize padding token
-        std::vector<llama_token> pad_tokens = ::common_tokenize(ctx, " ", false);
+        std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
         llama_token pad_tok = pad_tokens.back();
         while (tokens.size() < len) {
             tokens.push_back(pad_tok);

examples/embedding/embedding.cpp

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ int main(int argc, char ** argv) {
     // tokenize the prompts and trim
     std::vector<std::vector<int32_t>> inputs;
     for (const auto & prompt : prompts) {
-        auto inp = ::common_tokenize(ctx, prompt, true, true);
+        auto inp = common_tokenize(ctx, prompt, true, true);
         if (inp.size() > n_batch) {
             LOG_ERR("%s: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
                     __func__, (long long int) inp.size(), (long long int) n_batch);

examples/eval-callback/eval-callback.cpp

Lines changed: 1 addition & 1 deletion
@@ -129,7 +129,7 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
 static bool run(llama_context * ctx, const gpt_params & params) {
     const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
 
-    std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);
 
     if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
         LOG_ERR("%s : failed to eval\n", __func__);

examples/imatrix/imatrix.cpp

Lines changed: 1 addition & 1 deletion
@@ -436,7 +436,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
     auto tim1 = std::chrono::high_resolution_clock::now();
     LOG_INF("%s: tokenizing the input ..\n", __func__);
 
-    std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
 
     auto tim2 = std::chrono::high_resolution_clock::now();
     LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());

examples/infill/infill.cpp

Lines changed: 5 additions & 5 deletions
@@ -202,8 +202,8 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd_inp;
     std::vector<llama_token> embd_end;
-    std::vector<llama_token> inp_pfx = ::common_tokenize(ctx, params.input_prefix, false);
-    std::vector<llama_token> inp_sfx = ::common_tokenize(ctx, params.input_suffix, false);
+    std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
+    std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
 
     GGML_ASSERT(llama_token_prefix(model) >= 0);
     GGML_ASSERT(llama_token_suffix(model) >= 0);
@@ -505,8 +505,8 @@ int main(int argc, char ** argv) {
                 }
 
                 // tokenize new prefix and suffix
-                std::vector<llama_token> inp_pfx = ::common_tokenize(ctx, params.input_prefix, false);
-                std::vector<llama_token> inp_sfx = ::common_tokenize(ctx, params.input_suffix, false);
+                std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
+                std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
 
                 inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
                 inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
@@ -579,7 +579,7 @@ int main(int argc, char ** argv) {
 
                 const size_t original_size = embd_inp.size();
 
-                const auto line_inp = ::common_tokenize(ctx, buffer, false);
+                const auto line_inp = common_tokenize(ctx, buffer, false);
                 LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
 
                 embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

examples/llava/llava-cli.cpp

Lines changed: 4 additions & 4 deletions
@@ -37,7 +37,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
 
 static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
     std::string str2 = str;
-    std::vector<llama_token> embd_inp = ::common_tokenize(ctx_llama, str2, add_bos, true);
+    std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
     eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
     return true;
 }
@@ -159,14 +159,14 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         user_prompt = prompt.substr(image_pos + std::string("<image>").length());
         LOG_INF("system_prompt: %s\n", system_prompt.c_str());
         if (params->verbose_prompt) {
-            auto tmp = ::common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
+            auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
                 LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
         }
         LOG_INF("user_prompt: %s\n", user_prompt.c_str());
         if (params->verbose_prompt) {
-            auto tmp = ::common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
                 LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
@@ -176,7 +176,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
         user_prompt = prompt + "\nASSISTANT:";
         if (params->verbose_prompt) {
-            auto tmp = ::common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+            auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
                 LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }

examples/llava/minicpmv-cli.cpp

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
 
 static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
     std::string str2 = str;
-    std::vector<llama_token> embd_inp = ::common_tokenize(ctx_llama, str2, add_bos, true);
+    std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
     return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
 }
 
examples/lookahead/lookahead.cpp

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> inp;
     std::vector<llama_token> all;
 
-    inp = ::common_tokenize(ctx, params.prompt, true, true);
+    inp = common_tokenize(ctx, params.prompt, true, true);
     all = inp;
 
     const int max_context_size = llama_n_ctx(ctx);

examples/lookup/lookup-create.cpp

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ int main(int argc, char ** argv){
 
     // tokenize the prompt
     std::vector<llama_token> inp;
-    inp = ::common_tokenize(ctx, params.prompt, true, true);
+    inp = common_tokenize(ctx, params.prompt, true, true);
     fprintf(stderr, "%s: tokenization done\n", __func__);
 
 
examples/lookup/lookup-stats.cpp

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ int main(int argc, char ** argv){
 
     // tokenize the prompt
     std::vector<llama_token> inp;
-    inp = ::common_tokenize(ctx, params.prompt, true, true);
+    inp = common_tokenize(ctx, params.prompt, true, true);
 
     llama_ngram_cache ngram_cache_context;
     llama_ngram_cache ngram_cache_dynamic;

examples/lookup/lookup.cpp

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ int main(int argc, char ** argv){
 
     // tokenize the prompt
     std::vector<llama_token> inp;
-    inp = ::common_tokenize(ctx, params.prompt, true, true);
+    inp = common_tokenize(ctx, params.prompt, true, true);
 
     llama_ngram_cache ngram_cache_context;
     llama_ngram_cache ngram_cache_dynamic;

examples/main/main.cpp

Lines changed: 8 additions & 8 deletions
@@ -296,7 +296,7 @@ int main(int argc, char ** argv) {
             : params.prompt;
         if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
             LOG_DBG("tokenize the prompt\n");
-            embd_inp = ::common_tokenize(ctx, prompt, true, true);
+            embd_inp = common_tokenize(ctx, prompt, true, true);
         } else {
             LOG_DBG("use session tokens\n");
             embd_inp = session_tokens;
@@ -415,7 +415,7 @@ int main(int argc, char ** argv) {
             for (const auto & antiprompt : params.antiprompt) {
                 LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
                 if (params.verbose_prompt) {
-                    auto tmp = ::common_tokenize(ctx, antiprompt, false, true);
+                    auto tmp = common_tokenize(ctx, antiprompt, false, true);
                     for (int i = 0; i < (int) tmp.size(); i++) {
                         LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
                     }
@@ -430,7 +430,7 @@ int main(int argc, char ** argv) {
         if (!params.input_prefix.empty()) {
             LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
             if (params.verbose_prompt) {
-                auto tmp = ::common_tokenize(ctx, params.input_prefix, true, true);
+                auto tmp = common_tokenize(ctx, params.input_prefix, true, true);
                 for (int i = 0; i < (int) tmp.size(); i++) {
                     LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
                 }
@@ -440,7 +440,7 @@ int main(int argc, char ** argv) {
         if (!params.input_suffix.empty()) {
             LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
             if (params.verbose_prompt) {
-                auto tmp = ::common_tokenize(ctx, params.input_suffix, false, true);
+                auto tmp = common_tokenize(ctx, params.input_suffix, false, true);
                 for (int i = 0; i < (int) tmp.size(); i++) {
                     LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
                 }
@@ -788,7 +788,7 @@ int main(int argc, char ** argv) {
                 if (params.interactive) {
                     if (!params.antiprompt.empty()) {
                         // tokenize and inject first reverse prompt
-                        const auto first_antiprompt = ::common_tokenize(ctx, params.antiprompt.front(), false, true);
+                        const auto first_antiprompt = common_tokenize(ctx, params.antiprompt.front(), false, true);
                         embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                         is_antiprompt = true;
                     }
@@ -862,9 +862,9 @@ int main(int argc, char ** argv) {
                         ? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
                         : std::move(buffer);
                     // TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
-                    const auto line_pfx = ::common_tokenize(ctx, params.input_prefix, false, true);
-                    const auto line_inp = ::common_tokenize(ctx, user_inp, false, format_chat);
-                    const auto line_sfx = ::common_tokenize(ctx, params.input_suffix, false, true);
+                    const auto line_pfx = common_tokenize(ctx, params.input_prefix, false, true);
+                    const auto line_inp = common_tokenize(ctx, user_inp, false, format_chat);
+                    const auto line_sfx = common_tokenize(ctx, params.input_suffix, false, true);
 
                     LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
 
examples/parallel/parallel.cpp

Lines changed: 2 additions & 2 deletions
@@ -164,7 +164,7 @@ int main(int argc, char ** argv) {
     }
 
     std::vector<llama_token> tokens_system;
-    tokens_system = ::common_tokenize(ctx, k_system, true);
+    tokens_system = common_tokenize(ctx, k_system, true);
     const int32_t n_tokens_system = tokens_system.size();
 
     llama_seq_id g_seq_id = 0;
@@ -256,7 +256,7 @@ int main(int argc, char ** argv) {
 
             // do not prepend BOS because we have a system prompt!
             std::vector<llama_token> tokens_prompt;
-            tokens_prompt = ::common_tokenize(ctx, client.prompt, false);
+            tokens_prompt = common_tokenize(ctx, client.prompt, false);
 
             for (size_t i = 0; i < tokens_prompt.size(); ++i) {
                 common_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id + 1 }, false);

examples/passkey/passkey.cpp

Lines changed: 2 additions & 2 deletions
@@ -92,10 +92,10 @@ int main(int argc, char ** argv) {
 
     // tokenize the prompt
     std::vector<llama_token> tokens_list;
-    tokens_list = ::common_tokenize(ctx, params.prompt, true);
+    tokens_list = common_tokenize(ctx, params.prompt, true);
 
     // tokenize the prefix and use it as a sink
-    const int n_tokens_prefix = ::common_tokenize(ctx, prompt_prefix, true).size();
+    const int n_tokens_prefix = common_tokenize(ctx, prompt_prefix, true).size();
 
     const int n_tokens_all = tokens_list.size();
 
examples/perplexity/perplexity.cpp

Lines changed: 7 additions & 7 deletions
@@ -348,7 +348,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
 
     LOG_INF("%s: tokenizing the input ..\n", __func__);
 
-    std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
 
     const int n_ctx = llama_n_ctx(ctx);
 
@@ -500,7 +500,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     auto tim1 = std::chrono::high_resolution_clock::now();
     LOG_INF("%s: tokenizing the input ..\n", __func__);
 
-    std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);
+    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true);
 
     auto tim2 = std::chrono::high_resolution_clock::now();
     LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
@@ -844,7 +844,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
         hs_cur.gold_ending_idx = std::stoi( prompt_lines[idx*6+1] );
         for (size_t j = 0; j < 4; j++) {
             hs_cur.ending[j] = prompt_lines[idx*6+2+j];
-            hs_cur.seq_tokens[j] = ::common_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
+            hs_cur.seq_tokens[j] = common_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
         }
 
         // determine the common prefix of the endings
@@ -1136,8 +1136,8 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
     LOG_INF("%s : tokenizing selected tasks\n", __func__);
 
     for (auto & task : data) {
-        task.seq_tokens[0] = ::common_tokenize(ctx, task.first + task.choices[0] + task.second, true);
-        task.seq_tokens[1] = ::common_tokenize(ctx, task.first + task.choices[1] + task.second, true);
+        task.seq_tokens[0] = common_tokenize(ctx, task.first + task.choices[0] + task.second, true);
+        task.seq_tokens[1] = common_tokenize(ctx, task.first + task.choices[1] + task.second, true);
 
         task.common_prefix = 0;
         for (size_t k = 0; k < task.seq_tokens[0].size(); k++) {
@@ -1152,8 +1152,8 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
             task.seq_tokens[0].size() - task.common_prefix +
             task.seq_tokens[1].size() - task.common_prefix;
 
-        task.n_base1 = ::common_tokenize(ctx, task.first + task.choices[0], true).size();
-        task.n_base2 = ::common_tokenize(ctx, task.first + task.choices[1], true).size();
+        task.n_base1 = common_tokenize(ctx, task.first + task.choices[0], true).size();
+        task.n_base2 = common_tokenize(ctx, task.first + task.choices[1], true).size();
     }
 
     LOG_INF("%s : calculating winogrande score over selected tasks.\n", __func__);

examples/retrieval/retrieval.cpp

Lines changed: 1 addition & 1 deletion
@@ -185,7 +185,7 @@ int main(int argc, char ** argv) {
 
     // tokenize the prompts and trim
     for (auto & chunk : chunks) {
-        auto inp = ::common_tokenize(ctx, chunk.textdata, true, false);
+        auto inp = common_tokenize(ctx, chunk.textdata, true, false);
         if (inp.size() > n_batch) {
             LOG_ERR("%s: chunk size (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
                     __func__, (long long int) inp.size(), (long long int) n_batch);

examples/server/server.cpp

Lines changed: 4 additions & 4 deletions
@@ -771,10 +771,10 @@ struct server_context {
 
                     std::vector<llama_token> p;
                     if (first) {
-                        p = ::common_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
+                        p = common_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
                         first = false;
                     } else {
-                        p = ::common_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
+                        p = common_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
                     }
 
                     prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
@@ -788,7 +788,7 @@ struct server_context {
             }
         } else {
             auto s = json_prompt.template get<std::string>();
-            prompt_tokens = ::common_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
+            prompt_tokens = common_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
         }
 
         return prompt_tokens;
@@ -1073,7 +1073,7 @@ struct server_context {
         system_tokens.clear();
 
         if (!system_prompt.empty()) {
-            system_tokens = ::common_tokenize(ctx, system_prompt, true);
+            system_tokens = common_tokenize(ctx, system_prompt, true);
 
             const int32_t n_batch = llama_n_batch(ctx);
             const int32_t n_tokens_prompt = system_tokens.size();

examples/simple/simple.cpp

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ int main(int argc, char ** argv) {
     // tokenize the prompt
 
     std::vector<llama_token> tokens_list;
-    tokens_list = ::common_tokenize(ctx, params.prompt, true);
+    tokens_list = common_tokenize(ctx, params.prompt, true);
 
     const int n_ctx = llama_n_ctx(ctx);
     const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());

examples/speculative/speculative.cpp

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ int main(int argc, char ** argv) {
 
     // Tokenize the prompt
     std::vector<llama_token> inp;
-    inp = ::common_tokenize(ctx_tgt, params.prompt, true, true);
+    inp = common_tokenize(ctx_tgt, params.prompt, true, true);
 
     const int max_context_size = llama_n_ctx(ctx_tgt);
     const int max_tokens_list_size = max_context_size - 4;

examples/tokenize/tokenize.cpp

Lines changed: 1 addition & 1 deletion
@@ -365,7 +365,7 @@ int main(int raw_argc, char ** raw_argv) {
     const bool parse_special = !no_parse_special;
 
     std::vector<llama_token> tokens;
-    tokens = ::common_tokenize(model, prompt, add_bos, parse_special);
+    tokens = common_tokenize(model, prompt, add_bos, parse_special);
 
     if (printing_ids) {
         printf("[");
