Skip to content

Commit 8ee7363

Browse files
committed
Revert code motion
1 parent 6a94ae6 commit 8ee7363

File tree

1 file changed

+15
-15
lines changed

1 file changed

+15
-15
lines changed

llama.cpp

Lines changed: 15 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -975,6 +975,21 @@ static void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
975975
(void) tensor;
976976
}
977977

978+
static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
979+
std::vector<char> result(8, 0);
980+
const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
981+
if (n_tokens < 0) {
982+
result.resize(-n_tokens);
983+
int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
984+
GGML_ASSERT(check == -n_tokens);
985+
}
986+
else {
987+
result.resize(n_tokens);
988+
}
989+
990+
return std::string(result.data(), result.size());
991+
}
992+
978993
//
979994
// globals
980995
//
@@ -7447,21 +7462,6 @@ void llama_sample_repetition_penalties(
74477462
}
74487463
}
74497464

7450-
static std::string llama_token_to_piece(const struct llama_context* ctx, llama_token token) {
7451-
std::vector<char> result(8, 0);
7452-
const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
7453-
if (n_tokens < 0) {
7454-
result.resize(-n_tokens);
7455-
int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
7456-
GGML_ASSERT(check == -n_tokens);
7457-
}
7458-
else {
7459-
result.resize(n_tokens);
7460-
}
7461-
7462-
return std::string(result.data(), result.size());
7463-
}
7464-
74657465
void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
74667466
GGML_ASSERT(ctx);
74677467
const int64_t t_start_sample_us = ggml_time_us();

0 commit comments

Comments
 (0)