Commit 8f429fa

perplexity : fix ETA by warming up the model with an empty run

1 parent 6519e9c

File tree

common/common.cpp
examples/main/main.cpp

2 files changed: +8 -8 lines changed

common/common.cpp
Lines changed: 8 additions & 0 deletions

@@ -752,6 +752,14 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(
         params.logit_bias[llama_token_eos(lctx)] = -INFINITY;
     }
 
+    {
+        LOG("warming up the model with an empty run\n");
+
+        const std::vector<llama_token> tmp = { llama_token_bos(lctx), };
+        llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
+        llama_reset_timings(lctx);
+    }
+
     return std::make_tuple(model, lctx);
 }
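
Why the warm-up fixes the ETA: the perplexity example appears to extrapolate its time estimate from the first evaluated chunk, and before this change that first eval also paid one-time startup costs (faulting in the mmap'd weights, allocating buffers), so the projection came out far too pessimistic. Evaluating a single BOS token up front and then calling llama_reset_timings moves those costs outside the measured window. Below is a minimal standalone sketch of the effect; the timings and the eval_chunk helper are hypothetical stand-ins, not perplexity.cpp itself:

#include <chrono>
#include <cstdio>
#include <thread>

// Stand-in for evaluating one chunk; in the real code the first llama_eval
// call is the slow "cold" one because of one-time startup costs.
static void eval_chunk(bool cold) {
    std::this_thread::sleep_for(std::chrono::milliseconds(cold ? 300 : 100));
}

int main() {
    const int n_chunks = 100;

    // Without a warm-up: the timed first chunk includes the cold-start cost,
    // so the extrapolated ETA is ~3x too high in this toy example.
    auto t0 = std::chrono::steady_clock::now();
    eval_chunk(/*cold =*/ true);
    auto t1 = std::chrono::steady_clock::now();
    const double cold_secs = std::chrono::duration<double>(t1 - t0).count();
    std::printf("ETA from cold first chunk:  %.1f s\n", cold_secs * n_chunks);

    // With the warm-up from this commit: a throwaway eval pays the cold-start
    // cost (like the untimed BOS eval), the timers are reset, and the first
    // *measured* chunk then runs at steady-state speed.
    eval_chunk(/*cold =*/ true);
    t0 = std::chrono::steady_clock::now();
    eval_chunk(/*cold =*/ false);
    t1 = std::chrono::steady_clock::now();
    const double warm_secs = std::chrono::duration<double>(t1 - t0).count();
    std::printf("ETA measured after warm-up: %.1f s\n", warm_secs * n_chunks);
    return 0;
}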

examples/main/main.cpp
Lines changed: 0 additions & 8 deletions

@@ -492,14 +492,6 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> embd;
     std::vector<llama_token> embd_guidance;
 
-    {
-        LOG("warming up the model with an empty run\n");
-
-        const std::vector<llama_token> tmp = { llama_token_bos(ctx), };
-        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
-        llama_reset_timings(ctx);
-    }
-
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
         if (embd.size() > 0) {
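
Net effect: the warm-up block moves verbatim from examples/main/main.cpp into llama_init_from_gpt_params in common/common.cpp (with the context handle renamed from ctx to lctx), so every tool that initializes through the common helper, including the perplexity example this commit targets, now runs the warm-up instead of main alone.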
