Commit 4c07964

cont : minor
ggml-ci
1 parent 75a0d52 commit 4c07964

File tree: 1 file changed

src/llama-context.cpp

Lines changed: 3 additions & 3 deletions
@@ -738,12 +738,10 @@ int llama_context::encode(const llama_batch & batch_inp) {
 
     const uint32_t n_tokens = batch.n_tokens;
 
-    const auto & hparams = model.hparams;
-
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
 
     // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
-    GGML_ASSERT(cparams.n_ubatch >= (uint32_t) n_tokens && "encoder requires n_ubatch >= n_tokens");
+    GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
 
     if (t_compute_start_us == 0) {
         t_compute_start_us = ggml_time_us();
@@ -754,6 +752,8 @@ int llama_context::encode(const llama_batch & batch_inp) {
 
     n_queued_tokens += n_tokens;
 
+    const auto & hparams = model.hparams;
+
     const int64_t n_embd = hparams.n_embd;
 
     llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true);
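
For context: n_tokens is already declared as const uint32_t a few lines above the assertion, so the explicit (uint32_t) cast was redundant, and the hparams reference is simply moved down to just before its first use. Below is a minimal standalone sketch of the simplified check, not the actual llama.cpp code: it uses plain assert in place of GGML_ASSERT, and the values are hypothetical stand-ins for the real cparams/batch fields.

// Minimal sketch, assuming both fields are uint32_t as in the diff above.
#include <cassert>
#include <cstdint>

int main() {
    const uint32_t n_ubatch = 512; // hypothetical stand-in for cparams.n_ubatch
    const uint32_t n_tokens = 256; // hypothetical stand-in for batch.n_tokens

    // Both operands are uint32_t, so the old (uint32_t) cast on n_tokens
    // was a no-op and can be dropped without changing the comparison.
    assert(n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");

    return 0;
}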
