Skip to content

Commit 9f473c5

Browse files
ggerganov and teleprint-me
authored and committed
llama : rename n_ctx -> cache.size, less confusing (#0)
1 parent 4c53ab6 commit 9f473c5

File tree

1 file changed

+5
-6
lines changed

1 file changed

+5
-6
lines changed

llama.cpp

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -2479,7 +2479,6 @@ static bool llama_kv_cache_init(
24792479
static bool llama_kv_cache_find_slot(
24802480
struct llama_kv_cache & cache,
24812481
const struct llama_batch & batch) {
2482-
const uint32_t n_ctx = cache.size;
24832482
const uint32_t n_tokens = batch.n_tokens;
24842483

24852484
if (cache.recurrent) {
@@ -2530,16 +2529,16 @@ static bool llama_kv_cache_find_slot(
25302529
}
25312530
// otherwise, one cell per token.
25322531

2533-
if (n_tokens > n_ctx) {
2534-
LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
2532+
if (n_tokens > cache.size) {
2533+
LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size);
25352534
return false;
25362535
}
25372536

25382537
uint32_t n_tested = 0;
25392538

25402539
while (true) {
2541-
if (cache.head + n_tokens > n_ctx) {
2542-
n_tested += n_ctx - cache.head;
2540+
if (cache.head + n_tokens > cache.size) {
2541+
n_tested += cache.size - cache.head;
25432542
cache.head = 0;
25442543
continue;
25452544
}
@@ -2558,7 +2557,7 @@ static bool llama_kv_cache_find_slot(
25582557
break;
25592558
}
25602559

2561-
if (n_tested >= n_ctx) {
2560+
if (n_tested >= cache.size) {
25622561
//LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
25632562
return false;
25642563
}

0 commit comments

Comments
 (0)