Skip to content

Commit 21cdec3

Browse files
committed
kv-cache : fix find_slot() logic for free slots
ggml-ci
1 parent 74b68d2 commit 21cdec3

File tree

2 files changed

+7
-6
lines changed

2 files changed

+7
-6
lines changed

src/llama-kv-cache.cpp

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -570,6 +570,7 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const {
 
     bool found = true;
     for (uint32_t i = 0; i < n_tokens; i++) {
+        const llama_pos pos = ubatch.pos[i];
         const llama_seq_id seq_id = ubatch.seq_id[i][0];
 
         // can we use this cell? either:
@@ -578,10 +579,12 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const {
         const bool can_use =
             cells.is_empty(head_cur + i) ||
             (
-                cells.pos_get(head_cur + i) <= ubatch.pos[i] && // causal mask
-                cells.seq_has(head_cur + i, seq_id) && // sequence mask
-                cells.seq_count(head_cur + i) == 1 &&
-                is_masked_swa(cells.pos_get(head_cur + i), ubatch.seq_pos_min[seq_id]) // SWA mask
+                cells.seq_has (head_cur + i, seq_id) && // sequence mask
+                cells.seq_count(head_cur + i) == 1 &&
+                (
+                    cells.pos_get (head_cur + i) >= pos || // causal mask
+                    is_masked_swa(cells.pos_get(head_cur + i), ubatch.seq_pos_min[seq_id]) // SWA mask
+                )
             );
 
         if (!can_use) {

src/llama-kv-cache.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@ struct llama_kv_cache : public llama_memory_i {
     // process any pending defrag/shift/etc. operations
     // optionally call once before processing a new batch
     // return true if any operations were performed
-    // will reserve a new worst-case graph if needed
     virtual bool update(llama_context & lctx) = 0;
 
     // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
@@ -240,7 +239,6 @@ class llama_kv_cache_unified : public llama_kv_cache {
 
 // utilizes two instances of llama_kv_cache_unified
 // the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
-// upon successful processing of the batch, the SWA cache removes old tokens outside the n_swa window
 
 class llama_kv_cache_unified_iswa : public llama_kv_cache {
 public:

0 commit comments

Comments (0)