Skip to content

Commit b9d4ef0

Browse files
ggerganov authored and NeoZhangJianyu committed
llama : fix defrag logic (ggml-org#11707)
* llama : fix defrag logic

ggml-ci

* cont : better logic

ggml-ci

* cont : clamp fragmentation to 0.0

ggml-ci
1 parent 5935bce commit b9d4ef0

File tree

1 file changed

+5
-3
lines changed

1 file changed

+5
-3
lines changed

src/llama.cpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8805,12 +8805,14 @@ static int llama_decode_impl(
88058805
//llama_synchronize(&lctx);
88068806

88078807
// decide if we need to defrag the kv cache
8808-
if (cparams.causal_attn && cparams.defrag_thold >= 0.0f) {
8809-
const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used)/float(kv_self.n) : 0.0f;
8808+
if (cparams.causal_attn && cparams.defrag_thold > 0.0f) {
8809+
// - do not defrag small contexts (i.e. < 2048 tokens)
8810+
// - count the padding towards the number of used tokens
8811+
const float fragmentation = kv_self.n >= 2048 ? std::max(0.0f, 1.0f - float(kv_self.used + llama_kv_cache_get_padding(cparams))/float(kv_self.n)) : 0.0f;
88108812

88118813
// queue defragmentation for next llama_kv_cache_update
88128814
if (fragmentation > cparams.defrag_thold) {
8813-
//LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
8815+
LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);
88148816

88158817
llama_kv_cache_defrag(kv_self);
88168818
}

0 commit comments

Comments (0)