Skip to content

Commit ffad043

Browse files
authored
server : fix SWA condition for full context reprocess (#14163)
ggml-ci
1 parent 0889eba commit ffad043

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

tools/server/server.cpp

Lines changed: 1 addition & 1 deletion
Original line | Diff line | Diff change

@@ -3217,7 +3217,7 @@ struct server_context {
             }

             const auto n_swa = llama_model_n_swa(model);
-            if (pos_min > slot.n_past - n_swa) {
+            if (pos_min > std::max(0, slot.n_past - n_swa)) {
                 SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa);
                 SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n",
                     "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");

0 commit comments

Comments
 (0)