
Commit 3600cc2

llama : use n_swa + n_ubatch cells for SWA cache (#13833)
* llama : use n_swa + n_ubatch cells for SWA cache

ggml-ci

* llama : add warning about multi-sequence SWA contexts
1 parent c7e0a20 commit 3600cc2

File tree

6 files changed: +24, -11 lines


include/llama.h

Lines changed: 3 additions & 0 deletions
@@ -366,6 +366,8 @@ extern "C" {
         bool no_perf;     // measure performance timings
         bool op_offload;  // offload host tensor operations to device
         bool swa_full;    // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
+                          // NOTE: setting to false when n_seq_max > 1 can cause bad performance in some cases
+                          // ref: https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573
     };
 
     // model quantization parameters
@@ -502,6 +504,7 @@ extern "C" {
     LLAMA_API int32_t llama_model_n_layer   (const struct llama_model * model);
     LLAMA_API int32_t llama_model_n_head    (const struct llama_model * model);
     LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
+    LLAMA_API int32_t llama_model_n_swa     (const struct llama_model * model);
 
     // Get the model's RoPE frequency scaling factor
     LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model);
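As a usage illustration (not part of the commit), here is a minimal sketch of how a client could combine the documented swa_full flag with the new llama_model_n_swa() query. It assumes a llama_model * model has already been loaded and that the usual context-creation calls from llama.h (llama_context_default_params, llama_init_from_model) are available:

    // Minimal sketch, not from this commit: query the SWA window and pick swa_full
    // for a multi-sequence context, following the NOTE added above.
    llama_context_params cparams = llama_context_default_params();
    cparams.n_seq_max = 4;
    cparams.swa_full  = true; // setting it to false with n_seq_max > 1 can degrade performance

    const int32_t n_swa = llama_model_n_swa(model);
    if (n_swa > 0) {
        // the model uses sliding-window attention with a window of n_swa positions
    }

    llama_context * ctx = llama_init_from_model(model, cparams);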

src/llama-context.cpp

Lines changed: 5 additions & 0 deletions
@@ -123,6 +123,11 @@ llama_context::llama_context(
                 __func__, n_ctx_per_seq, hparams.n_ctx_train);
     }
 
+    if (!params.swa_full && cparams.n_seq_max > 1) {
+        LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n",
+                __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573");
+    }
+
     if (!hparams.vocab_only) {
         // GPU backends
         for (auto * dev : model.devices) {

src/llama-kv-cache.cpp

Lines changed: 2 additions & 2 deletions
@@ -1731,14 +1731,14 @@ llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa(
                  bool   swa_full,
              uint32_t   kv_size,
              uint32_t   n_seq_max,
-             uint32_t   n_batch,
+             uint32_t   n_ubatch,
              uint32_t   n_pad) : hparams(model.hparams) {
     llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
     llama_kv_cache_unified::layer_filter_cb filter_swa  = [&](int32_t il) { return  model.hparams.is_swa(il); };
 
     const uint32_t size_base = kv_size;
 
-    uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch, n_pad));
+    uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_ubatch, n_pad));
 
     // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size
     if (swa_full) {
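The size_swa line above is the core of the change: the SWA cache needs room for the attention window of every sequence plus the tokens of one micro-batch in flight, so n_ubatch rather than the full logical n_batch is the correct slack term. A rough standalone sketch of the computation follows, with illustrative numbers that are assumed for the example rather than taken from the commit:

    // Standalone sketch of the SWA cache sizing; values are illustrative only.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // round x up to a multiple of n (matches ggml's GGML_PAD for power-of-two n)
    static uint32_t pad_to(uint32_t x, uint32_t n) { return (x + n - 1) / n * n; }

    int main() {
        const uint32_t kv_size   = 8192; // base cache size (cparams.n_ctx)
        const uint32_t n_swa     = 1024; // sliding-window size from the model hparams
        const uint32_t n_seq_max = 2;    // parallel sequences
        const uint32_t n_ubatch  = 512;  // micro-batch size
        const uint32_t n_pad     = 256;  // cache padding granularity

        const uint32_t size_swa = std::min(kv_size, pad_to(n_swa*n_seq_max + n_ubatch, n_pad));
        printf("size_swa = %u cells\n", size_swa); // 2560 here, instead of the full 8192
        return 0;
    }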

src/llama-kv-cache.h

Lines changed: 1 addition & 1 deletion
@@ -339,7 +339,7 @@ class llama_kv_cache_unified_iswa : public llama_kv_cache {
                  bool   swa_full,
              uint32_t   kv_size,
              uint32_t   n_seq_max,
-             uint32_t   n_batch,
+             uint32_t   n_ubatch,
              uint32_t   n_pad);
 
     ~llama_kv_cache_unified_iswa() = default;

src/llama-model.cpp

Lines changed: 5 additions & 1 deletion
@@ -13230,7 +13230,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
                         params.swa_full,
                         cparams.n_ctx,
                         cparams.n_seq_max,
-                        cparams.n_batch,
+                        cparams.n_ubatch,
                         padding);
             } else {
                 GGML_ASSERT(!hparams.is_swa_any());
@@ -13593,6 +13593,10 @@ int32_t llama_model_n_head_kv(const llama_model * model) {
     return model->hparams.n_head_kv();
 }
 
+int32_t llama_model_n_swa(const llama_model * model) {
+    return model->hparams.n_swa;
+}
+
 // deprecated
 int32_t llama_n_ctx_train(const llama_model * model) {
     return llama_model_n_ctx_train(model);

tools/server/server.cpp

Lines changed: 8 additions & 7 deletions
@@ -2016,11 +2016,6 @@ struct server_context {
                 params_base.n_cache_reuse = 0;
                 SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled");
             }
-
-            if (!params_base.speculative.model.path.empty()) {
-                SRV_ERR("%s\n", "err: speculative decode is not supported by this context");
-                return false;
-            }
         }
 
         return true;
@@ -3215,8 +3210,14 @@ struct server_context {
 
                 if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) {
                     const auto pos_min = llama_kv_self_seq_pos_min(ctx, slot.id);
-                    if (pos_min > 0) {
-                        SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min);
+                    if (pos_min == -1) {
+                        SLT_ERR(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min);
+                        GGML_ABORT("pos_min == -1, but n_past > 0 - should not happen: https://github.com/ggml-org/llama.cpp/pull/13833#discussion_r2116181237");
+                    }
+
+                    const auto n_swa = llama_model_n_swa(model);
+                    if (pos_min > slot.n_past - n_swa) {
+                        SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa);
                         SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n",
                             "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");
                         llama_kv_self_seq_rm(ctx, slot.id, 0, -1);
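The rewritten server check can be read as follows: a slot's cached prefix is only reusable if every position the SWA layers may still attend to, i.e. everything from n_past - n_swa onward, is still present in the cache, where pos_min is the oldest stored position for the sequence. Below is a sketch of that decision as a hypothetical helper, not part of the commit:

    // Hypothetical helper mirroring the condition above: decide whether a slot's
    // cached prefix can be reused or the prompt must be re-processed from scratch.
    //   pos_min : oldest position still present in the KV cache for the sequence
    //   n_past  : length of the cached prefix
    //   n_swa   : the model's sliding-attention window (0 if the model has none)
    static bool swa_prefix_reusable(int pos_min, int n_past, int n_swa) {
        if (n_past <= 0) {
            return true;          // nothing cached, nothing to invalidate
        }
        if (pos_min == -1) {
            return false;         // no cells for this sequence at all (the real server aborts here)
        }
        // the SWA layers may still need positions [n_past - n_swa, n_past); if the
        // oldest stored position is newer than that, partial reuse is unsafe
        return pos_min <= n_past - n_swa;
    }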
