@@ -2805,6 +2805,11 @@ static void llama_kv_cache_defrag(struct llama_kv_cache & cache) {
     cache.do_defrag = true;
 }
 
+static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) {
+    // the FA kernels require padding to avoid extra runtime boundary checks
+    return cparams.flash_attn ? 256u : 32u;
+}
+
 //
 // model loading and saving
 //
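For reference, a minimal standalone sketch (not part of the commit) of how the two padding values behave; the GGML_PAD macro is reproduced here on the assumption that it matches the round-up-to-a-multiple definition in ggml.h:

```cpp
#include <cassert>
#include <cstdint>

// assumed to match GGML_PAD in ggml.h: round x up to a multiple of n (n a power of two)
#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

int main() {
    const uint32_t pad_fa    = 256u; // padding with flash attention enabled
    const uint32_t pad_no_fa =  32u; // padding otherwise

    assert(GGML_PAD(1000u, pad_fa)    == 1024u);
    assert(GGML_PAD(1000u, pad_no_fa) == 1024u);
    assert(GGML_PAD(  70u, pad_fa)    ==  256u);
    assert(GGML_PAD(  70u, pad_no_fa) ==   96u);
    return 0;
}
```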
@@ -11510,7 +11515,8 @@ static int llama_decode_internal(
             // a heuristic, to avoid attending the full cache if it is not yet utilized
             // after enough generations, the benefit from this heuristic disappears
             // if we start defragmenting the cache, the benefit from this will be more important
-            kv_self.n = std::min(kv_self.size, std::max(256u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 256)));
+            const uint32_t pad = llama_kv_cache_get_padding(cparams);
+            kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad)));
             //kv_self.n = llama_kv_cache_cell_max(kv_self);
         }
     }
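Worked through with hypothetical numbers (a sketch, not part of the commit; `cell_max` stands in for the value returned by llama_kv_cache_cell_max, and GGML_PAD is assumed to round up as in ggml.h), the new computation behaves like this:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

int main() {
    const uint32_t kv_size  = 4096u; // allocated cache size (kv_self.size)
    const uint32_t cell_max = 300u;  // hypothetical "highest used cell" from llama_kv_cache_cell_max
    const uint32_t pad      = 256u;  // llama_kv_cache_get_padding() with flash_attn enabled

    // attend only to the used, padded prefix of the cache instead of the full kv_size
    const uint32_t kv_n = std::min(kv_size, std::max(pad, GGML_PAD(cell_max, pad)));

    assert(kv_n == 512u); // 300 used cells round up to two 256-cell blocks
    return 0;
}
```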
@@ -15511,6 +15517,11 @@ struct llama_context * llama_new_context_with_model(
         return nullptr;
     }
 
+    if (params.flash_attn && model->arch == LLM_ARCH_GROK) {
+        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     llama_context * ctx = new llama_context(*model);
 
     const auto & hparams = model->hparams;
@@ -15534,7 +15545,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
 
     // this is necessary due to kv_self.n being padded later during inference
-    cparams.n_ctx = GGML_PAD(cparams.n_ctx, 256);
+    cparams.n_ctx = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams));
 
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
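One way to read the comment about this padding being "necessary": once n_ctx is itself a multiple of the KV padding, rounding the number of used cells up during decode can never overshoot the allocated cache. A small sketch of that property (my reading, with GGML_PAD reproduced as above and arbitrary values):

```cpp
#include <cassert>
#include <cstdint>

#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

int main() {
    const uint32_t pad   = 256u;                 // flash-attention padding
    const uint32_t n_ctx = GGML_PAD(5000u, pad); // 5000 requested -> 5120 allocated

    // for every possible count of used cells, the padded value fits in the cache
    for (uint32_t cell_max = 0; cell_max <= n_ctx; ++cell_max) {
        assert(GGML_PAD(cell_max, pad) <= n_ctx);
    }
    return 0;
}
```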
@@ -15579,11 +15590,6 @@ struct llama_context * llama_new_context_with_model(
         }
     }
 
-    if (cparams.flash_attn && model->arch == LLM_ARCH_GROK) {
-        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
-        cparams.flash_attn = false;
-    }
-
     if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }