We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6423642 commit e4dfc15 — Copy full SHA for e4dfc15
llama.cpp
@@ -2359,7 +2359,7 @@ static bool llama_kv_cache_init(
2359
cache.recurrent = model.arch == LLM_ARCH_MAMBA;
2360
cache.v_trans = !cparams.flash_attn;
2361
2362
- // TODO: support mixed reccurent Transformer architectues
+ // TODO: support mixed recurrent Transformer architectures
2363
// NOTE: (!a || b) is a logical implication (a -> b)
2364
GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s());
2365
GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s());
0 commit comments