Commit 30faf1f

fix auto merge
1 parent a1666aa

1 file changed (+3, -3 lines)

src/llama.cpp

Lines changed: 3 additions & 3 deletions
@@ -10863,7 +10863,7 @@ struct llm_build_context {
         // special-case: the up and gate tensors are merged into a single tensor
         // TOOD: support into llm_build_ffn
         {
-            cur = llm_build_ffn(ctx0, cur,
+            cur = llm_build_ffn(lctx, ctx0, cur,
                 model.layers[il].ffn_up, NULL, NULL,
                 NULL, NULL, NULL,
                 model.layers[il].ffn_down, NULL, NULL,
@@ -13622,7 +13622,7 @@ struct llm_build_context {
             );
         cb(Kcur, "Kcur_rope", il);

-        cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
+        cur = llm_build_kv(lctx, ctx0, model, hparams, cparams, kv_self, gf,
                 model.layers[il].wo, NULL,
                 Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);

@@ -13647,7 +13647,7 @@ struct llm_build_context {
                 LLM_NORM_RMS, cb, il);
         cb(cur, "ffn_norm", il);

-        cur = llm_build_ffn(ctx0, cur,
+        cur = llm_build_ffn(lctx, ctx0, cur,
                 model.layers[il].ffn_up, NULL, NULL,
                 NULL, NULL, NULL,
                 model.layers[il].ffn_down, NULL, NULL,
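
The change itself is mechanical: the diff suggests an earlier refactor gave the graph-building helpers a leading llama-context argument (`lctx`) ahead of the ggml context (`ctx0`), and the automatic merge left these three call sites on the old arity, which no longer compiles. Below is a minimal, self-contained sketch of that pattern; `LlamaContext`, `GgmlContext`, `Tensor`, and `llm_build_ffn_sketch` are simplified stand-ins for illustration, not llama.cpp's real definitions.

#include <cstdio>

struct LlamaContext { int id;      };   // stand-in for the llama-level context (assumed)
struct GgmlContext  { int n_nodes; };   // stand-in for the ggml graph context (assumed)
struct Tensor       { int n;       };   // stand-in for a graph tensor (assumed)

// New-style helper: the llama-level context now comes first, before the
// ggml context, mirroring llm_build_ffn(lctx, ctx0, ...) in the diff above.
static Tensor * llm_build_ffn_sketch(LlamaContext & lctx, GgmlContext * ctx0,
                                     Tensor * cur, Tensor * up, Tensor * down) {
    (void) lctx; (void) ctx0; (void) up; (void) down;
    return cur; // real code would append the FFN ops to the graph here
}

int main() {
    LlamaContext lctx{0};
    GgmlContext  ctx0{0};
    Tensor cur{1}, up{2}, down{3};

    // Pre-merge call shape (now a compile error against the new signature):
    //   cur = llm_build_ffn_sketch(&ctx0, &cur, &up, &down);
    // Post-merge call shape, threading lctx through as in the commit:
    Tensor * out = llm_build_ffn_sketch(lctx, &ctx0, &cur, &up, &down);
    std::printf("built ffn over tensor %d\n", out->n);
    return 0;
}

Because the stale call sites fail at compile time rather than silently at runtime, a fix like this one is purely a matter of restoring the new parameter order at each missed call.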
