
Commit c275bc6

sroecker authored and giuseppe committed
Add optional MLP bias for Granite models
Add optional MLP bias for ARCH_LLAMA to support Granite models. Partially addresses /issues/7116. Still needs some more changes to properly support Granite.
1 parent cd93a28 commit c275bc6

File tree

1 file changed (+11, -5 lines)


llama.cpp

Lines changed: 11 additions & 5 deletions
@@ -1922,8 +1922,9 @@ struct llama_layer {
     struct ggml_tensor * ffn_up_shexp;
 
     // ff bias
-    struct ggml_tensor * ffn_down_b; // b2
-    struct ggml_tensor * ffn_up_b; // b3
+    struct ggml_tensor * ffn_gate_b = nullptr;
+    struct ggml_tensor * ffn_down_b = nullptr; // b2
+    struct ggml_tensor * ffn_up_b = nullptr; // b3
     struct ggml_tensor * ffn_act;
 
     // mamba proj
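
The new bias members are default-initialized to nullptr, so layers loaded from checkpoints that carry no MLP bias tensors (e.g. a vanilla Llama model) simply keep null pointers. A minimal sketch of how downstream code can rely on that, assuming the llama_layer fields above (the helper name is hypothetical, not part of this patch):

    // Hypothetical helper: report whether a loaded layer carries any MLP bias.
    // Works only because the new members default to nullptr.
    static bool llama_layer_has_mlp_bias(const llama_layer & layer) {
        return layer.ffn_gate_b != nullptr
            || layer.ffn_down_b != nullptr
            || layer.ffn_up_b   != nullptr;
    }
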
@@ -5006,6 +5007,11 @@ static bool llm_load_tensors(
                 layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                 layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                 layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
+
+                // optional MLP bias
+                layer.ffn_gate_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, false);
+                layer.ffn_down_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, false);
+                layer.ffn_up_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, false);
             } else {
                 layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
 
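The trailing false passed to ml.create_tensor marks each bias as optional, so loading does not abort when the GGUF file lacks it; the member then stays nullptr. The names come from tn(LLM_TENSOR_FFN_*, "bias", i), i.e. tensors like "blk.0.ffn_gate.bias". A rough sketch of that optional-lookup behaviour against the raw gguf/ggml API, assuming a gguf_context and a ggml_context populated by gguf_init_from_file (find_optional_tensor is illustrative, not the loader's actual code path):

    #include "ggml.h"

    // Illustrative only: return the named tensor if the GGUF file contains it,
    // otherwise nullptr, mirroring what an "optional" create_tensor is expected to do.
    static struct ggml_tensor * find_optional_tensor(
            struct gguf_context * gctx,
            struct ggml_context * ctx,
            const char          * name) {
        if (gguf_find_tensor(gctx, name) < 0) {
            // e.g. "blk.0.ffn_gate.bias" is absent in models exported without MLP biases
            return nullptr;
        }
        return ggml_get_tensor(ctx, name);
    }
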
@@ -7133,9 +7139,9 @@ struct llm_build_context {
             cb(cur, "ffn_norm", il);
 
             cur = llm_build_ffn(ctx0, cur,
-                    model.layers[il].ffn_up, NULL,
-                    model.layers[il].ffn_gate, NULL,
-                    model.layers[il].ffn_down, NULL,
+                    model.layers[il].ffn_up, model.layers[il].ffn_up_b,
+                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b,
+                    model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                     NULL,
                     LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
             cb(cur, "ffn_out", il);
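
With the bias pointers passed through, llm_build_ffn can add each bias to its matching projection whenever the pointer is non-NULL; the graph is otherwise the usual SiLU-gated parallel FFN, down(silu(gate(x) + b_gate) * (up(x) + b_up)) + b_down. A condensed sketch of that graph under those assumptions (build_ffn_with_bias is an illustration, not the body of llm_build_ffn):

    // Illustrative gated FFN with optional biases; NULL bias pointers are skipped.
    static struct ggml_tensor * build_ffn_with_bias(
            struct ggml_context * ctx,
            struct ggml_tensor  * cur,
            const llama_layer   & layer) {
        struct ggml_tensor * up   = ggml_mul_mat(ctx, layer.ffn_up,   cur);
        if (layer.ffn_up_b)   { up   = ggml_add(ctx, up,   layer.ffn_up_b);   }

        struct ggml_tensor * gate = ggml_mul_mat(ctx, layer.ffn_gate, cur);
        if (layer.ffn_gate_b) { gate = ggml_add(ctx, gate, layer.ffn_gate_b); }

        gate = ggml_silu(ctx, gate);      // LLM_FFN_SILU
        cur  = ggml_mul(ctx, gate, up);   // LLM_FFN_PAR: act(gate) * up

        cur = ggml_mul_mat(ctx, layer.ffn_down, cur);
        if (layer.ffn_down_b) { cur = ggml_add(ctx, cur, layer.ffn_down_b); }
        return cur;
    }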
