
Commit d52b4d8

sroecker authored and giuseppe committed
Add optional MLP bias for Granite models
Add optional MLP bias for ARCH_LLAMA to support Granite models. Partially addresses /issues/7116. Still needs some more changes to properly support Granite.
1 parent 9b82476 commit d52b4d8
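
For context: ARCH_LLAMA builds its feed-forward block with LLM_FFN_SILU activation and LLM_FFN_PAR (parallel gate/up) combination, as the last hunk below shows. With the new optional biases loaded, the block presumably computes (notation ours, not from the commit):

    FFN(x) = W_down · (SiLU(W_gate · x + b_gate) ⊙ (W_up · x + b_up)) + b_down

where b_gate, b_up and b_down are the new ffn_gate_b, ffn_up_b and ffn_down_b tensors; any bias left at nullptr simply drops its term, recovering the usual bias-free LLaMA FFN.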

1 file changed: +11 -5 lines

llama.cpp

Lines changed: 11 additions & 5 deletions
@@ -1926,8 +1926,9 @@ struct llama_layer {
     struct ggml_tensor * ffn_up_shexp;

     // ff bias
-    struct ggml_tensor * ffn_down_b; // b2
-    struct ggml_tensor * ffn_up_b; // b3
+    struct ggml_tensor * ffn_gate_b = nullptr;
+    struct ggml_tensor * ffn_down_b = nullptr; // b2
+    struct ggml_tensor * ffn_up_b = nullptr; // b3
     struct ggml_tensor * ffn_act;

     // mamba proj
@@ -5062,6 +5063,11 @@ static bool llm_load_tensors(
         layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
         layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
         layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
+
+        // optional MLP bias
+        layer.ffn_gate_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff},   false);
+        layer.ffn_down_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, false);
+        layer.ffn_up_b   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff},   false);
     } else {
         layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});

@@ -7224,9 +7230,9 @@ struct llm_build_context {
     cb(cur, "ffn_norm", il);

     cur = llm_build_ffn(ctx0, cur,
-            model.layers[il].ffn_up,   NULL,
-            model.layers[il].ffn_gate, NULL,
-            model.layers[il].ffn_down, NULL,
+            model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
+            model.layers[il].ffn_gate, model.layers[il].ffn_gate_b,
+            model.layers[il].ffn_down, model.layers[il].ffn_down_b,
             NULL,
             LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
     cb(cur, "ffn_out", il);
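
To make the effect of the last hunk concrete, here is a rough sketch (ours, not the actual llm_build_ffn implementation) of how a SwiGLU FFN graph can apply these biases only when they were loaded. Because the new struct members default to nullptr and the loader call presumably marks the bias tensors as optional (the trailing false argument), plain LLaMA checkpoints without biases take the NULL branches and their graph is unchanged. Only the ggml_* calls are real ggml API; the function and parameter names are illustrative.

// Rough sketch of a SwiGLU FFN graph with optional biases, matching the
// LLM_FFN_SILU / LLM_FFN_PAR path used above.
#include "ggml.h"

static struct ggml_tensor * ffn_silu_par_with_optional_bias(
        struct ggml_context * ctx,
        struct ggml_tensor  * cur,                                   // input activations
        struct ggml_tensor  * up_w,   struct ggml_tensor * up_b,     // bias may be NULL
        struct ggml_tensor  * gate_w, struct ggml_tensor * gate_b,   // bias may be NULL
        struct ggml_tensor  * down_w, struct ggml_tensor * down_b) { // bias may be NULL
    struct ggml_tensor * up   = ggml_mul_mat(ctx, up_w,   cur);      // up projection
    if (up_b)   { up   = ggml_add(ctx, up,   up_b);   }              // optional up bias
    struct ggml_tensor * gate = ggml_mul_mat(ctx, gate_w, cur);      // gate projection
    if (gate_b) { gate = ggml_add(ctx, gate, gate_b); }              // optional gate bias
    gate = ggml_silu(ctx, gate);                                     // LLM_FFN_SILU
    cur  = ggml_mul(ctx, gate, up);                                  // LLM_FFN_PAR: element-wise gate * up
    cur  = ggml_mul_mat(ctx, down_w, cur);                           // down projection
    if (down_b) { cur = ggml_add(ctx, cur, down_b); }                // optional down bias
    return cur;
}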
