@@ -215,6 +215,7 @@ enum llm_arch {
     LLM_ARCH_EXAONE,
     LLM_ARCH_RWKV6,
     LLM_ARCH_GRANITE,
+    LLM_ARCH_GRANITE_MOE,
     LLM_ARCH_UNKNOWN,
 };
 
@@ -266,6 +267,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_EXAONE,          "exaone"       },
     { LLM_ARCH_RWKV6,           "rwkv6"        },
     { LLM_ARCH_GRANITE,         "granite"      },
+    { LLM_ARCH_GRANITE_MOE,     "granitemoe"   },
     { LLM_ARCH_UNKNOWN,         "(unknown)"    },
 };
 
@@ -1478,6 +1480,23 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_GRANITE_MOE,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
+            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
+            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
+            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {
@@ -2396,7 +2415,7 @@ struct llama_hparams {
     float f_max_alibi_bias = 0.0f;
     float f_logit_scale    = 0.0f;
 
-    // Additional scale factors (Granite)
+    // Additional scale factors (Granite/Granite MoE)
     float f_residual_scale  = 0.0f;
     float f_embedding_scale = 0.0f;
     float f_attention_scale = 0.0f;
@@ -6052,6 +6071,7 @@ static void llm_load_hparams(
                 }
             } break;
         case LLM_ARCH_GRANITE:
+        case LLM_ARCH_GRANITE_MOE:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                 ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
@@ -6060,6 +6080,7 @@ static void llm_load_hparams(
                 ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
 
                 switch (hparams.n_layer) {
+                    case 32: model.type = e_model::MODEL_3B; break;
                     case 40: model.type = e_model::MODEL_3B; break;
                     // Add additional layer/vocab/etc checks here for other model sizes
                     default: model.type = e_model::MODEL_UNKNOWN;
@@ -6764,7 +6785,7 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp);
     }
 
-    if (model.arch == LLM_ARCH_GRANITE) {
+    if (model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
         LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
         LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale);
         LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
@@ -6938,6 +6959,7 @@ static bool llm_load_tensors(
         case LLM_ARCH_REFACT:
         case LLM_ARCH_MINICPM:
         case LLM_ARCH_GRANITE:
+        case LLM_ARCH_GRANITE_MOE:
             {
                 model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
@@ -15865,6 +15887,7 @@ static struct ggml_cgraph * llama_build_graph(
     switch (model.arch) {
         case LLM_ARCH_LLAMA:
         case LLM_ARCH_GRANITE:
+        case LLM_ARCH_GRANITE_MOE:
             {
                 result = llm.build_llama();
             } break;
@@ -19162,6 +19185,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_DEEPSEEK2:
         case LLM_ARCH_CHATGLM:
         case LLM_ARCH_GRANITE:
+        case LLM_ARCH_GRANITE_MOE:
             return LLAMA_ROPE_TYPE_NORM;
 
         // the pairs of head values are offset by n_rot/2