Commit 8c8e43c

Settings for MoE models with >= 8 experts now applied to models with >= 4 experts
1 parent aa4eb59 commit 8c8e43c
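
This commit lowers the expert-count threshold that gates the "many-expert" quantization overrides in llama_tensor_get_type() from 8 to 4, so 4-expert MoE models now receive the same per-tensor type bumps as 8-expert models. A minimal sketch of the effect, modeled on the attn_v.weight branch in the first hunk below (the enum and pick_attn_v_type() are illustrative stand-ins, not llama.cpp API):

// Sketch only: mirrors the shape of the attn_v.weight logic from the diff
// to show what the 8 -> 4 threshold change does for a 4-expert model.
// All identifiers here are hypothetical, not the real llama.cpp code.
#include <cstdint>
#include <cstdio>

enum sketch_type { SK_Q4_K, SK_Q6_K };

// Simplified selection for attn_v.weight under the low-bit IQ1/IQ2 ftypes.
static sketch_type pick_attn_v_type(uint32_t n_expert, uint32_t n_gqa, uint32_t moe_threshold) {
    if (n_expert >= moe_threshold) return SK_Q6_K;   // the "many-expert" bump
    if (n_gqa >= 2 || n_expert >= 2) return SK_Q4_K; // fallback a 4-expert model hit before this change
    return SK_Q4_K;                                  // (further IQ3 fallbacks in the real code omitted)
}

int main() {
    const uint32_t n_expert = 4, n_gqa = 4; // hypothetical 4-expert MoE model with GQA
    printf("threshold 8 (before): %s\n", pick_attn_v_type(n_expert, n_gqa, 8) == SK_Q6_K ? "Q6_K" : "Q4_K");
    printf("threshold 4 (after):  %s\n", pick_attn_v_type(n_expert, n_gqa, 4) == SK_Q6_K ? "Q6_K" : "Q4_K");
    return 0;
}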

File tree

1 file changed: +7 −7 lines changed

src/llama.cpp

Lines changed: 7 additions & 7 deletions
@@ -15344,15 +15344,15 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     } else if (name.find("attn_v.weight") != std::string::npos) {
         if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
             ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_Q6_K;
+            if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q6_K;
             else {
                 if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS) new_type = GGML_TYPE_IQ3_XXS;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
             }
             ++qs.i_attention_wv;
         }
-        else if (qs.model.hparams.n_expert >= 8) {
+        else if (qs.model.hparams.n_expert >= 4) {
             // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
@@ -15391,7 +15391,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
         ++qs.i_attention_wv;
     } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert >= 8) {
+        if (qs.model.hparams.n_expert >= 4) {
             // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
@@ -15436,7 +15436,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ2_S;
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
                  ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-            if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_Q3_K;
+            if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q3_K;
         }
     } else if (name.find("ffn_down") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
@@ -15461,7 +15461,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
                      : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
-                    (qs.model.hparams.n_expert >= 8 && use_more_bits(i_layer, n_layer)))) {
+                    (qs.model.hparams.n_expert >= 4 && use_more_bits(i_layer, n_layer)))) {
             new_type = GGML_TYPE_Q4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
@@ -15492,15 +15492,15 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         ++qs.i_ffn_down;
     } else if (name.find("attn_output.weight") != std::string::npos) {
         if (arch != LLM_ARCH_FALCON) {
-            if (qs.model.hparams.n_expert >= 8) {
+            if (qs.model.hparams.n_expert >= 4) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) new_type = GGML_TYPE_Q5_K;
             }
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
                      ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-                if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_Q4_K;
+                if (qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
                 else {
                     if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
                     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_XXS;
