Commit ec45632
Streamline a bit the quant strategies (LostRuins#443)
* Streamline the quant strategies a bit

  No change over the existing patterns, except for bumping attn_k and attn_v for models with 4 and 6 experts (several frankensteins seen on HF, which also use GQA). The rest applies the existing patterns to the new IQ_K quants. Also, a Q8_0 for attn_q had slipped into the 8-expert MoE rule; I removed it, because that tensor is much bigger than attn_k or attn_v.

* Remove the <=8 experts condition
1 parent b94cd3b commit ec45632
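
Reduced to just the pieces the diff below touches, the new attn_v selection can be summarized in a small standalone sketch. This is only an illustration: pick_attn_v_type and its plain-int arguments are hypothetical simplifications, not llama.cpp API; the real code reads these values from the model hparams and handles many more quant types.

    // Minimal sketch of the updated attn_v rule; names and inputs are hypothetical.
    #include <cstdio>

    enum sketch_type { T_IQ3_K, T_IQ4_K, T_IQ5_K, T_IQ6_K, T_Q8_0 };

    static sketch_type pick_attn_v_type(sketch_type cur, int n_expert, int n_gqa) {
        // New rule: any MoE with 4 or more experts gets Q8_0 for attn_v
        // (previously the condition was n_expert == 8).
        if (n_expert >= 4) return T_Q8_0;
        // Existing GQA pattern extended to the IQ_K quants: with 4+ heads sharing
        // attn_v, spending more bits on it is cheap relative to total model size.
        if (n_gqa >= 4) {
            if (cur == T_IQ3_K) return T_IQ4_K;
            if (cur == T_IQ4_K) return T_IQ5_K;
            if (cur == T_IQ5_K) return T_IQ6_K;
        }
        return cur;
    }

    int main() {
        // A 4-expert frankenstein with GQA now lands on Q8_0 for attn_v.
        printf("%d\n", pick_attn_v_type(T_IQ4_K, 4, 4) == T_Q8_0);
        return 0;
    }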

File tree

1 file changed: +31 -17 lines changed

src/llama.cpp

Lines changed: 31 additions & 17 deletions
@@ -18967,50 +18967,53 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
                 new_type = GGML_TYPE_Q6_K;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ5_K || ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS) {
+            if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
+                new_type = GGML_TYPE_IQ6_K;
+        }
         if (qs.model.type == MODEL_70B) {
             // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
             // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
             // nearly negligible increase in model size by quantizing this tensor with more bits:
             if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+            if (new_type == GGML_TYPE_IQ3_K) new_type = GGML_TYPE_IQ5_K;
         }
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+        if (qs.model.hparams.n_expert >= 4) {
+            // for the 4-8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
         }
         else if (qs.model.hparams.n_gqa() >= 4) {
             if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
             else if (new_type == GGML_TYPE_Q2_K_R4 || new_type == GGML_TYPE_IQ3_XXS_R4) new_type = GGML_TYPE_IQ3_K_R4;
-            else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S ) new_type = GGML_TYPE_Q4_K;
+            else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S) new_type = GGML_TYPE_Q4_K;
+            else if (new_type == GGML_TYPE_IQ3_K) new_type = GGML_TYPE_IQ4_K;
             else if (new_type == GGML_TYPE_IQ3_S_R4) new_type = GGML_TYPE_Q4_K_R4;
             else if (new_type == GGML_TYPE_Q3_K_R4) new_type = GGML_TYPE_Q4_K_R4;
             else if (new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_IQ4_XS) new_type = GGML_TYPE_Q5_K;
             else if (new_type == GGML_TYPE_IQ4_NL) new_type = GGML_TYPE_Q5_K;
+            else if (new_type == GGML_TYPE_IQ4_K || new_type == GGML_TYPE_IQ4_KS) new_type = GGML_TYPE_IQ5_K;
             else if (new_type == GGML_TYPE_IQ4_NL_R4) new_type = GGML_TYPE_Q5_K;
             else if (new_type == GGML_TYPE_IQ4_XS_R8) new_type = GGML_TYPE_Q5_K;
             else if (new_type == GGML_TYPE_Q5_K) new_type = GGML_TYPE_Q6_K;
+            else if (new_type == GGML_TYPE_IQ5_K || new_type == GGML_TYPE_IQ5_KS) new_type = GGML_TYPE_IQ6_K;
         }
         ++qs.i_attention_wv;
     } else if (name.find("attn_k") != std::string::npos) {
         if (qs.params->attn_k_type < GGML_TYPE_COUNT) new_type = qs.params->attn_k_type;
-        else if (qs.model.hparams.n_expert >= 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+        else if (qs.model.hparams.n_expert >= 4) {
+            // for the 4-8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
-            new_type = GGML_TYPE_IQ3_XXS;
+            new_type = GGML_TYPE_IQ3_XXS; // TODO: explore better strategies?
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4) {
-            new_type = GGML_TYPE_IQ2_S;
+            new_type = GGML_TYPE_IQ2_S; // TODO: explore better strategies?
         }
     } else if (name.find("attn_q") != std::string::npos) {
         if (qs.params->attn_q_type < GGML_TYPE_COUNT) new_type = qs.params->attn_q_type;
-        else if (qs.model.hparams.n_expert >= 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
@@ -19021,6 +19024,14 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
                 new_type = GGML_TYPE_Q4_K;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ5_K) {
+            if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
+                new_type = GGML_TYPE_IQ4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS) {
+            if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
+                new_type = GGML_TYPE_IQ4_KS;
+        }
     } else if (name.find("ffn_down") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
         int i_layer = info.first, n_layer = info.second;
@@ -19044,7 +19055,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
                     : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
-                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
+                    (qs.model.hparams.n_expert >= 4 && use_more_bits(i_layer, n_layer)))) {
             new_type = GGML_TYPE_IQ4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
@@ -19091,19 +19102,22 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     } else if (name.find("attn_output.weight") != std::string::npos) {
         if (qs.params->attn_output_type < GGML_TYPE_COUNT) new_type = qs.params->attn_output_type;
         else if (arch != LLM_ARCH_FALCON) {
-            if (qs.model.hparams.n_expert >= 8) {
+            if (qs.model.hparams.n_expert >= 4) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_K ||
+                    ftype == LLAMA_FTYPE_MOSTLY_IQ4_KSS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS_R4 ||
+                    ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS_R8 || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q2_K_R4|| ftype == LLAMA_FTYPE_MOSTLY_IQ4_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S_R4) {
-                    new_type = GGML_TYPE_Q5_K;
+                    new_type = GGML_TYPE_Q5_K; // should the IQ_K quants be applied here as the new type for the IQ_K ftypes ?
+                    // also, this condition could be reproduced on attn_q, eventually with Q4_K instead of Q5_K.
                 }
             } else {
-                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
+                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K; // This list could be generalized and streamlined
                 else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4) new_type = GGML_TYPE_IQ3_K_R4;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
@@ -19120,7 +19134,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (name.find("attn_qkv.weight") != std::string::npos) {
         if (qs.params->attn_qkv_type < GGML_TYPE_COUNT) new_type = qs.params->attn_qkv_type;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
-            new_type = GGML_TYPE_Q4_K;
+            new_type = GGML_TYPE_Q4_K; // That logic could either be generalized, either be ditched?
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ) new_type = GGML_TYPE_IQ4_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
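
The "~128MB" in the attn_k/attn_v comments can be sanity-checked with a rough, self-contained estimate of what bumping both tensors to Q8_0 adds for a hypothetical Mixtral-style MoE. Every dimension below and the 5-bit baseline are assumptions chosen for illustration, not values taken from the commit.

    // Back-of-the-envelope size estimate for the Q8_0 bump on attn_k + attn_v.
    // Assumed dimensions: n_embd = 4096, 8 KV heads of size 128, 32 layers.
    // Q8_0 stores 34 bytes per block of 32 weights (~8.5 bits/weight).
    #include <cstdio>

    int main() {
        const double n_embd = 4096, n_embd_gqa = 8 * 128, n_layer = 32;
        const double params_per_tensor = n_embd * n_embd_gqa * n_layer; // attn_k or attn_v, whole model
        const double bits_q8_0   = 34.0 * 8 / 32;  // 8.5 bits per weight
        const double bits_before = 5.0;            // assumed ~5-bit baseline quant
        const double extra_bytes = 2 * params_per_tensor * (bits_q8_0 - bits_before) / 8;
        printf("extra size for attn_k + attn_v at Q8_0: %.0f MiB\n", extra_bytes / (1024 * 1024));
        return 0;
    }

With these assumed numbers the bump costs on the order of a hundred MiB, i.e. the same ballpark as the comment's figure; a 4- or 6-expert frankenmerge of similar width with GQA pays roughly the same, which is why extending the rule from 8 experts down to 4 is cheap.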
