@@ -18967,50 +18967,53 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
                 new_type = GGML_TYPE_Q6_K;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ5_K || ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS) {
+            if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
+                new_type = GGML_TYPE_IQ6_K;
+        }
         if (qs.model.type == MODEL_70B) {
             // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
             // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
             // nearly negligible increase in model size by quantizing this tensor with more bits:
             if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+            if (new_type == GGML_TYPE_IQ3_K) new_type = GGML_TYPE_IQ5_K;
         }
-        if (qs.model.hparams.n_expert == 8 ) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+        if (qs.model.hparams.n_expert >= 4 ) {
+            // for the 4- 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
         }
         else if (qs.model.hparams.n_gqa() >= 4) {
             if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
             else if (new_type == GGML_TYPE_Q2_K_R4 || new_type == GGML_TYPE_IQ3_XXS_R4) new_type = GGML_TYPE_IQ3_K_R4;
-            else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S ) new_type = GGML_TYPE_Q4_K;
+            else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S) new_type = GGML_TYPE_Q4_K;
+            else if (new_type == GGML_TYPE_IQ3_K) new_type = GGML_TYPE_IQ4_K;
             else if (new_type == GGML_TYPE_IQ3_S_R4) new_type = GGML_TYPE_Q4_K_R4;
             else if (new_type == GGML_TYPE_Q3_K_R4) new_type = GGML_TYPE_Q4_K_R4;
             else if (new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_IQ4_XS) new_type = GGML_TYPE_Q5_K;
             else if (new_type == GGML_TYPE_IQ4_NL) new_type = GGML_TYPE_Q5_K;
+            else if (new_type == GGML_TYPE_IQ4_K || new_type == GGML_TYPE_IQ4_KS) new_type = GGML_TYPE_IQ5_K;
             else if (new_type == GGML_TYPE_IQ4_NL_R4) new_type = GGML_TYPE_Q5_K;
             else if (new_type == GGML_TYPE_IQ4_XS_R8) new_type = GGML_TYPE_Q5_K;
             else if (new_type == GGML_TYPE_Q5_K) new_type = GGML_TYPE_Q6_K;
+            else if (new_type == GGML_TYPE_IQ5_K || new_type == GGML_TYPE_IQ5_KS) new_type = GGML_TYPE_IQ6_K;
         }
         ++qs.i_attention_wv;
     } else if (name.find("attn_k") != std::string::npos) {
         if (qs.params->attn_k_type < GGML_TYPE_COUNT) new_type = qs.params->attn_k_type;
-        else if (qs.model.hparams.n_expert >= 8 ) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+        else if (qs.model.hparams.n_expert >= 4 ) {
+            // for the 4- 8-expert model, bumping this to Q8_0 trades just ~128MB
             // TODO: explore better strategies
             new_type = GGML_TYPE_Q8_0;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
-            new_type = GGML_TYPE_IQ3_XXS;
+            new_type = GGML_TYPE_IQ3_XXS; // TODO: explore better strategies?
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4) {
-            new_type = GGML_TYPE_IQ2_S;
+            new_type = GGML_TYPE_IQ2_S; // TODO: explore better strategies?
         }
     } else if (name.find("attn_q") != std::string::npos) {
         if (qs.params->attn_q_type < GGML_TYPE_COUNT) new_type = qs.params->attn_q_type;
-        else if (qs.model.hparams.n_expert >= 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
             new_type = GGML_TYPE_IQ3_XXS;
         }
@@ -19021,6 +19024,14 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
                 new_type = GGML_TYPE_Q4_K;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ5_K) {
+            if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
+                new_type = GGML_TYPE_IQ4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS) {
+            if (qs.model.hparams.n_vocab >= 127999 && (qs.model.type == MODEL_8B || qs.model.type == MODEL_70B))
+                new_type = GGML_TYPE_IQ4_KS;
+        }
     } else if (name.find("ffn_down") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
         int i_layer = info.first, n_layer = info.second;
@@ -19044,7 +19055,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
                      : GGML_TYPE_Q3_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
-                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
+                    (qs.model.hparams.n_expert >= 4 && use_more_bits(i_layer, n_layer)))) {
             new_type = GGML_TYPE_IQ4_K;
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
@@ -19091,19 +19102,22 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     } else if (name.find("attn_output.weight") != std::string::npos) {
         if (qs.params->attn_output_type < GGML_TYPE_COUNT) new_type = qs.params->attn_output_type;
         else if (arch != LLM_ARCH_FALCON) {
-            if (qs.model.hparams.n_expert >= 8 ) {
+            if (qs.model.hparams.n_expert >= 4 ) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_K ||
+                    ftype == LLAMA_FTYPE_MOSTLY_IQ4_KSS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS_R4 ||
+                    ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS_R8 || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q2_K_R4|| ftype == LLAMA_FTYPE_MOSTLY_IQ4_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S_R4) {
-                new_type = GGML_TYPE_Q5_K;
+                new_type = GGML_TYPE_Q5_K; // should the IQ_K quants be applied here as the new type for the IQ_K ftypes ?
+                // also, this condition could be reproduced on attn_q, eventually with Q4_K instead of Q5_K.
             }
         } else {
-            if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
+            if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K; // This list could be generalized and streamlined
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
             else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4) new_type = GGML_TYPE_IQ3_K_R4;
             else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
@@ -19120,7 +19134,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (name.find("attn_qkv.weight") != std::string::npos) {
         if (qs.params->attn_qkv_type < GGML_TYPE_COUNT) new_type = qs.params->attn_qkv_type;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
-            new_type = GGML_TYPE_Q4_K;
+            new_type = GGML_TYPE_Q4_K; // That logic could either be generalized, either be ditched?
         }
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ) new_type = GGML_TYPE_IQ4_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;