Skip to content

Commit aa4eb59

Browse files
committed
Further refactor attn_k
With attn_k set for all quants below 3bpw except Q2_K_S.
1 parent 8f1b99f commit aa4eb59

File tree

1 file changed

+32
-10
lines changed

1 file changed

+32
-10
lines changed

src/llama.cpp

Lines changed: 32 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -15391,23 +15391,45 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
1539115391
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
1539215392
++qs.i_attention_wv;
1539315393
} else if (name.find("attn_k.weight") != std::string::npos) {
15394-
if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
15395-
ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
15396-
if (qs.model.hparams.n_expert >= 8) new_type = GGML_TYPE_Q5_K;
15397-
}
15398-
else if (qs.model.hparams.n_expert >= 8) {
15394+
if (qs.model.hparams.n_expert >= 8) {
1539915395
// for the 8-expert model, bumping this to Q8_0 trades just ~128MB
1540015396
// TODO: explore better strategies
15401-
new_type = GGML_TYPE_Q8_0;
15397+
if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
15398+
ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
15399+
ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q6_K;
15400+
else new_type = GGML_TYPE_Q8_0;
1540215401
}
15403-
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15404-
new_type = GGML_TYPE_IQ4_XS;
15402+
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15403+
new_type = GGML_TYPE_IQ1_M;
15404+
}
15405+
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15406+
new_type = GGML_TYPE_IQ2_XXS;
15407+
}
15408+
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS) && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15409+
new_type = GGML_TYPE_IQ2_XS;
15410+
}
15411+
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS) && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15412+
new_type = GGML_TYPE_IQ2_S;
15413+
}
15414+
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) &&
15415+
(qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15416+
new_type = GGML_TYPE_IQ3_XXS;
15417+
}
15418+
else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15419+
new_type = GGML_TYPE_Q3_K;
1540515420
}
1540615421
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && qs.model.hparams.n_gqa() < 2 && qs.model.hparams.n_expert < 2) {
1540715422
new_type = GGML_TYPE_IQ3_XXS;
1540815423
}
15409-
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && qs.model.hparams.n_gqa() < 2 && qs.model.hparams.n_expert < 2) {
15410-
new_type = GGML_TYPE_IQ2_S;
15424+
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15425+
new_type = GGML_TYPE_IQ4_XS;
15426+
}
15427+
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15428+
new_type = GGML_TYPE_Q4_K;
15429+
}
15430+
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S ||
15431+
ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) && (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2)) {
15432+
new_type = GGML_TYPE_Q5_K;
1541115433
}
1541215434
} else if (name.find("attn_q.weight") != std::string::npos) {
1541315435
if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) new_type = GGML_TYPE_IQ3_XXS;

0 commit comments

Comments
 (0)