Skip to content

Commit f6ea7a0

Browse files
committed
llama : change fallback type IQ4_NL -> Q4_0
ggml-ci
1 parent 0efec57 commit f6ea7a0

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

src/llama.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -18090,10 +18090,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
18090 18090
case GGML_TYPE_IQ1_M:
18091 18091
case GGML_TYPE_Q2_K:
18092 18092
case GGML_TYPE_Q3_K:
18093-
case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
18094-
case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
18095-
case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
18096-
case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
18093+
case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_Q4_0; break;
18094+
case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
18095+
case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
18096+
case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
18097 18097
default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
18098 18098
}
18099 18099
LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));

0 commit comments

Comments (0)