Commit 74d4cfa

Allow "quantizing" to f16 and f32 (#1787)
* Allow "quantizing" to f16 and f32
* Fix an issue where quantizing didn't respect LLAMA_NO_K_QUANTS
* Add brief help to the list of quantization types in the quantize tool
* Ignore case for quantization type arguments in the quantize tool
1 parent 74a6d92 commit 74d4cfa
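
The user-visible change in the quantize tool is that F16 and F32 are now accepted as "quantization" targets and that type names are matched case-insensitively (f16, q4_k_m, etc. all resolve). As a minimal sketch of that lookup idea, not the tool's actual code, the following self-contained C++ program upper-cases the argument before comparing it against a small stand-in table (the integer codes are placeholders, not real llama_ftype values; the real tool uses QUANT_OPTIONS from examples/quantize/quantize.cpp below):

// Minimal sketch of the case-insensitive type lookup introduced by this
// commit. Stand-in table and int codes; illustrative only.
#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

struct option_entry { std::string name; int code; };

static const std::vector<option_entry> OPTIONS = {
    {"Q4_0", 10}, {"Q4_K_M", 11}, {"F16", 12}, {"F32", 13},  // placeholder codes
};

static bool parse_type(const std::string & in, int & code_out) {
    std::string upper;
    for (char ch : in) {
        upper.push_back((char) std::toupper((unsigned char) ch));  // "q4_k_m" -> "Q4_K_M"
    }
    for (const auto & opt : OPTIONS) {
        if (opt.name == upper) { code_out = opt.code; return true; }
    }
    return false;
}

int main() {
    int code = 0;
    printf("q4_k_m -> %s\n", parse_type("q4_k_m", code) ? "ok" : "unknown");
    printf("f16    -> %s\n", parse_type("f16", code) ? "ok" : "unknown");
    return 0;
}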

File tree

4 files changed: +154, -48 lines

Makefile

Lines changed: 1 addition & 0 deletions
@@ -127,6 +127,7 @@ endif
 
 ifndef LLAMA_NO_K_QUANTS
     CFLAGS += -DGGML_USE_K_QUANTS
+    CXXFLAGS += -DGGML_USE_K_QUANTS
    OBJS += k_quants.o
 endif
 

examples/quantize/quantize.cpp

Lines changed: 127 additions & 35 deletions
@@ -4,43 +4,135 @@
 
 #include <cstdio>
 #include <cstring>
-#include <map>
+#include <vector>
 #include <string>
 
-static const std::map<std::string, llama_ftype> LLAMA_FTYPE_MAP = {
-    {"q4_0", LLAMA_FTYPE_MOSTLY_Q4_0},
-    {"q4_1", LLAMA_FTYPE_MOSTLY_Q4_1},
-    {"q5_0", LLAMA_FTYPE_MOSTLY_Q5_0},
-    {"q5_1", LLAMA_FTYPE_MOSTLY_Q5_1},
-    {"q8_0", LLAMA_FTYPE_MOSTLY_Q8_0},
-    {"q2_K", LLAMA_FTYPE_MOSTLY_Q2_K},
-    {"q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M},
-    {"q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S},
-    {"q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M},
-    {"q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L},
-    {"q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M},
-    {"q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S},
-    {"q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M},
-    {"q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M},
-    {"q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S},
-    {"q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M},
-    {"q6_K", LLAMA_FTYPE_MOSTLY_Q6_K},
+struct quant_option {
+    std::string name;
+    llama_ftype ftype;
+    std::string desc;
 };
 
-bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::string & ftype_str_out) {
-    auto it = LLAMA_FTYPE_MAP.find(ftype_str);
-    if (it != LLAMA_FTYPE_MAP.end()) {
-        ftype = it->second;
-        ftype_str_out = it->first;
-        return true;
+static const std::vector<struct quant_option> QUANT_OPTIONS = {
+    {
+        "Q4_0",
+        LLAMA_FTYPE_MOSTLY_Q4_0,
+        " 3.50G, +0.2499 ppl @ 7B - small, very high quality loss - legacy, prefer using Q3_K_M",
+    },
+    {
+        "Q4_1",
+        LLAMA_FTYPE_MOSTLY_Q4_1,
+        " 3.90G, +0.1846 ppl @ 7B - small, substantial quality loss - legacy, prefer using Q3_K_L",
+    },
+    {
+        "Q5_0",
+        LLAMA_FTYPE_MOSTLY_Q5_0,
+        " 4.30G, +0.0796 ppl @ 7B - medium, balanced quality - legacy, prefer using Q4_K_M",
+    },
+    {
+        "Q5_1",
+        LLAMA_FTYPE_MOSTLY_Q5_1,
+        " 4.70G, +0.0415 ppl @ 7B - medium, low quality loss - legacy, prefer using Q5_K_M",
+    },
+#ifdef GGML_USE_K_QUANTS
+    {
+        "Q2_K",
+        LLAMA_FTYPE_MOSTLY_Q2_K,
+        " 2.67G, +0.8698 ppl @ 7B - smallest, extreme quality loss - not recommended",
+    },
+    {
+        "Q3_K",
+        LLAMA_FTYPE_MOSTLY_Q3_K_M,
+        "alias for Q3_K_M"
+    },
+    {
+        "Q3_K_S",
+        LLAMA_FTYPE_MOSTLY_Q3_K_S,
+        " 2.75G, +0.5505 ppl @ 7B - very small, very high quality loss",
+    },
+    {
+        "Q3_K_M",
+        LLAMA_FTYPE_MOSTLY_Q3_K_M,
+        " 3.06G, +0.2437 ppl @ 7B - very small, very high quality loss",
+    },
+    {
+        "Q3_K_L",
+        LLAMA_FTYPE_MOSTLY_Q3_K_L,
+        " 3.35G, +0.1803 ppl @ 7B - small, substantial quality loss",
+    },
+    {
+        "Q4_K",
+        LLAMA_FTYPE_MOSTLY_Q4_K_M,
+        "alias for Q4_K_M",
+    },
+    {
+        "Q4_K_S",
+        LLAMA_FTYPE_MOSTLY_Q4_K_S,
+        " 3.56G, +0.1149 ppl @ 7B - small, significant quality loss",
+    },
+    {
+        "Q4_K_M",
+        LLAMA_FTYPE_MOSTLY_Q4_K_M,
+        " 3.80G, +0.0535 ppl @ 7B - medium, balanced quality - *recommended*",
+    },
+    {
+        "Q5_K",
+        LLAMA_FTYPE_MOSTLY_Q5_K_M,
+        "alias for Q5_K_M",
+    },
+    {
+        "Q5_K_S",
+        LLAMA_FTYPE_MOSTLY_Q5_K_S,
+        " 4.33G, +0.0353 ppl @ 7B - large, low quality loss - *recommended*",
+    },
+    {
+        "Q5_K_M",
+        LLAMA_FTYPE_MOSTLY_Q5_K_M,
+        " 4.45G, +0.0142 ppl @ 7B - large, very low quality loss - *recommended*",
+    },
+    {
+        "Q6_K",
+        LLAMA_FTYPE_MOSTLY_Q6_K,
+        " 5.15G, +0.0044 ppl @ 7B - very large, extremely low quality loss",
+    },
+#endif
+    {
+        "Q8_0",
+        LLAMA_FTYPE_MOSTLY_Q8_0,
+        " 6.70G, +0.0004 ppl @ 7B - very large, extremely low quality loss - not recommended",
+    },
+    {
+        "F16",
+        LLAMA_FTYPE_MOSTLY_F16,
+        "13.00G @ 7B - extremely large, virtually no quality loss - not recommended",
+    },
+    {
+        "F32",
+        LLAMA_FTYPE_ALL_F32,
+        "26.00G @ 7B - absolutely huge, lossless - not recommended",
+    },
+};
+
+
+bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
+    std::string ftype_str;
+
+    for (auto ch : ftype_str_in) {
+        ftype_str.push_back(std::toupper(ch));
+    }
+    for (auto & it : QUANT_OPTIONS) {
+        if (it.name == ftype_str) {
+            ftype = it.ftype;
+            ftype_str_out = it.name;
+            return true;
+        }
     }
-    // try to parse as an integer
     try {
         int ftype_int = std::stoi(ftype_str);
-        for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
-            if (it->second == ftype_int) {
-                ftype = it->second;
-                ftype_str_out = it->first;
+        for (auto & it : QUANT_OPTIONS) {
+            if (it.ftype == ftype_int) {
+                ftype = it.ftype;
+                ftype_str_out = it.name;
                 return true;
             }
         }
@@ -52,15 +144,15 @@ bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::st
 }
 
 // usage:
-//  ./quantize models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads]
+//  ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads]
 //
 void usage(const char * executable) {
-    fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.bin [model-quant.bin] type [nthreads]\n", executable);
+    fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.bin [model-quant.bin] type [nthreads]\n\n", executable);
     fprintf(stderr, "  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
    fprintf(stderr, "  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
-    fprintf(stderr, "Allowed quantization types:\n");
-    for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
-        fprintf(stderr, "  type = \"%s\" or %d\n", it->first.c_str(), it->second);
+    fprintf(stderr, "\nAllowed quantization types:\n");
+    for (auto & it : QUANT_OPTIONS) {
+        printf("  %2d or %-6s : %s\n", it.ftype, it.name.c_str(), it.desc.c_str());
     }
     exit(1);
 }
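
With these entries in place, writing an f16 copy of an f32 ggml model follows the usage string above, e.g. ./quantize models/llama/ggml-model.bin models/llama/ggml-model-f16.bin f16 (the paths here are placeholders; the lower-case type name works because of the new case-insensitive lookup).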

ggml.c

Lines changed: 12 additions & 0 deletions
@@ -16301,6 +16301,18 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
                result = ggml_quantize_q6_K(src + start, block, n, n, hist);
            } break;
 #endif
+        case GGML_TYPE_F16:
+            {
+                int elemsize = sizeof(ggml_fp16_t);
+                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
+                result = n * elemsize;
+            } break;
+        case GGML_TYPE_F32:
+            {
+                int elemsize = sizeof(float);
+                result = n * elemsize;
+                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
+            } break;
        default:
            assert(false);
    }
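
For the two new cases, "quantizing" a chunk is just a format change: the F16 branch converts n floats row-wise with ggml_fp32_to_fp16_row, the F32 branch copies the bytes unchanged, and both report n times the element size. The following standalone sketch (plain C++, no ggml headers, made-up buffer contents) reproduces only the F32 branch's offset and size bookkeeping:

// Standalone sketch of the F32 pass-through branch: copy n floats starting
// at `start` into the same element offset of dst and report the byte count.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

static size_t passthrough_f32_chunk(const float * src, void * dst, int start, int n) {
    const int elemsize = sizeof(float);
    const size_t result = (size_t) n * elemsize;              // bytes written, as in the diff
    memcpy((uint8_t *) dst + (size_t) start * elemsize, src + start, result);
    return result;
}

int main() {
    std::vector<float> src(8, 1.5f);
    std::vector<float> dst(8, 0.0f);
    const size_t written = passthrough_f32_chunk(src.data(), dst.data(), 2, 4);
    printf("wrote %zu bytes; dst[2..5] = %.1f %.1f %.1f %.1f\n",
           written, dst[2], dst[3], dst[4], dst[5]);
    return 0;
}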

llama.cpp

Lines changed: 14 additions & 13 deletions
@@ -2298,7 +2298,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
        case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
        case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
        case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
+        case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
+        case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
 
+#ifdef GGML_USE_K_QUANTS
        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
@@ -2309,6 +2312,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
        case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
+#endif
        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
    }
 
@@ -2320,6 +2324,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
            /*vocab_only*/ false));
    llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), params->ftype);
 
+#ifdef GGML_USE_K_QUANTS
    int n_attention_wv = 0;
    int n_feed_forward_w2 = 0;
    for (auto& tensor : model_loader->tensors_map.tensors) {
@@ -2333,6 +2338,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
    int i_attention_wv = 0;
    int i_feed_forward_w2 = 0;
+#endif
 
    size_t total_size_org = 0;
    size_t total_size_new = 0;
@@ -2358,12 +2364,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
        // quantize only 2D tensors
        quantize &= (tensor.ne.size() == 2);
-
-        // uncomment this to keep the output layer in FP16
-        if (!params->quantize_output_tensor && tensor.name == "output.weight") {
-            quantize = false;
-        }
-        quantize = quantize && quantized_type != tensor.type;
+        quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
+        quantize &= quantized_type != tensor.type;
 
        enum ggml_type new_type;
        void * new_data;
@@ -2377,29 +2379,28 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
            printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
        } else {
            new_type = quantized_type;
+#ifdef GGML_USE_K_QUANTS
            if (tensor.name == "output.weight") {
-                new_type = GGML_TYPE_Q6_K;
-            }
-            else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
+                new_type = GGML_TYPE_Q6_K;
+            } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                        (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8 ||
                        (i_attention_wv - n_attention_wv/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
                ++i_attention_wv;
-            }
-            if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
+            } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                        (i_feed_forward_w2 < n_feed_forward_w2/8 || i_feed_forward_w2 >= 7*n_feed_forward_w2/8 ||
                        (i_feed_forward_w2 - n_feed_forward_w2/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
                ++i_feed_forward_w2;
-            }
-            if (tensor.name.find("attention.wo.weight") != std::string::npos) {
+            } else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
            }
+#endif
 
            float * f32_data;
            size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
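
The output.weight handling in llama.cpp is a pure simplification: the old explicit if that cleared the quantize flag is folded into two &= expressions. A small standalone check (the tensor names are hypothetical examples) that the old and new forms agree on every combination:

// Compare the old if-based output.weight filter with the new &= form
// over all four combinations of flag value and tensor name.
#include <cstdio>
#include <string>

int main() {
    for (int quantize_output_tensor = 0; quantize_output_tensor <= 1; ++quantize_output_tensor) {
        for (int is_output = 0; is_output <= 1; ++is_output) {
            const std::string name = is_output ? "output.weight" : "layers.0.attention.wq.weight";

            // old form: explicit if that clears the flag
            bool old_form = true;
            if (!quantize_output_tensor && name == "output.weight") {
                old_form = false;
            }

            // new form: single &= with the inverted condition
            bool new_form = true;
            new_form &= quantize_output_tensor || name != "output.weight";

            printf("quantize_output_tensor=%d name=%-28s old=%d new=%d\n",
                   quantize_output_tensor, name.c_str(), old_form, new_form);
        }
    }
    return 0;
}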
