
Commit b1aeff5

mishig25, younesbelkada, and FL33TW00D committed
Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1
Co-authored-by: Younes Belkada <[email protected]>
Co-authored-by: FL33TW00D <[email protected]>
1 parent 7e83f0b commit b1aeff5

File tree: 1 file changed (+8, -8 lines)

packages/gguf/src/quant_descriptions.ts

Lines changed: 8 additions & 8 deletions
@@ -3,18 +3,18 @@ import { GGMLQuantizationType } from "./types";
 	export const QUANT_DESCRIPTIONS: Record<GGMLQuantizationType, string> = {
 	[GGMLQuantizationType.F32]: "32-bit standard IEEE 754 single-precision floating-point number.", // src: https://en.wikipedia.org/wiki/Single-precision_floating-point_format
 	[GGMLQuantizationType.F16]: "16-bit standard IEEE 754 half-precision floating-point number.", // src: https://en.wikipedia.org/wiki/Half-precision_floating-point_format
-	[GGMLQuantizationType.Q4_0]: "", // todo: add description
-	[GGMLQuantizationType.Q4_1]: "", // todo: add description
-	[GGMLQuantizationType.Q5_0]: "", // todo: add description
-	[GGMLQuantizationType.Q5_1]: "", // todo: add description
-	[GGMLQuantizationType.Q8_0]: "", // todo: add description
-	[GGMLQuantizationType.Q8_1]: "", // todo: add description
+	[GGMLQuantizationType.Q4_0]: "4-bit round-to-nearest quantization (q). Each block has 32 weights. Weights are obtained by w = q * block_scale_factor. Legacy quantization method (not used widely as of today)",
+	[GGMLQuantizationType.Q4_1]: "4-bit round-to-nearest quantization (q). Each block has 32 weights. Weights are obtained by w = q * block_scale_factor + block_minimum. Legacy quantization method (not used widely as of today)",
+	[GGMLQuantizationType.Q5_0]: "5-bit round-to-nearest quantization (q). Each block has 32 weights. Weights are obtained by w = q * block_scale_factor. Legacy quantization method (not used widely as of today)",
+	[GGMLQuantizationType.Q5_1]: "5-bit round-to-nearest quantization (q). Each block has 32 weights. Weights are obtained by w = q * block_scale_factor + block_minimum. Legacy quantization method (not used widely as of today)",
+	[GGMLQuantizationType.Q8_0]: "8-bit round-to-nearest quantization (q). Each block has 32 weights. Weights are obtained by w = q * block_scale_factor. Legacy quantization method (not used widely as of today)",
+	[GGMLQuantizationType.Q8_1]: "8-bit round-to-nearest quantization (q). Each block has 32 weights. Weights are obtained by w = q * block_scale_factor + block_minimum. Legacy quantization method (not used widely as of today)",
 	[GGMLQuantizationType.Q2_K]: `2-bit quantization (q). Super-blocks with 16 blocks, each block has 16 weights. Block scales (d) & mins (m) are quantized with 4 bits, resulting in 2.5625 bits-per-weight. Weights are obtained by w = d * q + m.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
 	[GGMLQuantizationType.Q3_K]: `3-bit quantization (q). Super-blocks with 16 blocks, each block has 16 weights. Block scales (d) are quantized with 6 bits, resulting in 3.4375 bits-per-weight. Weights are obtained by w = d * q.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
 	[GGMLQuantizationType.Q4_K]: `4-bit quantization (q). Super-blocks with 8 blocks, each block has 32 weights. Block scales (d) & mins (m) are quantized with 6 bits, resulting in 4.5 bits-per-weight. Weights are obtained by w = d * q + m.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
 	[GGMLQuantizationType.Q5_K]: `5-bit quantization (q). Super-blocks with 8 blocks, each block has 32 weights. Block scales (d) & mins (m) are quantized with 6 bits, resulting in 5.5 bits-per-weight. Weights are obtained by w = d * q + m.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
 	[GGMLQuantizationType.Q6_K]: `6-bit quantization (q). Super-blocks with 16 blocks, each block has 16 weights. Block scales (d) are quantized with 8 bits, resulting in 6.5625 bits-per-weight. Weights are obtained by w = d * q.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
-	[GGMLQuantizationType.Q8_K]: `8-bit quantization (q). Only used for quantizing intermediate results. The difference to the existing Q8_0 is that the block size is 256. All 2-6 bit dot products are implemented for this quantization type. Weights are obtained by w = d * q.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
+	[GGMLQuantizationType.Q8_K]: `8-bit quantization (q). Each block has 256 weights. Only used for quantizing intermediate results. All 2-6 bit dot products are implemented for this quantization type. Weights are obtained by w = q * block_scale_factor.`, // src: https://github.com/ggerganov/llama.cpp/pull/1684#issue-1739619305
 	[GGMLQuantizationType.IQ2_XXS]: "", // todo: add description
 	[GGMLQuantizationType.IQ2_XS]: "", // todo: add description
 	[GGMLQuantizationType.IQ3_XXS]: "", // todo: add description
@@ -23,4 +23,4 @@ export const QUANT_DESCRIPTIONS: Record<GGMLQuantizationType, string> = {
 	[GGMLQuantizationType.IQ3_S]: "", // todo: add description
 	[GGMLQuantizationType.IQ2_S]: "", // todo: add description
 	[GGMLQuantizationType.IQ4_XS]: "", // todo: add description
-};
+};
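
Taken together, the new legacy-format descriptions boil down to two affine dequantization formulas. Below is a minimal TypeScript sketch of what they mean, assuming plain number fields instead of the bit-packed block layout GGUF actually uses; the type and function names are hypothetical and are not part of the gguf package.

// Illustrative sketch only: real GGUF blocks bit-pack the 32 quants and
// store the scale (and minimum) as 16-bit floats. Names here are made up.

interface ScaleBlock {
	scale: number; // block_scale_factor
	quants: number[]; // 32 round-to-nearest integer quants (q)
}

interface ScaleMinBlock extends ScaleBlock {
	min: number; // block_minimum
}

// Q4_0 / Q5_0 / Q8_0 style: w = q * block_scale_factor
function dequantizeScaleOnly(block: ScaleBlock): number[] {
	return block.quants.map((q) => q * block.scale);
}

// Q4_1 / Q5_1 / Q8_1 style: w = q * block_scale_factor + block_minimum
function dequantizeScaleMin(block: ScaleMinBlock): number[] {
	return block.quants.map((q) => q * block.scale + block.min);
}

Quantization is the inverse direction: choose a scale (and minimum) per 32-weight block and round each w / scale to the nearest representable integer, which is the "round-to-nearest" the descriptions refer to.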
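
The K-quant entries in the surrounding, unchanged lines add one more level: blocks are grouped into super-blocks, and the per-block scales (d) and mins (m) are themselves stored quantized and rescaled by super-block factors. A sketch under the same assumptions (plain numbers, hypothetical field names), for the w = d * q + m variants such as Q4_K:

// Illustrative sketch only; the real super-block layouts are bit-packed.

interface SuperBlock {
	d: number; // super-block factor applied to the quantized block scales
	dmin: number; // super-block factor applied to the quantized block mins
	scales: number[]; // per-block integer scales, e.g. 6-bit for Q4_K
	mins: number[]; // per-block integer mins, e.g. 6-bit for Q4_K
	quants: number[][]; // quants[block][i], e.g. 8 blocks of 32 weights for Q4_K
}

function dequantizeSuperBlock(sb: SuperBlock): number[] {
	const weights: number[] = [];
	sb.quants.forEach((blockQuants, b) => {
		const d = sb.d * sb.scales[b]; // effective block scale
		const m = sb.dmin * sb.mins[b]; // effective block min
		for (const q of blockQuants) {
			weights.push(d * q + m); // w = d * q + m
		}
	});
	return weights;
}

Scale-only K-quants such as Q3_K and Q6_K drop the min term, i.e. w = d * q.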
