
Commit eed925a

wip : Q2_2 now faster than Q4_K with AVX2

1 parent f820e5a

File tree

3 files changed: +65 −145 lines

convert-hf-to-gguf.py

Lines changed: 2 additions & 0 deletions
@@ -294,6 +294,8 @@ def write_tensors(self):
                 ))

                 if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32:
+                    # TODO: cleaner model-specific per-tensor types
+                    # NOTE: Q1_3 is only relevant for BitNet 1.58b
                     if self.ftype == gguf.LlamaFileType.MOSTLY_Q1_3 and not any(
                         self.match_model_tensor_name(new_name, key, None)
                         for key in [

ggml-common.h

Lines changed: 0 additions & 67 deletions
@@ -1037,73 +1037,6 @@ GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512)
     0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101,
 GGML_TABLE_END()

-GGML_TABLE_BEGIN(uint32_t, q22_grid, 256)
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00010000, 0x01010000, 0x00010000, 0xff010000,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00ff0000, 0x01ff0000, 0x00ff0000, 0xffff0000,
-    0x00000100, 0x01000100, 0x00000100, 0xff000100,
-    0x00010100, 0x01010100, 0x00010100, 0xff010100,
-    0x00000100, 0x01000100, 0x00000100, 0xff000100,
-    0x00ff0100, 0x01ff0100, 0x00ff0100, 0xffff0100,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00010000, 0x01010000, 0x00010000, 0xff010000,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00ff0000, 0x01ff0000, 0x00ff0000, 0xffff0000,
-    0x0000ff00, 0x0100ff00, 0x0000ff00, 0xff00ff00,
-    0x0001ff00, 0x0101ff00, 0x0001ff00, 0xff01ff00,
-    0x0000ff00, 0x0100ff00, 0x0000ff00, 0xff00ff00,
-    0x00ffff00, 0x01ffff00, 0x00ffff00, 0xffffff00,
-    0x00000001, 0x01000001, 0x00000001, 0xff000001,
-    0x00010001, 0x01010001, 0x00010001, 0xff010001,
-    0x00000001, 0x01000001, 0x00000001, 0xff000001,
-    0x00ff0001, 0x01ff0001, 0x00ff0001, 0xffff0001,
-    0x00000101, 0x01000101, 0x00000101, 0xff000101,
-    0x00010101, 0x01010101, 0x00010101, 0xff010101,
-    0x00000101, 0x01000101, 0x00000101, 0xff000101,
-    0x00ff0101, 0x01ff0101, 0x00ff0101, 0xffff0101,
-    0x00000001, 0x01000001, 0x00000001, 0xff000001,
-    0x00010001, 0x01010001, 0x00010001, 0xff010001,
-    0x00000001, 0x01000001, 0x00000001, 0xff000001,
-    0x00ff0001, 0x01ff0001, 0x00ff0001, 0xffff0001,
-    0x0000ff01, 0x0100ff01, 0x0000ff01, 0xff00ff01,
-    0x0001ff01, 0x0101ff01, 0x0001ff01, 0xff01ff01,
-    0x0000ff01, 0x0100ff01, 0x0000ff01, 0xff00ff01,
-    0x00ffff01, 0x01ffff01, 0x00ffff01, 0xffffff01,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00010000, 0x01010000, 0x00010000, 0xff010000,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00ff0000, 0x01ff0000, 0x00ff0000, 0xffff0000,
-    0x00000100, 0x01000100, 0x00000100, 0xff000100,
-    0x00010100, 0x01010100, 0x00010100, 0xff010100,
-    0x00000100, 0x01000100, 0x00000100, 0xff000100,
-    0x00ff0100, 0x01ff0100, 0x00ff0100, 0xffff0100,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00010000, 0x01010000, 0x00010000, 0xff010000,
-    0x00000000, 0x01000000, 0x00000000, 0xff000000,
-    0x00ff0000, 0x01ff0000, 0x00ff0000, 0xffff0000,
-    0x0000ff00, 0x0100ff00, 0x0000ff00, 0xff00ff00,
-    0x0001ff00, 0x0101ff00, 0x0001ff00, 0xff01ff00,
-    0x0000ff00, 0x0100ff00, 0x0000ff00, 0xff00ff00,
-    0x00ffff00, 0x01ffff00, 0x00ffff00, 0xffffff00,
-    0x000000ff, 0x010000ff, 0x000000ff, 0xff0000ff,
-    0x000100ff, 0x010100ff, 0x000100ff, 0xff0100ff,
-    0x000000ff, 0x010000ff, 0x000000ff, 0xff0000ff,
-    0x00ff00ff, 0x01ff00ff, 0x00ff00ff, 0xffff00ff,
-    0x000001ff, 0x010001ff, 0x000001ff, 0xff0001ff,
-    0x000101ff, 0x010101ff, 0x000101ff, 0xff0101ff,
-    0x000001ff, 0x010001ff, 0x000001ff, 0xff0001ff,
-    0x00ff01ff, 0x01ff01ff, 0x00ff01ff, 0xffff01ff,
-    0x000000ff, 0x010000ff, 0x000000ff, 0xff0000ff,
-    0x000100ff, 0x010100ff, 0x000100ff, 0xff0100ff,
-    0x000000ff, 0x010000ff, 0x000000ff, 0xff0000ff,
-    0x00ff00ff, 0x01ff00ff, 0x00ff00ff, 0xffff00ff,
-    0x0000ffff, 0x0100ffff, 0x0000ffff, 0xff00ffff,
-    0x0001ffff, 0x0101ffff, 0x0001ffff, 0xff01ffff,
-    0x0000ffff, 0x0100ffff, 0x0000ffff, 0xff00ffff,
-    0x00ffffff, 0x01ffffff, 0x00ffffff, 0xffffffff,
-GGML_TABLE_END()
-
 GGML_TABLE_BEGIN(uint32_t, q1_3_grid, 256)
     0xffffffff, 0xffffffff, 0xffffff00, 0xffffff01, 0xffff00ff, 0xffff0000, 0xffff0001, 0xffff01ff,
     0xffff0100, 0xffff0101, 0xff00ffff, 0xff00ff00, 0xff00ff01, 0xff0000ff, 0xff000000, 0xff000001,
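(Note: the removed q22_grid lookup table mapped each packed Q2_2 byte to four int8 weights under the previous encoding; the new code in ggml-quants.c below computes the weights inline instead, as ((q >> 2k) & 3) − 2. A minimal standalone sketch of that inline decode, with a hypothetical helper name:)

#include <stdint.h>
#include <stdio.h>

// Hypothetical standalone helper mirroring the shift-and-mask decode that
// replaces the q22_grid lookup: codes {1, 2, 3} become weights {-1, 0, 1}.
static void decode_q2_2_byte(uint8_t q, int8_t w[4]) {
    for (int k = 0; k < 4; ++k) {
        w[k] = (int8_t) (((q >> (2*k)) & 3) - 2);
    }
}

int main(void) {
    int8_t w[4];
    decode_q2_2_byte(0xB9, w);  // codes 1,2,3,2 from low bits to high
    printf("%d %d %d %d\n", w[0], w[1], w[2], w[3]);  // prints: -1 0 1 0
    return 0;
}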

ggml-quants.c

Lines changed: 63 additions & 78 deletions
@@ -657,6 +657,35 @@ static inline __m128i packNibbles( __m256i bytes ) {
 }
 #endif //__loongarch_asx

+void quantize_row_q2_2_reference(const float * restrict x, block_q2_2 * restrict y, int64_t k) {
+    static const int qk = QK2_2;
+
+    assert(k % qk == 0);
+
+    const int nb = k / qk;
+
+    for (int i = 0; i < nb; i++) {
+
+        for (int j = 0; j < qk/4; ++j) {
+            int8_t x0 = (int8_t)x[i*qk + 0      + j];
+            int8_t x1 = (int8_t)x[i*qk + 1*qk/4 + j];
+            int8_t x2 = (int8_t)x[i*qk + 2*qk/4 + j];
+            int8_t x3 = (int8_t)x[i*qk + 3*qk/4 + j];
+
+            const uint8_t xi0 = x0 < 0 ? 1 : x0 == 0 ? 2 : 3;
+            const uint8_t xi1 = x1 < 0 ? 1 : x1 == 0 ? 2 : 3;
+            const uint8_t xi2 = x2 < 0 ? 1 : x2 == 0 ? 2 : 3;
+            const uint8_t xi3 = x3 < 0 ? 1 : x3 == 0 ? 2 : 3;
+
+            y[i].qs[j]  = 0;
+            y[i].qs[j] |= (xi0 << 0);
+            y[i].qs[j] |= (xi1 << 2);
+            y[i].qs[j] |= (xi2 << 4);
+            y[i].qs[j] |= (xi3 << 6);
+        }
+    }
+}
+
 // reference implementation for deterministic creation of model files
 void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int64_t k) {
     static const int qk = QK4_0;
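(Note: the quantizer above stores ternary weights {−1, 0, 1} as the 2-bit codes {1, 2, 3} (code = weight + 2) and interleaves them: byte j of a block holds elements j, j+qk/4, j+2·qk/4 and j+3·qk/4. A self-contained sketch of that packing, assuming QK2_2 is 32 to match the 8-byte loads in the AVX2 path below:)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define QK2_2 32  // assumed block size: 32 ternary weights in 8 bytes

// Pack one block of ternary floats the way the reference quantizer does:
// code = 1 for negative, 2 for zero, 3 for positive; four codes per byte.
static void pack_block(const float x[QK2_2], uint8_t qs[QK2_2/4]) {
    for (int j = 0; j < QK2_2/4; ++j) {
        qs[j] = 0;
        for (int k = 0; k < 4; ++k) {
            const float v = x[k*(QK2_2/4) + j];
            const uint8_t code = v < 0.0f ? 1 : v == 0.0f ? 2 : 3;
            qs[j] |= code << (2*k);
        }
    }
}

int main(void) {
    float x[QK2_2] = { -1.0f, 0.0f, 1.0f };  // remaining elements are 0
    uint8_t qs[QK2_2/4];
    pack_block(x, qs);
    assert((qs[0] & 3) == 1);  // element 0 (-1) -> code 1, low bits of byte 0
    assert((qs[1] & 3) == 2);  // element 1 ( 0) -> code 2, low bits of byte 1
    assert((qs[2] & 3) == 3);  // element 2 ( 1) -> code 3, low bits of byte 2
    printf("byte0 = 0x%02X\n", qs[0]);  // elements 8/16/24 share byte 0 too
    return 0;
}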
@@ -1512,6 +1541,26 @@ void quantize_row_q8_1(const float * restrict x, void * restrict vy, int64_t k)
 #endif
 }

+void dequantize_row_q2_2(const block_q2_2 * restrict x, float * restrict y, int64_t k) {
+    static const int qk = QK2_2;
+
+    assert(k % qk == 0);
+
+    const int nb = k / qk;
+
+    for (int i = 0; i < nb; i++) {
+
+        for (int j = 0; j < qk/4; ++j) {
+            const int8_t q = x[i].qs[j];
+
+            y[i*qk + j + 0     ] = (float) (((q >> 0) & 3) - 2);
+            y[i*qk + j + 1*qk/4] = (float) (((q >> 2) & 3) - 2);
+            y[i*qk + j + 2*qk/4] = (float) (((q >> 4) & 3) - 2);
+            y[i*qk + j + 3*qk/4] = (float) (((q >> 6) & 3) - 2);
+        }
+    }
+}
+
 void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int64_t k) {
     static const int qk = QK4_0;
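(Note: together with quantize_row_q2_2_reference above, this gives a lossless round trip for ternary inputs. A hedged usage sketch; the block_q2_2 layout is assumed from how the kernels index it, the real definition lives in the ggml headers:)

#include <assert.h>
#include <stdint.h>

#define QK2_2 32                                     // assumed block size
typedef struct { uint8_t qs[QK2_2/4]; } block_q2_2;  // assumed layout

// Declarations as used in this diff (restrict qualifiers omitted here):
void quantize_row_q2_2_reference(const float * x, block_q2_2 * y, int64_t k);
void dequantize_row_q2_2(const block_q2_2 * x, float * y, int64_t k);

int main(void) {
    float in[QK2_2] = { -1.0f, 0.0f, 1.0f, 1.0f };  // remaining elements are 0
    float out[QK2_2];
    block_q2_2 b;

    quantize_row_q2_2_reference(in, &b, QK2_2);
    dequantize_row_q2_2(&b, out, QK2_2);

    for (int j = 0; j < QK2_2; ++j) {
        assert(out[j] == in[j]);  // exact for ternary inputs
    }
    return 0;
}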

@@ -3876,82 +3925,18 @@ void ggml_vec_dot_q2_2_q8_0(int n, float * restrict s, size_t bs, const void * r
 #if defined(__AVX2__)
     __m256 acc = _mm256_setzero_ps();

-    int leftovers = nb % 2;
-
-    for (int i = 0; i < nb - leftovers; i += 2) {
-
-        const __m256 d0 = _mm256_set1_ps( GGML_FP16_TO_FP32(y[i + 0].d) );
-        const __m256 d1 = _mm256_set1_ps( GGML_FP16_TO_FP32(y[i + 1].d) );
-
-        // assuming two consecutive blocks are contiguous AND aligned
-        __m128i xq16b = _mm_load_si128((const __m128i *) (x[i].qs));
-        __m256i xq16 = MM256_SET_M128I(xq16b, xq16b);
-        __m256i xq8l0 = _mm256_shuffle_epi8(xq16, _mm256_set_epi8(5, -1, 5, -1, 5, -1, 5, -1,
-                                                                  4, -1, 4, -1, 4, -1, 4, -1,
-                                                                  1, -1, 1, -1, 1, -1, 1, -1,
-                                                                  0, -1, 0, -1, 0, -1, 0, -1));
-        __m256i xq8h0 = _mm256_shuffle_epi8(xq16, _mm256_set_epi8(7, -1, 7, -1, 7, -1, 7, -1,
-                                                                  6, -1, 6, -1, 6, -1, 6, -1,
-                                                                  3, -1, 3, -1, 3, -1, 3, -1,
-                                                                  2, -1, 2, -1, 2, -1, 2, -1));
-        __m256i xq8l1 = _mm256_shuffle_epi8(xq16, _mm256_set_epi8(13, -1, 13, -1, 13, -1, 13, -1,
-                                                                  12, -1, 12, -1, 12, -1, 12, -1,
-                                                                   9, -1,  9, -1,  9, -1,  9, -1,
-                                                                   8, -1,  8, -1,  8, -1,  8, -1));
-        __m256i xq8h1 = _mm256_shuffle_epi8(xq16, _mm256_set_epi8(15, -1, 15, -1, 15, -1, 15, -1,
-                                                                  14, -1, 14, -1, 14, -1, 14, -1,
-                                                                  11, -1, 11, -1, 11, -1, 11, -1,
-                                                                  10, -1, 10, -1, 10, -1, 10, -1));
-        __m256i shift = _mm256_set_epi16(64, 16, 4, 1,
-                                         64, 16, 4, 1,
-                                         64, 16, 4, 1,
-                                         64, 16, 4, 1);
-        xq8l0 = _mm256_mullo_epi16(xq8l0, shift);
-        xq8h0 = _mm256_mullo_epi16(xq8h0, shift);
-        xq8l1 = _mm256_mullo_epi16(xq8l1, shift);
-        xq8h1 = _mm256_mullo_epi16(xq8h1, shift);
-        xq8l0 = _mm256_srai_epi16(xq8l0, 14);
-        xq8h0 = _mm256_srai_epi16(xq8h0, 14);
-        xq8l1 = _mm256_srai_epi16(xq8l1, 14);
-        xq8h1 = _mm256_srai_epi16(xq8h1, 14);
-        __m256i xq8_0 = _mm256_packs_epi16(xq8l0, xq8h0);
-        __m256i xq8_1 = _mm256_packs_epi16(xq8l1, xq8h1);
-
-        __m256i yq8_0 = _mm256_loadu_si256((const __m256i *) (y[i + 0].qs));
-        __m256i yq8_1 = _mm256_loadu_si256((const __m256i *) (y[i + 1].qs));
-
-        const __m256 q0 = mul_sum_i8_pairs_float(xq8_0, yq8_0);
-        const __m256 q1 = mul_sum_i8_pairs_float(xq8_1, yq8_1);
-
-        acc = _mm256_fmadd_ps( d0, q0, acc );
-        acc = _mm256_fmadd_ps( d1, q1, acc );
-    }
-
-    for (int i = nb - leftovers; i < nb; ++i) {
+    for (int i = 0; i < nb; ++i) {

         const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(y[i].d) );

-        __m128i xq8b = _mm_loadu_si64(x[i].qs);
-        __m256i xq8 = MM256_SET_M128I(xq8b, xq8b);
-        __m256i xq8l = _mm256_shuffle_epi8(xq8, _mm256_set_epi8(5, -1, 5, -1, 5, -1, 5, -1,
-                                                                4, -1, 4, -1, 4, -1, 4, -1,
-                                                                1, -1, 1, -1, 1, -1, 1, -1,
-                                                                0, -1, 0, -1, 0, -1, 0, -1));
-        __m256i xq8h = _mm256_shuffle_epi8(xq8, _mm256_set_epi8(7, -1, 7, -1, 7, -1, 7, -1,
-                                                                6, -1, 6, -1, 6, -1, 6, -1,
-                                                                3, -1, 3, -1, 3, -1, 3, -1,
-                                                                2, -1, 2, -1, 2, -1, 2, -1));
-        __m256i shift = _mm256_set_epi16(64, 16, 4, 1,
-                                         64, 16, 4, 1,
-                                         64, 16, 4, 1,
-                                         64, 16, 4, 1);
-        xq8l = _mm256_mullo_epi16(xq8l, shift);
-        xq8h = _mm256_mullo_epi16(xq8h, shift);
-        xq8l = _mm256_srai_epi16(xq8l, 14);
-        xq8h = _mm256_srai_epi16(xq8h, 14);
-        xq8 = _mm256_packs_epi16(xq8l, xq8h);
-
-        __m256i yq8 = _mm256_loadu_si256((const __m256i *) (y[i].qs));
+        // assuming this is always aligned
+        __m256i xq8 = _mm256_set1_epi64x(*(const int64_t *) x[i].qs);
+        xq8 = _mm256_srlv_epi64(xq8, _mm256_set_epi64x(6, 4, 2, 0));
+        xq8 = _mm256_and_si256(xq8, _mm256_set1_epi8(0x03));
+        // strangely enough, this is much slower with 1 instead of 2
+        xq8 = _mm256_sub_epi8(xq8, _mm256_set1_epi8(2));
+
+        const __m256i yq8 = _mm256_loadu_si256((const __m256i *) (y[i].qs));
         const __m256 q = mul_sum_i8_pairs_float(xq8, yq8);

         acc = _mm256_fmadd_ps( d, q, acc );
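(Note: the rewritten loop broadcasts the 8 packed bytes of a block into all four 64-bit lanes of a YMM register, shifts each lane right by 0/2/4/6 bits with _mm256_srlv_epi64 so each lane exposes a different 2-bit plane, masks every byte to its low 2 bits, and subtracts 2 to recover signed weights in {−1, 0, 1}; lane k then lines up with q8 elements k·8..k·8+7 because of the interleaved packing. A scalar model of one block expansion, as a sketch rather than the kernel itself:)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Scalar model of the new AVX2 expansion: 8 packed bytes -> 32 signed
// weights, in the byte order the vector code produces (64-bit lane k of
// the YMM register holds bit-plane k of all 8 bytes).
static void expand_block(const uint8_t qs[8], int8_t w[32]) {
    uint64_t bits;
    memcpy(&bits, qs, 8);                    // models the broadcast load
    for (int k = 0; k < 4; ++k) {            // one iteration per 64-bit lane
        const uint64_t v = bits >> (2*k);    // _mm256_srlv_epi64 by 0/2/4/6
        for (int b = 0; b < 8; ++b) {
            // mask each byte to 2 bits, then recenter: code - 2
            w[8*k + b] = (int8_t) (((v >> (8*b)) & 3) - 2);
        }
    }
}

int main(void) {
    const uint8_t qs[8] = { 0xB9, 0, 0, 0, 0, 0, 0, 0 };  // codes 1,2,3,2 in byte 0
    int8_t w[32];
    expand_block(qs, w);
    // Byte 0 contributes one weight to each lane: w[0], w[8], w[16], w[24]
    printf("%d %d %d %d\n", w[0], w[8], w[16], w[24]);  // prints: -1 0 1 0
    return 0;
}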
@@ -3964,11 +3949,11 @@ void ggml_vec_dot_q2_2_q8_0(int n, float * restrict s, size_t bs, const void * r
     for (int i = 0; i < nb; i++) {
         int sumi = 0;
         for (int j = 0; j < qk / 4; j++) {
-            const int8_t* weight = (const int8_t *)(q22_grid + x[i].qs[j]);
-            sumi += (int)y[i].qs[4*j+0] * weight[0];
-            sumi += (int)y[i].qs[4*j+1] * weight[1];
-            sumi += (int)y[i].qs[4*j+2] * weight[2];
-            sumi += (int)y[i].qs[4*j+3] * weight[3];
+            const uint8_t weight = x[i].qs[j];
+            sumi += (int)y[i].qs[j + 0*qk/4] * (((weight >> 0) & 3) - 2);
+            sumi += (int)y[i].qs[j + 1*qk/4] * (((weight >> 2) & 3) - 2);
+            sumi += (int)y[i].qs[j + 2*qk/4] * (((weight >> 4) & 3) - 2);
+            sumi += (int)y[i].qs[j + 3*qk/4] * (((weight >> 6) & 3) - 2);
         }
         sumf += (float)(sumi)*(GGML_FP16_TO_FP32(y[i].d));
     }