@@ -11362,10 +11362,19 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const
         __m256i sumi = _mm256_setzero_si256();
         int sumi1 = 0;
         for (int ib = 0; ib < QK_K/32; ib += 2) {
+#ifdef __BMI2__
+            const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib], 0x700070007000700ULL);
+            const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib + 1], 0x700070007000700ULL);
+            const uint16_t *idx1 = (const uint16_t *)(&packed_idx1);
+            const uint16_t *idx2 = (const uint16_t *)(&packed_idx2);
+            const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]);
+            const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]);
+#else
             const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)],
                                                     iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]);
             const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)],
                                                     iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]);
+#endif
             qs += 8;
             const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
             const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
@@ -11709,8 +11718,9 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const
 
 #elif defined __AVX2__
 
-    const __m256i mask = _mm256_set1_epi16(0x7);
+    const __m256i mask = _mm256_set1_epi16(2 * 0x7);
     const __m256i mone = _mm256_set1_epi16(1);
+    const __m256i mone8 = _mm256_set1_epi8(1);
 
     __m256 accum1 = _mm256_setzero_ps();
     __m256 accum2 = _mm256_setzero_ps();
@@ -11726,6 +11736,21 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const
         __m256i sumi1 = _mm256_setzero_si256();
         __m256i sumi2 = _mm256_setzero_si256();
         for (int ib = 0; ib < QK_K/32; ib += 2) {
+#ifdef __BMI2__
+            const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL)
+                                       | _pdep_u64(*(const uint16_t*)(qh) & 0x7777, 0xf000f000f000f00ULL);
+            const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL)
+                                       | _pdep_u64(*(const uint16_t*)(qh + 2) & 0x7777, 0xf000f000f000f00ULL);
+            const uint16_t *idx1 = (const uint16_t *)(&packed_idx1);
+            const uint16_t *idx2 = (const uint16_t *)(&packed_idx2);
+            const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]);
+            const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]);
+
+            // Convert signs to bytes 0x81 (negative) or 0x01 (positive)
+            const uint64_t delta_sign = _pdep_u64(*(const uint32_t*)(qh) & 0x88888888, 0xf0f0f0f0f0f0f0f0ULL);
+            const __m256i delta1 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign)));
+            const __m256i delta2 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign >> 32)));
+#else
             const __m256i q1b_1 = _mm256_set_epi64x(
                     iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)],
                     iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]
@@ -11734,11 +11759,6 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const
                     iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)],
                     iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]
             );
-            const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-
-            const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1);
-            const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2);
 
             const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101,
                                                      qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101,
@@ -11748,15 +11768,20 @@ void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const
                                                      qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101,
                                                      qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101,
                                                      qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101);
+#endif
+            const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+            const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
 
-            const __m256i dot3 = mul_add_epi8(delta1, q8b_1);
-            const __m256i dot4 = mul_add_epi8(delta2, q8b_2);
+            const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1);
+            const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2);
+            const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1));
+            const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2));
 
-            __m256i scale1 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 3), _mm_set1_epi16(sc[ib/2] >> 0));
-            __m256i scale2 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 9), _mm_set1_epi16(sc[ib/2] >> 6));
+            __m256i scale1 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 2), _mm_set1_epi16(sc[ib/2] << 1));
+            __m256i scale2 = MM256_SET_M128I(_mm_set1_epi16(sc[ib/2] >> 8), _mm_set1_epi16(sc[ib/2] >> 5));
 
-            scale1 = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scale1, mask), 1), mone);
-            scale2 = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scale2, mask), 1), mone);
+            scale1 = _mm256_add_epi16(_mm256_and_si256(scale1, mask), mone);
+            scale2 = _mm256_add_epi16(_mm256_and_si256(scale2, mask), mone);
             const __m256i p1 = _mm256_madd_epi16(dot1, scale1);
             const __m256i p2 = _mm256_madd_epi16(dot2, scale2);
             const __m256i p3 = _mm256_madd_epi16(dot3, scale1);
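Two notes on the rewritten tail above. First, dot3/dot4: the BMI2 delta1/delta2 bytes are only guaranteed to be nonzero with the correct sign (0x01, 0x81, or 0xff), not exactly ±1, so the old mul_add_epi8(delta, q8b) is replaced by _mm256_sign_epi8 (conditionally negate each q8 byte by the sign of the matching delta byte) followed by _mm256_maddubs_epi16 against the all-ones mone8 to form the pairwise sums. Second, the scale math: the former _mm256_slli_epi16(..., 1) is folded into the constants, widening mask to 2 * 0x7 in the earlier hunk and reducing every shift amount by one, since 2*((sc >> k) & 7) + 1 == ((sc >> (k-1)) & 14) + 1 (with (sc << 1) & 14 for the k = 0 field). A scalar sanity sketch of that identity, mine rather than from the patch:

// Exhaustively checks the old and new scale expressions over all 16-bit
// sc values, so the refactor provably cannot change results.
#include <stdint.h>
#include <stdio.h>

int main(void) {
    for (uint32_t sc = 0; sc < 0x10000; ++sc) {
        const int old_s[4] = { 2*((sc >> 0) & 7) + 1, 2*((sc >> 3) & 7) + 1,
                               2*((sc >> 6) & 7) + 1, 2*((sc >> 9) & 7) + 1 };
        const int new_s[4] = { ((sc << 1) & 14) + 1, ((sc >> 2) & 14) + 1,
                               ((sc >> 5) & 14) + 1, ((sc >> 8) & 14) + 1 };
        for (int k = 0; k < 4; ++k) {
            if (old_s[k] != new_s[k]) { printf("mismatch at sc=0x%04x\n", sc); return 1; }
        }
    }
    printf("old and new scale expressions agree for all 16-bit sc values\n");
    return 0;
}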