@@ -6088,6 +6088,7 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
         const uint8_t * restrict q2 = x[i].qs;
         const int8_t * restrict q8 = y[i].qs;
+
         const __m128i mins_and_scales = __lsx_vld((const __m128i*)x[i].scales, 0);
         const __m128i scales8 = __lsx_vand_v(mins_and_scales, m4);
         const __m128i mins8 = __lsx_vand_v(__lsx_vsrli_h(mins_and_scales, 4), m4);
@@ -6807,6 +6808,8 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
     for (int i = 0; i < nb; ++i) {
 
         const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+        const uint8_t * restrict q3 = x[i].qs;
+        const int8_t * restrict q8 = y[i].qs;
         // Set up scales
         memcpy(aux, x[i].scales, 12);
         __m128i scales128 = lsx_set_w(
@@ -6830,8 +6833,6 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
         int is = 0;
         __m256i xvbit;
 
-        const uint8_t * restrict q3 = x[i].qs;
-        const int8_t * restrict q8 = y[i].qs;
 
         for (int j = 0; j < QK_K/128; ++j) {
             // load low 2 bits
@@ -7419,6 +7420,11 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
         const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
 
         memcpy(utmp, x[i].scales, 12);
+        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+        const uint32_t uaux = utmp[1] & kmask1;
+        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+        utmp[2] = uaux;
+        utmp[0] &= kmask1;
 
         const uint8_t * restrict q4 = x[i].qs;
         const int8_t * restrict q8 = y[i].qs;
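Note on what the five added utmp lines compute: they unpack the 12-byte scales field of a Q4_K super-block into eight 6-bit scales (the bytes of utmp[0..1]) and eight 6-bit mins (the bytes of utmp[2..3]). The standalone sketch below is not part of the patch; it replays that shuffle against a plain bit-extraction reference written for this sketch. The kmask values (0x3f3f3f3f, 0x0f0f0f0f, 0x03030303) and the little-endian byte view are assumed to match the definitions used elsewhere in ggml-quants.c.

// Minimal standalone sketch of the 6-bit scale/min unpack added above.
// Assumptions: kmask constants as in ggml-quants.c, little-endian target.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t kmask1 = 0x3f3f3f3f;
static const uint32_t kmask2 = 0x0f0f0f0f;
static const uint32_t kmask3 = 0x03030303;

// Reference extraction of the j-th 6-bit scale/min pair from the packed
// 12-byte field (written for this sketch, modeled on the scalar K-quant path).
static void ref_scale_min(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
    if (j < 4) {
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    } else {
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j - 0] >> 6) << 4);
    }
}

int main(void) {
    // 12 arbitrary packed scale bytes standing in for x[i].scales.
    const uint8_t scales[12] = { 7, 200, 33, 91, 18, 255, 64, 129, 5, 77, 240, 11 };

    uint32_t utmp[4];
    memcpy(utmp, scales, 12);

    // The exact sequence from the hunk: afterwards the bytes of utmp[0..1]
    // hold the 8 scales and the bytes of utmp[2..3] hold the 8 mins.
    utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
    const uint32_t uaux = utmp[1] & kmask1;
    utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
    utmp[2] = uaux;
    utmp[0] &= kmask1;

    const uint8_t * sc = (const uint8_t *)&utmp[0];
    const uint8_t * mn = (const uint8_t *)&utmp[2];

    for (int j = 0; j < 8; ++j) {
        uint8_t d, m;
        ref_scale_min(j, scales, &d, &m);
        printf("j=%d  scale=%3d (ref %3d)  min=%3d (ref %3d)\n", j, sc[j], d, mn[j], m);
    }
    return 0;
}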
@@ -7458,16 +7464,17 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
             __m256 vd = __lasx_xvreplfr2vr_s(d);
             acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc);
+
         }
 
         acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vpermi_w((__m128i)acc_m, (__m128i)acc_m, 0xee));
         __m128i tmp1 = __lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w((__m128i)acc_m, 1), 0);
         acc_m = __lsx_vfadd_s(acc_m, (__m128)tmp1);
 
+
         ft_union fi;
         fi.i = __lsx_vpickve2gr_w(acc_m, 0);
         *s = hsum_float_8(acc) + fi.f ;
-
 #else
 
         const uint8_t * scales = (const uint8_t*)&utmp[0];
@@ -8026,6 +8033,11 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
         const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
 
         memcpy(utmp, x[i].scales, 12);
+        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+        const uint32_t uaux = utmp[1] & kmask1;
+        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+        utmp[2] = uaux;
+        utmp[0] &= kmask1;
 
         const __m256i mins_and_scales = lasx_extu8_16(lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]));
 
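The Q5_K hunk mirrors the same six-bit unpack, so after these lines the bytes of utmp[0..1] again hold the sub-block scales and utmp[2..3] the mins, which lasx_extu8_16(lsx_set_w(...)) then widens to 16-bit lanes. As a rough, self-contained sketch of how the unpacked mins are consumed, the snippet below mimics the scalar-style min correction that pairs each 6-bit min (one per 32-quant sub-block) with the per-16-element bsums of the q8 block; the loop shape and the stand-in values for y[i].d and x[i].dmin are illustrative assumptions, not code from this patch.

// Sketch of the min correction term fed by the unpacked 6-bit mins.
// Assumptions: QK_K = 256, q8 block sums cover 16 elements each (bsums),
// and each min covers a 32-quant sub-block, i.e. two consecutive bsums.
#include <stdint.h>
#include <stdio.h>

#define QK_K 256

int main(void) {
    uint8_t mins[8];          // 8 unpacked 6-bit mins, one per 32-quant sub-block
    int16_t bsums[QK_K/16];   // per-16-element sums of the q8 quants

    for (int j = 0; j < 8; ++j)       mins[j]  = (uint8_t)(3*j + 1);    // made-up values
    for (int j = 0; j < QK_K/16; ++j) bsums[j] = (int16_t)(16*j - 100); // made-up values

    // Each min multiplies the q8 sum of its 32-quant sub-block.
    int32_t sumi_mins = 0;
    for (int j = 0; j < QK_K/16; ++j) {
        sumi_mins += bsums[j] * mins[j/2];
    }

    const float d8 = 0.25f, dmin = 0.5f;   // stand-ins for y[i].d and x[i].dmin
    printf("min correction = %f\n", (double)(-d8 * dmin * sumi_mins));
    return 0;
}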
@@ -8075,10 +8087,12 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
             p16_1 = lasx_madd_h(scale_1, p16_1);
 
             sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1));
+
         }
 
         __m256 vd = __lasx_xvreplfr2vr_s(d);
         acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc);
+
     }
 
     *s = hsum_float_8(acc) + summs;