Skip to content

Commit 3c192de

Browse files
ikawrakow (Kawrakow)
authored and committed
ggml-quants : fix compiler warnings (shadow variable) (ggml-org#5472)
Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent ac96068 commit 3c192de

File tree

1 file changed

+18
-18
lines changed

1 file changed

+18
-18
lines changed

ggml-quants.c

Lines changed: 18 additions & 18 deletions
Original file line number · Diff line number · Diff line change
@@ -3819,15 +3819,15 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
38193819
/* Compute combined scale for the block */
38203820
const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
38213821

3822-
__m256i bx = bytes_from_nibbles_32(x[i].qs);
3822+
__m256i qx = bytes_from_nibbles_32(x[i].qs);
38233823

38243824
// Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
38253825
const __m256i off = _mm256_set1_epi8( 8 );
3826-
bx = _mm256_sub_epi8( bx, off );
3826+
qx = _mm256_sub_epi8( qx, off );
38273827

3828-
__m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
3828+
__m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
38293829

3830-
const __m256 q = mul_sum_i8_pairs_float(bx, by);
3830+
const __m256 q = mul_sum_i8_pairs_float(qx, qy);
38313831

38323832
/* Multiply q with scale and accumulate */
38333833
acc = _mm256_fmadd_ps( d, q, acc );
@@ -4196,10 +4196,10 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r
41964196
const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
41974197

41984198
// Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
4199-
const __m256i bx = bytes_from_nibbles_32(x[i].qs);
4200-
const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
4199+
const __m256i qx = bytes_from_nibbles_32(x[i].qs);
4200+
const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[i].qs );
42014201

4202-
const __m256 xy = mul_sum_us8_pairs_float(bx, by);
4202+
const __m256 xy = mul_sum_us8_pairs_float(qx, qy);
42034203

42044204
// Accumulate d0*d1*x*y
42054205
#if defined(__AVX2__)
@@ -4418,14 +4418,14 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * r
44184418
/* Compute combined scale for the block */
44194419
const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
44204420

4421-
__m256i bx = bytes_from_nibbles_32(x[i].qs);
4421+
__m256i qx = bytes_from_nibbles_32(x[i].qs);
44224422
__m256i bxhi = bytes_from_bits_32(x[i].qh);
44234423
bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
4424-
bx = _mm256_or_si256(bx, bxhi);
4424+
qx = _mm256_or_si256(qx, bxhi);
44254425

4426-
__m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
4426+
__m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
44274427

4428-
const __m256 q = mul_sum_i8_pairs_float(bx, by);
4428+
const __m256 q = mul_sum_i8_pairs_float(qx, qy);
44294429

44304430
/* Multiply q with scale and accumulate */
44314431
acc = _mm256_fmadd_ps(d, q, acc);
@@ -4722,15 +4722,15 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r
47224722

47234723
summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
47244724

4725-
__m256i bx = bytes_from_nibbles_32(x[i].qs);
4725+
__m256i qx = bytes_from_nibbles_32(x[i].qs);
47264726
__m256i bxhi = bytes_from_bits_32(x[i].qh);
47274727
bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
4728-
bx = _mm256_or_si256(bx, bxhi);
4728+
qx = _mm256_or_si256(qx, bxhi);
47294729

47304730
const __m256 dy = _mm256_set1_ps(y[i].d);
4731-
const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
4731+
const __m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
47324732

4733-
const __m256 q = mul_sum_us8_pairs_float(bx, by);
4733+
const __m256 q = mul_sum_us8_pairs_float(qx, qy);
47344734

47354735
acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
47364736
}
@@ -4973,10 +4973,10 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r
49734973
for (int i = 0; i < nb; ++i) {
49744974
// Compute combined scale for the block
49754975
const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
4976-
__m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
4977-
__m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
4976+
__m256i qx = _mm256_loadu_si256((const __m256i *)x[i].qs);
4977+
__m256i qy = _mm256_loadu_si256((const __m256i *)y[i].qs);
49784978

4979-
const __m256 q = mul_sum_i8_pairs_float(bx, by);
4979+
const __m256 q = mul_sum_i8_pairs_float(qx, qy);
49804980

49814981
// Multiply q with scale and accumulate
49824982
#if defined(__AVX2__)

0 commit comments

Comments (0)