@@ -1944,7 +1944,7 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest
     // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

-    /* Prepare the constants we will need during execution */
+    /* Prepare the constants we will need during execution */
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    const __m256i offset_8 = _mm256_set1_epi16( 8 );

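The two constants prepared above drive the nibble unpacking inside the main loop below: lowMask keeps the low 4 bits of every byte, and offset_8 recenters the unsigned nibble values from [ 0 .. 15 ] into [ -8 .. +7 ]. A minimal scalar sketch of that unpack-and-recenter step (illustrative only, not part of the commit):

    #include <stdint.h>

    /* Scalar equivalent of the lane-wise AVX2 unpacking: split one packed
       byte into two 4-bit values and recenter each into [ -8 .. +7 ]. */
    static inline void unpack_nibbles(uint8_t byte, int16_t *lo, int16_t *hi) {
        *lo = (int16_t)(byte & 0xF) - 8; /* cf. _mm256_and_si256 with lowMask   */
        *hi = (int16_t)(byte >> 4) - 8;  /* cf. _mm256_srli_epi16 by 4, sub 8   */
    }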
@@ -1954,61 +1954,59 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest

    // Main loop
    for (int i = 0; i < nb; i += UNROLL_COUNT) {
-
-        // This loop will be unrolled by the compiler
+        // This loop will be unrolled by the compiler
        for (int u = 0; u < UNROLL_COUNT; u++) {
-            /* Compute combined scale for the block */
-            const __m256 scale = _mm256_mul_ps(
-                    _mm256_broadcast_ss( &x[i+u].d ),
-                    _mm256_broadcast_ss( &y[i+u].d ) );
-
-            /* get input from x
-               Input: 32 Nibbles (16 bytes) at *x[i+u]
-               Output: 2 vectors with 16 values of type int16_t (x_high_q, x_low_q) */
-
-            /* Load 16 bytes from memory */
-            const __m128i tmp_x = _mm_loadu_si128( (const __m128i *) x[i+u].qs );
-            /* Expand bytes into uint16_t values */
-            const __m256i bytes_x = _mm256_cvtepu8_epi16( tmp_x );
+            /* Compute combined scale for the block */
+            const __m256 scale = _mm256_mul_ps(
+                    _mm256_broadcast_ss( &x[i+u].d ),
+                    _mm256_broadcast_ss( &y[i+u].d ) );
+
+            /* get input from x
+               Input: 32 Nibbles (16 bytes) at *x[i+u]
+               Output: 2 vectors with 16 values of type int16_t (x_high_q, x_low_q) */
+
+            /* Load 16 bytes from memory */
+            const __m128i tmp_x = _mm_loadu_si128( (const __m128i *) x[i+u].qs );
+            /* Expand bytes into uint16_t values */
+            const __m256i bytes_x = _mm256_cvtepu8_epi16( tmp_x );

            /* Unpack values into individual bytes */
            __m256i x_low_q = _mm256_and_si256( lowMask, bytes_x );
            const __m256i pre_shift_x_high_q = _mm256_andnot_si256( lowMask, bytes_x );
-            __m256i x_high_q = _mm256_srli_epi16( pre_shift_x_high_q, 4 );
+            __m256i x_high_q = _mm256_srli_epi16( pre_shift_x_high_q, 4 );

            /* Now we have two vectors with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. */
-            x_high_q = _mm256_sub_epi16( x_high_q, offset_8 );
-            x_low_q = _mm256_sub_epi16( x_low_q, offset_8 );
+            x_high_q = _mm256_sub_epi16( x_high_q, offset_8 );
+            x_low_q = _mm256_sub_epi16( x_low_q, offset_8 );

-            /* get input from y
-               Input: 32 Nibbles (16 bytes) at *y[i+u]
-               Output: 2 vectors with 16 values of type int16_t (y_high_q, y_low_q) */
+            /* get input from y
+               Input: 32 Nibbles (16 bytes) at *y[i+u]
+               Output: 2 vectors with 16 values of type int16_t (y_high_q, y_low_q) */

-            /* Load 16 bytes from memory */
-            const __m128i tmp_y = _mm_loadu_si128( (const __m128i *) y[i+u].qs );
-            /* Expand bytes into uint16_t values */
-            const __m256i bytes_y = _mm256_cvtepu8_epi16( tmp_y );
+            /* Load 16 bytes from memory */
+            const __m128i tmp_y = _mm_loadu_si128( (const __m128i *) y[i+u].qs );
+            /* Expand bytes into uint16_t values */
+            const __m256i bytes_y = _mm256_cvtepu8_epi16( tmp_y );

            /* Unpack values into individual bytes */
-            const __m256i pre_shift_y_high_q = _mm256_andnot_si256( lowMask, bytes_y );
-            __m256i y_high_q = _mm256_srli_epi16( pre_shift_y_high_q, 4 );
-            __m256i y_low_q = _mm256_and_si256( lowMask, bytes_y );
+            const __m256i pre_shift_y_high_q = _mm256_andnot_si256( lowMask, bytes_y );
+            __m256i y_high_q = _mm256_srli_epi16( pre_shift_y_high_q, 4 );
+            __m256i y_low_q = _mm256_and_si256( lowMask, bytes_y );

            /* Now we have two vectors with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. */
-            y_high_q = _mm256_sub_epi16( y_high_q, offset_8 );
-            y_low_q = _mm256_sub_epi16( y_low_q, offset_8 );
+            y_high_q = _mm256_sub_epi16( y_high_q, offset_8 );
+            y_low_q = _mm256_sub_epi16( y_low_q, offset_8 );

-            /* Compute products of int16_t integers, add pairwise, store as int32_t */
-            __m256i xy_high_q = _mm256_madd_epi16( x_high_q, y_high_q );
-            __m256i xy_low_q = _mm256_madd_epi16( x_low_q, y_low_q );
+            /* Compute products of int16_t integers, add pairwise, store as int32_t */
+            __m256i xy_high_q = _mm256_madd_epi16( x_high_q, y_high_q );
+            __m256i xy_low_q = _mm256_madd_epi16( x_low_q, y_low_q );

-            /* Accumulate the products of int32_t integers -> we now have a vector of 8 int32_t values */
-            __m256i xy_q = _mm256_add_epi32( xy_high_q, xy_low_q );
+            /* Accumulate the products of int32_t integers -> we now have a vector of 8 int32_t values */
+            __m256i xy_q = _mm256_add_epi32( xy_high_q, xy_low_q );

-            /* Convert the vector of 8 int32_t to 8 floats */
-            __m256 q = _mm256_cvtepi32_ps( xy_q );
+            /* Convert the vector of 8 int32_t to 8 floats */
+            __m256 q = _mm256_cvtepi32_ps( xy_q );

-            /* Multiply q with scale and accumulate */
-            acc = _mm256_fmadd_ps( scale, q, acc );
+            /* Multiply q with scale and accumulate */
+            acc = _mm256_fmadd_ps( scale, q, acc );
        }
-
-    }
+    }

    // Return horizontal sum of the acc vector
    __m128 res = _mm256_extractf128_ps( acc, 1 );
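The hunk is cut off in the middle of the horizontal sum, so the remaining reduction of acc to a single float is not shown. Overall, for each pair of blocks the loop accumulates x.d * y.d times the integer dot product of the recentered nibbles: _mm256_madd_epi16 fuses the int16_t multiplies with the pairwise adds, and _mm256_fmadd_ps fuses the scaling with the accumulation. A minimal scalar reference of that computation (the block layout is assumed from the intrinsics above: a float scale d plus 16 bytes qs packing 32 4-bit values; names here are illustrative, not ggml's):

    #include <stdint.h>

    #define QK 32                   /* values per block, assumed from the 16-byte qs payload */

    typedef struct {
        float   d;                  /* block scale */
        uint8_t qs[QK / 2];         /* 32 nibbles packed into 16 bytes */
    } block_q4_0;

    /* Scalar reference of what the AVX2 loop accumulates. */
    static float vec_dot_q4_0_ref(int nb, const block_q4_0 *x, const block_q4_0 *y) {
        float acc = 0.0f;
        for (int i = 0; i < nb; i++) {
            int32_t sum = 0;
            for (int j = 0; j < QK / 2; j++) {
                const int x_lo = (x[i].qs[j] & 0xF) - 8, x_hi = (x[i].qs[j] >> 4) - 8;
                const int y_lo = (y[i].qs[j] & 0xF) - 8, y_hi = (y[i].qs[j] >> 4) - 8;
                sum += x_lo * y_lo + x_hi * y_hi;  /* cf. _mm256_madd_epi16 + _mm256_add_epi32 */
            }
            acc += x[i].d * y[i].d * (float) sum;  /* cf. _mm256_fmadd_ps( scale, q, acc ) */
        }
        return acc;
    }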