#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.comp"

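// mul_mat_vec for IQ1_M-quantized weights: each 8-thread slice of the
// workgroup walks the super-blocks of a row, with each thread handling one
// of the eight 32-value sub-blocks, accumulating dot products against up to
// NUM_COLS columns of B.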
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

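// per-invocation accumulators: one partial sum per (B column, output row) pair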
FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];

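// Dequantize the 32-value sub-block `ib32` of block `i` for `num_rows`
// consecutive output rows and accumulate the products into `temp`.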
void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
    const uint y_idx = i * QUANT_K + 32 * ib32;

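    // index of block i in row `first_row`; stepped down one row per n iteration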
    uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
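        // the fp16 super-block scale is scattered across the top 4 bits of the
        // four 16-bit scales words; reassemble its bit pattern and convert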
        const uint16_t[4] scales = data_a[ibi].scales;
        const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
        const float d = float(uint16BitsToHalf(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)));

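        // 6-bit field holding the two 3-bit sub-scales of this 32-value sub-block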
        const uint sc = data_a[ibi].scales[ib32 / 2] >> (6 * (ib32 & 1));
        [[unroll]] for (uint l = 0; l < 4; ++l) {
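            // each l covers 8 values: the qh nibble supplies the 3 high bits of
            // the grid index plus the sign of delta, the qs byte the low 8 bits;
            // the effective scale is d * (2 * 3-bit sub-scale + 1)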
            const uint qh = data_a[ibi].qh[2 * ib32 + l / 2] >> (4 * (l&1));
            const uint qs = data_a[ibi].qs[4 * ib32 + l];
            const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
            const float dl = d * (2 * bitfieldExtract(sc, 3 * int(l / 2), 3) + 1);

            const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);

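            // dot the 8 dequantized weights (signed 2-bit grid fields plus delta)
            // with 8 consecutive values of each B column, loaded as two vec4s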
| 28 | + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { |
| 29 | + vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]); |
| 30 | + vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]); |
| 31 | + |
| 32 | + FLOAT_TYPE sum = FLOAT_TYPE(0.0); |
| 33 | + [[unroll]] for (int k = 0; k < 4; ++k) { |
| 34 | + sum = fma(FLOAT_TYPE(b0[k]), bitfieldExtract(grid, 2 * k, 2) + delta, |
| 35 | + fma(FLOAT_TYPE(b4[k]), bitfieldExtract(grid, 8 + 2 * k, 2) + delta, sum)); |
| 36 | + } |
| 37 | + temp[j][n] = fma(dl, sum, temp[j][n]); |
| 38 | + } |
| 39 | + } |
| 40 | + ibi += num_blocks_per_row; |
| 41 | + } |
| 42 | +} |
| 43 | + |
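// Accumulate `num_rows` output rows starting at `first_row`, then reduce the
// partial sums across the workgroup.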
void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 8 threads are used to process each block
    const uint blocks_per_wg = gl_WorkGroupSize.x/8;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid % 8; // 0...7
    const uint ix = tid / 8;

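    // clear the accumulators before the pass over this row's blocks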
    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

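    // each 8-thread slice strides over the row's blocks; itid picks the
    // 32-value sub-block this thread handles within each block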
    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
        calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);

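    // combine the partial sums from all invocations and write the results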
    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

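    // stage the quant grid lookup table in shared memory (helper from the base include)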
    init_iq_shmem(gl_WorkGroupSize);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}