1 parent df84919 commit 95e1888
ggml/src/ggml-cuda/fattn-mma-f16.cuh
@@ -895,6 +895,11 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile(
        float2 * dstk_fixup_meta = dstk_fixup + (gridDim.x + blockIdx.x)*ncols;
        dstk_fixup_meta[(threadIdx.y/np)*cols_per_warp + threadIdx.x] = make_float2(KQ_cmn, KQ_crs);
    }
+} else if (np > 1) {
+    // Warps with threadIdx.y % np == 0 execute a __syncthreads() in the if branch.
+    // Therefore, all other warps also need to execute a __syncthreads().
+    // Otherwise the points at which warps synchronize with each other would become misaligned.
+    __syncthreads();
}

#pragma unroll
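The comment added by this commit states a general CUDA rule: every warp of a thread block has to pass through the same sequence of __syncthreads() barriers, even when control flow diverges at warp granularity, otherwise later barriers pair up the wrong groups of warps. Below is a minimal standalone sketch of that pattern, separate from the commit itself; the kernel, output buffer, block shape, and the value of np are hypothetical and only mirror the structure of the fixed code.

// Minimal sketch (not from the commit): warps diverge on threadIdx.y % np,
// but both branches execute a matching __syncthreads(), as the fix does.
#include <cstdio>
#include <cuda_runtime.h>

static constexpr int np        = 2;  // hypothetical: every np-th warp does the extra pass
static constexpr int n_warps   = 4;  // warps per block
static constexpr int warp_size = 32;

__global__ void barrier_alignment_demo(float * dst) {
    __shared__ float tile[n_warps*warp_size];

    const int tid = threadIdx.y*warp_size + threadIdx.x;
    tile[tid] = (float) tid;

    if (threadIdx.y % np == 0) {
        // Only every np-th warp takes this path, and the path contains a block-wide barrier.
        __syncthreads();
        dst[tid] = tile[(tid + 1) % (n_warps*warp_size)];
    } else {
        // The remaining warps execute a matching __syncthreads().
        // Without it, the barrier above would pair up with whatever barrier these warps
        // reach next, and from then on the warps of the block would be waiting on
        // misaligned synchronization points.
        __syncthreads();
        dst[tid] = tile[tid];
    }
}

int main() {
    float * dst_d = nullptr;
    cudaMalloc((void **) &dst_d, n_warps*warp_size*sizeof(float));

    barrier_alignment_demo<<<1, dim3(warp_size, n_warps)>>>(dst_d);
    cudaDeviceSynchronize();

    cudaFree(dst_d);
    printf("barrier_alignment_demo finished\n");
    return 0;
}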