@@ -1924,16 +1924,16 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
     } else if (!split && any_gpus_with_slow_fp16 && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // FP32 precision KQV single-batch for batch size 1 without FlashAttention
         ggml_cuda_mul_mat_vec_nc(ctx, src0, src1, dst);
+    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16)
+            && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
+        // KQ + KQV multi-batch without FlashAttention
+        ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst);
     } else if (use_dequantize_mul_mat_vec) {
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, nullptr);
     } else if (use_mul_mat_vec_q) {
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, quantize_row_q8_1_cuda);
     } else if (use_mul_mat_q) {
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_q, quantize_mmq_q8_1_cuda);
-    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16)
-            && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
-        // KQ + KQV multi-batch without FlashAttention
-        ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst);
     } else {
         ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_cublas, nullptr);
     }
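The hunk moves the F16 batched-cuBLAS branch ahead of the dequantize/MMVQ/MMQ branches in the dispatch chain, so a multi-batch F16 multiplication reaches `ggml_cuda_mul_mat_batched_cublas` instead of falling through to the vector paths. The batch test itself relies on ggml's shape convention: a tensor's dimensions are stored in `ne[0..3]`, and for `mul_mat` dims 2 and 3 act as batch dimensions, so `src1->ne[2]*src1->ne[3] > 1` means more than one matrix is being multiplied. Below is a minimal standalone sketch of that predicate; `tensor_shape` and `is_multi_batch` are illustrative stand-ins, not ggml API.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for ggml's shape storage: ne[0..3] are the
// dimensions, with ne[2] and ne[3] acting as batch dims for mul_mat.
struct tensor_shape {
    int64_t ne[4];
};

// Same test as the moved branch: more than one matrix in the batch
// means a single batched cuBLAS call can service all of them.
static bool is_multi_batch(const tensor_shape & src1) {
    return src1.ne[2] * src1.ne[3] > 1;
}

int main() {
    const tensor_shape single  = {{4096, 1,   1,  1}}; // one column vector
    const tensor_shape batched = {{128, 512, 32,  1}}; // e.g. 32 attention heads
    printf("single:  %s\n", is_multi_batch(single)  ? "batched path" : "other path");
    printf("batched: %s\n", is_multi_batch(batched) ? "batched path" : "other path");
    return 0;
}
```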