@@ -6539,26 +6539,29 @@ void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_
         src1->backend == GGML_BACKEND_GPU && dst->backend == GGML_BACKEND_GPU;
     const bool src0_is_quantized = ggml_is_quantized(src0->type);
 
-    if (all_on_device && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+    int min_compute_capability = INT_MAX;
+    for (int id = 0; id < g_device_count; ++id) {
+        if (min_compute_capability > g_compute_capabilities[id]
+            && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
+            min_compute_capability = g_compute_capabilities[id];
+        }
+    }
+
+    // no quantized non-contiguous support for lower CC kernels implemented
+    const bool nc_okay = src0->type == GGML_TYPE_F16 || g_compute_capabilities[g_main_device] >= MIN_CC_DP4A;
+
+    if (all_on_device && nc_okay && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
-    } else if (all_on_device && !ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1->ne[1] == 1) {
+    } else if (all_on_device && nc_okay && !ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1->ne[1] == 1) {
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {
         ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
     } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
         if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
             ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_vec<true>, false, false);
         } else {
-            int min_compute_capability = INT_MAX;
-            for (int id = 0; id < g_device_count; ++id) {
-                if (min_compute_capability > g_compute_capabilities[id]
-                    && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
-                    min_compute_capability = g_compute_capabilities[id];
-                }
-            }
-
             if (g_mul_mat_q && ggml_is_quantized(src0->type) && min_compute_capability >= MIN_CC_DP4A) {
-                if (all_on_device && src0->backend != GGML_BACKEND_GPU_SPLIT) {
+                if (all_on_device && nc_okay && src0->backend != GGML_BACKEND_GPU_SPLIT) {
                     ggml_cuda_mul_mat_nc(src0, src1, dst);
                 } else {
                     ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_q<true>, false, false);
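The hoisted loop selects the lowest compute capability among only those devices that actually receive a slice of the row split: a device whose g_tensor_split fraction is not strictly below the next device's fraction gets zero rows and is skipped. Below is a minimal standalone sketch of that selection logic, not the llama.cpp implementation; the names (device_count, tensor_split, compute_capability) and the numbers are hypothetical stand-ins that do not come from the patch.

// Standalone sketch of the "minimum compute capability among participating devices"
// selection used above. All values here are made up for illustration.
#include <climits>
#include <cstdio>

int main() {
    const int   device_count          = 3;
    // Prefix fractions of the row split: device 0 owns [0.0, 0.5), device 1 owns
    // [0.5, 0.5) (i.e. nothing), device 2 owns [0.5, 1.0).
    const float tensor_split[3]       = {0.0f, 0.5f, 0.5f};
    const int   compute_capability[3] = {86, 61, 70};

    int min_cc = INT_MAX;
    for (int id = 0; id < device_count; ++id) {
        // A device participates only if its split fraction is strictly below the
        // next one, i.e. it is assigned a nonzero slice of the rows.
        const float next = id + 1 < device_count ? tensor_split[id + 1] : 1.0f;
        if (min_cc > compute_capability[id] && tensor_split[id] < next) {
            min_cc = compute_capability[id];
        }
    }
    // Prints 70: device 1 (capability 61) holds an empty slice and is ignored.
    printf("min compute capability among participating devices: %d\n", min_cc);
    return 0;
}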