@@ -7485,6 +7485,8 @@ static void ggml_cuda_op_dequantize_mul_mat_vec(
     const int64_t ne00 = src0->ne[0];
     const int64_t row_diff = row_high - row_low;

+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
     // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
 #ifdef GGML_CUDA_F16
     cuda_pool_alloc<half> src1_dfloat_a;
@@ -7577,6 +7579,7 @@ static void ggml_cuda_op_mul_mat_cublas(
     const int compute_capability = g_device_caps[id].cc;

     if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
+        // printf("this branch\n");
         // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
         cuda_pool_alloc<half> src0_as_f16;
         if (src0->type != GGML_TYPE_F16) {
@@ -7614,17 +7617,25 @@ static void ggml_cuda_op_mul_mat_cublas(

         const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
         to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream);
-    }
-    else {
+    } else {
         cuda_pool_alloc<float> src0_ddq_as_f32;
+        cuda_pool_alloc<float> src1_ddq_as_f32;

         if (src0->type != GGML_TYPE_F32) {
             const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);
             GGML_ASSERT(to_fp32_cuda != nullptr);
             src0_ddq_as_f32.alloc(row_diff*ne00);
             to_fp32_cuda(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream);
         }
+        if (src1->type != GGML_TYPE_F32) {
+            const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src1->type);
+            GGML_ASSERT(to_fp32_cuda != nullptr);
+            src1_ddq_as_f32.alloc(src1_ncols*ne10);
+            to_fp32_cuda(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream);
+        }
+
         const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get();
+        const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get();

         const float alpha = 1.0f;
         const float beta = 0.0f;
@@ -7633,9 +7644,9 @@ static void ggml_cuda_op_mul_mat_cublas(
         CUBLAS_CHECK(
             cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
                     row_diff, src1_ncols, ne10,
-                    &alpha, src0_ddf_i, ne00,
-                            src1_ddf_i, ne10,
-                    &beta,  dst_dd_i,   ldc));
+                    &alpha, src0_ddf_i,  ne00,
+                            src1_ddf1_i, ne10,
+                    &beta,  dst_dd_i,    ldc));
     }

     (void) dst;
@@ -8035,6 +8046,7 @@ static void ggml_cuda_op_mul_mat(

     GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT);
     GGML_ASSERT(src1->backend != GGML_BACKEND_GPU_SPLIT);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1));

     GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0);

@@ -8481,9 +8493,9 @@ static __global__ void k_compute_batched_ptrs(
     int64_t i03 = i13 / r3;
     int64_t i02 = i12 / r2;

-    ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02   + i03*nb03;
-    ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2;
-    ptrs_dst[0*ne23 + i12 + i13*ne12] = (      char *)         dst + i12*nbd2   + i13*nbd3;
+    ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03;
+    ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12 + i13*nb13;
+    ptrs_dst[0*ne23 + i12 + i13*ne12] = (      char *)         dst + i12*nbd2 + i13*nbd3;
 }

 static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -8492,28 +8504,10 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const

     GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
     GGML_ASSERT(src0->type == GGML_TYPE_F16);
-    GGML_ASSERT(src1->type == GGML_TYPE_F32);

-    const int64_t ne00 = src0->ne[0]; GGML_UNUSED(ne00);
-    const int64_t ne01 = src0->ne[1];
-    const int64_t ne02 = src0->ne[2];
-    const int64_t ne03 = src0->ne[3];
-
-    const int64_t nb01 = src0->nb[1];
-    const int64_t nb02 = src0->nb[2]; GGML_UNUSED(nb02);
-    const int64_t nb03 = src0->nb[3]; GGML_UNUSED(nb03);
-
-    const int64_t ne10 = src1->ne[0];
-    const int64_t ne11 = src1->ne[1];
-    const int64_t ne12 = src1->ne[2];
-    const int64_t ne13 = src1->ne[3];
-
-    const int64_t nb11 = src1->nb[1];
-    const int64_t nb12 = src1->nb[2]; GGML_UNUSED(nb12);
-    const int64_t nb13 = src1->nb[3]; GGML_UNUSED(nb13);
+    GGML_TENSOR_BINARY_OP_LOCALS

-    const int64_t ne1 = ggml_nelements(src1);
-    const int64_t ne  = ggml_nelements(dst);
+    const int64_t ne_dst = ggml_nelements(dst);

     ggml_cuda_set_device(g_main_device);
     cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
@@ -8522,7 +8516,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const

     ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
     void * src0_ddq = src0_extra->data_device[g_main_device];
-    half * src0_as_f16 = (half *) src0_ddq;
+    half * src0_f16 = (half *) src0_ddq;

     ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
     float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
@@ -8531,11 +8525,15 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
     float * dst_ddf = (float *) dst_extra->data_device[g_main_device];

     // convert src1 to fp16
-    const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
-    GGML_ASSERT(to_fp16_cuda != nullptr);
-
-    cuda_pool_alloc<half> src1_as_f16(ne1);
-    to_fp16_cuda(src1_ddf, src1_as_f16.get(), ne1, main_stream);
+    cuda_pool_alloc<half> src1_f16_alloc;
+    if (src1->type != GGML_TYPE_F16) {
+        const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
+        const int64_t ne_src1 = ggml_nelements(src1);
+        src1_f16_alloc.alloc(ne_src1);
+        GGML_ASSERT(to_fp16_cuda != nullptr);
+        to_fp16_cuda(src1_ddf, src1_f16_alloc.get(), ne_src1, main_stream);
+    }
+    half * src1_f16 = src1->type == GGML_TYPE_F16 ? (half *) src1_ddf : src1_f16_alloc.get();

     cuda_pool_alloc<half> dst_f16;
     char * dst_t;
@@ -8557,7 +8555,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
     const void * beta  = &beta_f16;

     if (dst->op_params[0] == GGML_PREC_DEFAULT) {
-        dst_t = (char *) dst_f16.alloc(ne);
+        dst_t = (char *) dst_f16.alloc(ne_dst);

         nbd2 /= sizeof(float) / sizeof(half);
         nbd3 /= sizeof(float) / sizeof(half);
@@ -8604,9 +8602,9 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
         CUBLAS_CHECK(
         cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
                 ne01, ne11, ne10,
-                alpha, (const char *) src0_as_f16,       CUDA_R_16F,   nb01/sizeof(half),  src0->nb[2]/sizeof(half),  // strideA
-                       (const char *) src1_as_f16.get(), CUDA_R_16F,   nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB
-                beta,  (      char *)             dst_t, cu_data_type, ne01,               dst->nb[2]/sizeof(float),  // strideC
+                alpha, (const char *) src0_f16, CUDA_R_16F,   nb01/nb00, nb02/nb00,  // strideA
+                       (const char *) src1_f16, CUDA_R_16F,   nb11/nb10, nb12/nb10,  // strideB
+                beta,  (      char *)    dst_t, cu_data_type, ne01,       nb2/nb0,   // strideC
                 ne12*ne13,
                 cu_compute_type,
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP));
@@ -8619,21 +8617,22 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const

         dim3 block_dims(ne13, ne12);
         k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>(
-                src0_as_f16, src1_as_f16.get(), dst_t,
+                src0_f16, src1_f16, dst_t,
                 ptrs_src.get(), ptrs_dst.get(),
                 ne12, ne13,
                 ne23,
                 nb02, nb03,
-                nb12, nb13,
+                src1->type == GGML_TYPE_F16 ? nb12 : nb12/2,
+                src1->type == GGML_TYPE_F16 ? nb13 : nb13/2,
                 nbd2, nbd3,
                 r2, r3);
         CUDA_CHECK(cudaGetLastError());

         CUBLAS_CHECK(
         cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
                 ne01, ne11, ne10,
-                alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F,   nb01/sizeof(half),
-                       (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F,   nb11/sizeof(float),
+                alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F,   nb01/nb00,
+                       (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F,   nb11/nb10,
                 beta,  (      void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne01,
                 ne23,
                 cu_compute_type,
@@ -8643,7 +8642,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const

     if (dst->op_params[0] == GGML_PREC_DEFAULT) {
         const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
-        to_fp32_cuda(dst_f16.get(), dst_ddf, ne, main_stream);
+        to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream);
     }
 }

@@ -8682,13 +8681,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-    } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {
         ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false);
     } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
-        if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
+        if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0 && src1->type == GGML_TYPE_F32) {
 #ifdef GGML_CUDA_FORCE_DMMV
             const bool use_mul_mat_vec_q = false;
 #else
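
Side note (not part of the patch): the stride arguments in the cuBLAS calls above change from divisions by sizeof(half)/sizeof(float) to divisions by nb00/nb10, i.e. the byte stride of a single element, so the same expression yields an element count whether src1 arrives as F32 or F16. A minimal standalone sketch of that idea, using a hypothetical toy_tensor struct in place of ggml_tensor (illustrative only, not llama.cpp code):

// Why dividing by nb[0] generalizes dividing by a hard-coded sizeof(type):
// nb[] holds byte strides, nb[0] is the element size for contiguous data,
// so nb[1]/nb[0] and nb[2]/nb[0] are the leading dimension and batch stride
// in elements for any element type.
#include <cstdio>
#include <cstdint>
#include <cstddef>

struct toy_tensor {
    int64_t ne[4]; // elements per dimension
    size_t  nb[4]; // byte stride per dimension
};

static void print_strides(const toy_tensor & t, const char * name) {
    printf("%s: ld = %zu, batch stride = %zu elements\n",
           name, t.nb[1]/t.nb[0], t.nb[2]/t.nb[0]);
}

int main() {
    // the same 128 x 32 x 8 tensor, once with 4-byte (F32) and once with 2-byte (F16) elements
    toy_tensor f32 = {{128, 32, 8, 1}, {4, 4*128, 4*128*32, 4*128*32*8}};
    toy_tensor f16 = {{128, 32, 8, 1}, {2, 2*128, 2*128*32, 2*128*32*8}};
    print_strides(f32, "f32 src1"); // ld = 128, batch stride = 4096
    print_strides(f16, "f16 src1"); // same element strides; /sizeof(float) would be wrong here
    return 0;
}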