@@ -667,7 +667,7 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
     size_t d_size;
     cl_mem d_X;
     if (src0->backend == GGML_BACKEND_CL) {
-        d_X = *(cl_mem*) src0->data;
+        d_X = (cl_mem) src0->data;
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size, CL_MEM_READ_ONLY);
     }
@@ -743,7 +743,7 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
     size_t d_size;
     cl_mem d_X;
     if (src0->backend == GGML_BACKEND_CL) {
-        d_X = *(cl_mem*) src0->data;
+        d_X = (cl_mem) src0->data;
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size, CL_MEM_READ_ONLY);
     }
@@ -868,7 +868,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             if (src0->backend == GGML_BACKEND_CPU) {
                 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, NULL));
             } else if (src0->backend == GGML_BACKEND_CL) {
-                d_Q = *(cl_mem*) src0->data;
+                d_Q = (cl_mem) src0->data;
             } else {
                 GGML_ASSERT(false);
             }
@@ -1011,14 +1011,13 @@ void ggml_cl_transform_tensor(ggml_tensor * tensor) {
     const size_t q_sz = ggml_type_size(type) * ne0 * ne1 * ne2 * ne3 / ggml_blck_size(type);
 
     size_t q_size;
-    cl_mem* dst = (cl_mem*) malloc(sizeof(cl_mem));
-    *dst = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
+    cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
 
     // copy tensor to device
     for (int64_t i3 = 0; i3 < ne3; i3++) {
         for (int64_t i2 = 0; i2 < ne2; i2++) {
             int i = i3*ne2 + i2;
-            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, *dst, i*ne0*ne1, tensor, i3, i2, NULL));
+            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, i*ne0*ne1, tensor, i3, i2, NULL));
         }
     }
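
The change boils down to how the device buffer handle is kept in the tensor's data field for GGML_BACKEND_CL tensors. Below is a minimal before/after sketch, using the names that appear in the hunks above; the variable name "boxed" is hypothetical and stands for the old heap-allocated holder. It relies on cl_mem being an opaque pointer type (typedef struct _cl_mem * cl_mem in CL/cl.h), so the handle fits directly into the void * data member.

    // Old layout: tensor->data pointed at a separately malloc'd cl_mem.
    cl_mem * boxed = (cl_mem *) malloc(sizeof(cl_mem));              // extra heap allocation
    *boxed = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
    tensor->data = boxed;
    cl_mem d_old = *(cl_mem *) tensor->data;                         // read side: double indirection

    // New layout: the cl_mem handle is stored in tensor->data directly.
    tensor->data = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
    cl_mem d_new = (cl_mem) tensor->data;                            // read side: plain cast

This removes one malloc per transformed tensor and one level of indirection on every read in the mul_mat paths.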