@@ -355,7 +355,7 @@ ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer,
         if (padded_size > original_size && tensor->view_src == nullptr) {
             SYCL_CHECK(CHECK_TRY_ERROR(ctx->stream->memset(
                 (char *)tensor->data + original_size, 0,
-                padded_size - original_size).wait()));
+                padded_size - original_size)));
         }
     }
     return GGML_STATUS_SUCCESS;
@@ -489,7 +489,7 @@ static void ggml_backend_sycl_buffer_clear(ggml_backend_buffer_t buffer,
 
     SYCL_CHECK(CHECK_TRY_ERROR((*stream)
                                    .memset(ctx->dev_ptr, value, buffer->size)
-                                   .wait()));
+                                   ));
 }
 catch (sycl::exception const &exc) {
     std::cerr << exc.what() << "Exception caught at file:" << __FILE__
@@ -511,7 +511,6 @@ static void ggml_backend_sycl_buffer_memset_tensor(ggml_backend_buffer_t buffer,
     }
     void * target_ptr = static_cast<char *>(tensor->data) + offset;
     SYCL_CHECK(CHECK_TRY_ERROR((*stream).memset(target_ptr, value, size)));
-    SYCL_CHECK(CHECK_TRY_ERROR((*stream).wait()));
 }
 
 static void ggml_backend_sycl_buffer_reset(ggml_backend_buffer_t buffer) {
@@ -833,7 +832,7 @@ ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
             SYCL_CHECK(CHECK_TRY_ERROR(
                 (*stream)
                     .memset(buf + original_size, 0, size - original_size)
-                    .wait()));
+                    ));
         }
 
         extra->data_device[i] = buf;
@@ -901,7 +900,7 @@ ggml_backend_sycl_split_buffer_set_tensor(ggml_backend_buffer_t buffer,
         SYCL_CHECK(CHECK_TRY_ERROR(
             (*stream)
                 .memcpy(extra->data_device[i], buf_host, original_size)
-                .wait()));
+                ));
     }
 }
 catch (sycl::exception const &exc) {
@@ -954,7 +953,7 @@ ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer,
         SYCL_CHECK(CHECK_TRY_ERROR(
             (*stream)
                 .memcpy(buf_host, extra->data_device[i], original_size)
-                .wait()));
+                ));
     }
 }
 catch (sycl::exception const &exc) {
@@ -2487,7 +2486,7 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
                 SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(
                     src1_ddq_i, src1_ddq_i_source,
                     src1_ncols * src1_padded_col_size * q8_1_ts /
-                        q8_1_bs).wait()));
+                        q8_1_bs)));
             } else {
 
                 float * src1_ddf_i_source = (float *) src1_extra->data_device[ctx.device];
@@ -2554,7 +2553,7 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
                     dhf_dst_i += src1_col_0*ne0;
                     SYCL_CHECK(CHECK_TRY_ERROR(
                         stream->memcpy(dhf_dst_i, dst_dd_i,
-                            src1_ncols * ne0 * sizeof(float)).wait()));
+                            src1_ncols * ne0 * sizeof(float))));
                 }
             }
 
@@ -3682,7 +3681,7 @@ static void ggml_backend_sycl_get_tensor_async(ggml_backend_t backend,
     GGML_ASSERT(buf->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && "unsupported buffer type");
     const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
     SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
-        data, (const char *)tensor->data + offset, size).wait()));
+        data, (const char *)tensor->data + offset, size)));
 }
 catch (sycl::exception const &exc) {
     std::cerr << exc.what() << "Exception caught at file:" << __FILE__
@@ -3702,7 +3701,7 @@ static bool ggml_backend_sycl_cpy_tensor_async(ggml_backend_t backend,
     */
     const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
     SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
-        dst->data, src->data, ggml_nbytes(dst)).wait()));
+        dst->data, src->data, ggml_nbytes(dst))));
     return true;
 }
 
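A minimal sketch of the pattern these hunks rely on (not part of the diff; the queue setup below is an assumption, not ggml code): on an in-order SYCL queue, commands execute in submission order, so the per-call .wait() can be dropped and synchronization deferred to a single point where the host actually needs the data.

// Standalone illustration with hypothetical names; assumes an in-order queue.
#include <sycl/sycl.hpp>
#include <cstdio>
#include <vector>

int main() {
    // In-order queue: each command implicitly waits for the previous one.
    sycl::queue q{sycl::property::queue::in_order{}};

    constexpr size_t n = 1024;
    float * dev = sycl::malloc_device<float>(n, q);
    std::vector<float> host(n, -1.0f);

    // No .wait() after memset: the in-order queue guarantees it finishes
    // before the memcpy below reads the same allocation.
    q.memset(dev, 0, n * sizeof(float));
    q.memcpy(host.data(), dev, n * sizeof(float));

    // Single synchronization point before the host inspects the result.
    q.wait();

    std::printf("host[0] = %.1f\n", host[0]);   // prints 0.0
    sycl::free(dev, q);
    return 0;
}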