@@ -463,12 +463,6 @@ static __dpct_inline__ Tp* get_pointer(sycl::local_accessor<Tp, dim> acc) {
 
 int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size);
 
-typedef void (*ggml_sycl_op_flatten_t)(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                                       const ggml_tensor *src1,
-                                       ggml_tensor *dst, const float *src0_dd,
-                                       const float *src1_dd, float *dst_dd,
-                                       const queue_ptr &main_stream);
-
 template <float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
 static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst,
                         int ne0, int ne1, int ne2, int ne3,
@@ -691,24 +685,23 @@ struct bin_bcast_sycl {
 
 template <class op>
 inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                                   const ggml_tensor *src1, ggml_tensor *dst,
-                                   const float *src0_dd, const float *src1_dd,
-                                   float *dst_dd,
-                                   const queue_ptr &main_stream) {
+                                   const ggml_tensor *src1, ggml_tensor *dst) {
+    /* TODO: Refactor bbincast */
+    dpct::queue_ptr main_stream = ctx.stream();
 
     if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
-        op()(ctx, src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream);
+        op()(ctx, src0, src1, dst, (const float *)src0->data, (const float *)src1->data, (float *)dst->data, main_stream);
     } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
-        op()(ctx, src0, src1, dst, (const sycl::half *)src0_dd, src1_dd,
-             (sycl::half *)dst_dd, main_stream);
+        op()(ctx, src0, src1, dst, (const sycl::half *)src0->data, (const float *)src1->data,
+             (sycl::half *)dst->data, main_stream);
     } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
-        op()(ctx, src0, src1, dst, (const sycl::half *)src0_dd, src1_dd, dst_dd,
+        op()(ctx, src0, src1, dst, (const sycl::half *)src0->data, (const float *)src1->data, (float *)dst->data,
              main_stream);
     } else if (src0->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
-        op()(ctx, src0, src1, dst, (const int32_t *)src0_dd, (const int32_t *)src1_dd, (int32_t *)dst_dd,
+        op()(ctx, src0, src1, dst, (const int32_t *)src0->data, (const int32_t *)src1->data, (int32_t *)dst->data,
              main_stream);
     } else if (src0->type == GGML_TYPE_I16 && dst->type == GGML_TYPE_I16) {
-        op()(ctx, src0, src1, dst, (const int16_t *)src0_dd, (const int16_t *)src1_dd, (int16_t *)dst_dd,
+        op()(ctx, src0, src1, dst, (const int16_t *)src0->data, (const int16_t *)src1->data, (int16_t *)dst->data,
              main_stream);
     } else {
         fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
@@ -718,8 +711,4 @@ inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_t
 }
 
 bool gpu_has_xmx(sycl::device &dev);
-
-void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                          const ggml_tensor *src1, ggml_tensor *dst,
-                          const ggml_sycl_op_flatten_t op);
 #endif // GGML_SYCL_COMMON_HPP
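Below is a hedged sketch (not part of this diff) of the caller-side pattern the new ggml_sycl_op_bin_bcast signature enables: operands are passed as tensors only, and the helper resolves the device pointers from src0->data / src1->data / dst->data and the queue from ctx.stream(). The op_add functor and the ggml_sycl_op_add wrapper name are illustrative assumptions drawn from the existing bin_bcast_sycl usage, not something this change introduces.

    // Illustrative sketch only: assumes a float(const float, const float) op_add
    // functor compatible with bin_bcast_sycl. The wrapper forwards the backend
    // context and the three tensors; pointer extraction and stream selection
    // happen inside ggml_sycl_op_bin_bcast as shown in the hunk above.
    static inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
        ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_add>>(ctx, dst->src[0], dst->src[1], dst);
    }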