Commit 6c02a03

SYCL: Remove misleading ggml_sycl_op_flatten function (#12387)
* SYCL: Remove misleading ggml_sycl_op_flatten function
* remove trailing whitespace
* Fix L2 norm from rebase
* remove try catch block from element_wise.cpp
* remove comment from common.hpp
* ggml-sycl.cpp: Add try catch sycl::exception block in compute_forward
* norm.cpp: remove try catch exception block
1 parent f52d59d commit 6c02a03
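
In effect, the commit removes a layer of function-pointer indirection: instead of funnelling every op through ggml_sycl_op_flatten, each op is now called directly and derives its own device pointers and stream. A minimal call-site sketch of the before/after shape; ggml_sycl_silu and example_dispatch are illustrative names only, not necessarily the exact in-tree signatures:

    // Sketch only: how a call site changes with this commit.
    static void example_dispatch(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
        // Before: funnelled through the wrapper via a ggml_sycl_op_flatten_t pointer:
        //   ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_silu);
        // After: called directly; the op reads src/dst data and ctx.stream() itself:
        ggml_sycl_silu(ctx, dst);
    }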

12 files changed: +368 -585 lines changed

ggml/src/ggml-sycl/common.cpp

Lines changed: 0 additions & 35 deletions

@@ -66,41 +66,6 @@ int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size)
     return sycl_down_blk_size;
 }
 
-void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                          const ggml_tensor *src1, ggml_tensor *dst,
-                          const ggml_sycl_op_flatten_t op) try {
-
-    const bool use_src1 = src1 != nullptr;
-    if(use_src1)
-        GGML_ASSERT(strcmp(src1->buffer->buft->iface.get_name(src1->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
-    GGML_ASSERT(strcmp(dst->buffer->buft->iface.get_name(dst->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
-
-    // dd = data device
-    float * src0_ddf = (float *) src0->data;
-    float * src1_ddf = use_src1 ? (float *) src1->data : nullptr;
-    float * dst_ddf = (float *) dst->data;
-
-    ggml_sycl_pool_alloc<float> src0_f(ctx.pool());
-    ggml_sycl_pool_alloc<float> src1_f(ctx.pool());
-    ggml_sycl_pool_alloc<float> dst_f(ctx.pool());
-
-    ggml_sycl_set_device(ctx.device);
-    queue_ptr main_stream = ctx.stream();
-    // GGML_SYCL_DEBUG("ctx.device=%d, main_stream=%p src0_on_device=%d, src1_on_device=%d, dst_on_device=%d\n",
-    //     ctx.device, main_stream, src0_on_device, src1_on_device, dst_on_device);
-
-    // do the computation
-    op(ctx, src0, src1, dst, src0_ddf, src1_ddf, dst_ddf, main_stream);
-    // print_ggml_tensor("tensor", dst);
-}
-catch (sycl::exception const &exc) {
-    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
-              << ", line:" << __LINE__ << std::endl;
-    std::exit(1);
-}
-
 void release_extra_gpu(ggml_tensor_extra_gpu * extra, std::vector<queue_ptr> streams) {
     for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
         for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) {
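
Per the commit message, the sycl::exception handler deleted here is not lost but centralized: ggml-sycl.cpp now wraps compute_forward in a single try/catch. A hedged sketch of that pattern, mirroring the handler removed above; the dispatch body is elided and the exact signature should be treated as illustrative:

    static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, ggml_tensor * dst) try {
        // ... switch on dst->op and call the individual SYCL ops directly ...
        return true;
    } catch (sycl::exception const & exc) {
        // Same reporting as the removed wrapper, now in one place for all ops.
        std::cerr << exc.what() << "Exception caught at file:" << __FILE__
                  << ", line:" << __LINE__ << std::endl;
        std::exit(1);
    }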

ggml/src/ggml-sycl/common.hpp

Lines changed: 8 additions & 20 deletions

@@ -494,12 +494,6 @@ static __dpct_inline__ Tp* get_pointer(sycl::local_accessor<Tp, dim> acc) {
 
 int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size);
 
-typedef void (*ggml_sycl_op_flatten_t)(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                                       const ggml_tensor *src1,
-                                       ggml_tensor *dst, const float *src0_dd,
-                                       const float *src1_dd, float *dst_dd,
-                                       const queue_ptr &main_stream);
-
 template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
 static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst,
         int ne0, int ne1, int ne2, int ne3,
@@ -757,24 +751,22 @@ struct bin_bcast_sycl {
 
 template <class op>
 inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                                   const ggml_tensor *src1, ggml_tensor *dst,
-                                   const float *src0_dd, const float *src1_dd,
-                                   float *dst_dd,
-                                   const queue_ptr &main_stream) {
+                                   const ggml_tensor *src1, ggml_tensor *dst) {
+    dpct::queue_ptr main_stream = ctx.stream();
 
     if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
-        op()(ctx, src0, src1, dst, src0_dd, src1_dd, dst_dd, main_stream);
+        op()(ctx, src0, src1, dst, (const float *)src0->data, (const float *)src1->data, (float *)dst->data, main_stream);
     } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
-        op()(ctx, src0, src1, dst, (const sycl::half *)src0_dd, src1_dd,
-             (sycl::half *)dst_dd, main_stream);
+        op()(ctx, src0, src1, dst, (const sycl::half *)src0->data, (const float *)src1->data,
+             (sycl::half *)dst->data, main_stream);
     } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
-        op()(ctx, src0, src1, dst, (const sycl::half *)src0_dd, src1_dd, dst_dd,
+        op()(ctx, src0, src1, dst, (const sycl::half *)src0->data, (const float *)src1->data, (float *)dst->data,
             main_stream);
     } else if (src0->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
-        op()(ctx, src0, src1, dst, (const int32_t *)src0_dd, (const int32_t *)src1_dd, (int32_t *)dst_dd,
+        op()(ctx, src0, src1, dst, (const int32_t *)src0->data, (const int32_t *)src1->data, (int32_t *)dst->data,
            main_stream);
     } else if (src0->type == GGML_TYPE_I16 && dst->type == GGML_TYPE_I16) {
-        op()(ctx, src0, src1, dst, (const int16_t *)src0_dd, (const int16_t *)src1_dd, (int16_t *)dst_dd,
+        op()(ctx, src0, src1, dst, (const int16_t *)src0->data, (const int16_t *)src1->data, (int16_t *)dst->data,
            main_stream);
     } else {
        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
@@ -784,8 +776,4 @@ inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
 }
 
 bool gpu_has_xmx(sycl::device &dev);
-
-void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                          const ggml_tensor *src1, ggml_tensor *dst,
-                          const ggml_sycl_op_flatten_t op);
 #endif // GGML_SYCL_COMMON_HPP
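
With the pre-extracted pointer and stream parameters gone, a caller of ggml_sycl_op_bin_bcast passes only the context and tensors; the helper derives main_stream from ctx and casts the tensors' data pointers itself. A hedged usage sketch, assuming bin_bcast_sycl is instantiated with a float binary functor as elsewhere in this header; op_add and ggml_sycl_op_add are illustrative stand-ins, not part of this diff:

    // Illustrative element-wise functor (hypothetical):
    static float op_add(const float a, const float b) { return a + b; }

    // An add op built on the new 4-argument helper.
    inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
        ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_add>>(ctx, dst->src[0], dst->src[1], dst);
    }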
