
Commit a9cae48

tests : add non-cont unary tests (ggml-org#7857)

* tests : add non-cont unary tests
* ggml : update unary asserts and "supports_op"

ggml-ci

1 parent bfaa676 commit a9cae48

File tree

8 files changed: +90 -66 lines changed


ggml-cuda.cu

Lines changed: 1 addition & 1 deletion

```diff
@@ -2740,7 +2740,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
             case GGML_UNARY_OP_HARDSWISH:
             case GGML_UNARY_OP_GELU_QUICK:
             case GGML_UNARY_OP_TANH:
-                return true;
+                return ggml_is_contiguous(op->src[0]);
             default:
                 return false;
         }
```
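Each backend's supports_op now gates unary ops on ggml_is_contiguous(op->src[0]): the source tensor's byte strides nb[] must describe a dense row-major layout for its element counts ne[]. A minimal sketch of that check (paraphrased, not the verbatim ggml.c implementation; ggml_type_size and ggml_blck_size account for quantized block types):

```c
// Sketch of the contiguity test, paraphrased from ggml_is_contiguous():
// each stride must equal the byte size of the dimension below it.
static bool is_contiguous_sketch(const struct ggml_tensor * t) {
    return t->nb[0] == ggml_type_size(t->type)                   // packed elements
        && t->nb[1] == t->nb[0]*t->ne[0]/ggml_blck_size(t->type) // packed rows
        && t->nb[2] == t->nb[1]*t->ne[1]                         // packed planes
        && t->nb[3] == t->nb[2]*t->ne[2];                        // packed batches
}
```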

ggml-cuda/unary.cu

Lines changed: 20 additions & 0 deletions

```diff
@@ -148,6 +148,8 @@ void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -160,6 +162,8 @@ void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -172,6 +176,8 @@ void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -184,6 +190,8 @@ void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -196,6 +204,8 @@ void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -208,6 +218,8 @@ void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -220,6 +232,8 @@ void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -232,6 +246,8 @@ void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -244,6 +260,8 @@ void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
@@ -259,6 +277,8 @@ void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
```
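These CUDA wrappers treat src0->data as a flat float array, so each one now asserts contiguity up front instead of silently reading a strided view with the wrong layout. A non-contiguous source typically comes from a view op; a hypothetical usage sketch (not from this commit; ctx here is a struct ggml_context *):

```c
// Hypothetical sketch: a transposed view is non-contiguous, so the unary
// kernels (and supports_op) reject it until it is made dense again.
struct ggml_tensor * a  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32);
struct ggml_tensor * at = ggml_transpose(ctx, a); // swaps nb[0]/nb[1]: a strided view
// ggml_is_contiguous(at) == false here
struct ggml_tensor * y  = ggml_relu(ctx, ggml_cont(ctx, at)); // ggml_cont copies into a dense tensor
```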

ggml-kompute.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -1340,7 +1340,7 @@ static bool ggml_vk_supports_op(const struct ggml_tensor * op) {
             case GGML_UNARY_OP_RELU:
             case GGML_UNARY_OP_GELU:
             case GGML_UNARY_OP_SILU:
-                return true;
+                return ggml_is_contiguous(op->src[0]);
             default:
                 ;
         }
```

ggml-metal.m

Lines changed: 1 addition & 1 deletion

```diff
@@ -744,7 +744,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const struct ggml_tensor * op) {
             case GGML_UNARY_OP_GELU:
             case GGML_UNARY_OP_GELU_QUICK:
             case GGML_UNARY_OP_SILU:
-                return true;
+                return ggml_is_contiguous(op->src[0]);
             default:
                 return false;
         }
```

ggml-sycl.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -17190,7 +17190,7 @@ GGML_CALL static bool ggml_backend_sycl_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
             case GGML_UNARY_OP_HARDSWISH:
             case GGML_UNARY_OP_GELU_QUICK:
             case GGML_UNARY_OP_TANH:
-                return true;
+                return ggml_is_contiguous(op->src[0]);
             default:
                 return false;
         }
```

ggml-vulkan.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -6439,7 +6439,7 @@ GGML_CALL static bool ggml_backend_vk_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
             case GGML_UNARY_OP_GELU:
             case GGML_UNARY_OP_SILU:
             case GGML_UNARY_OP_RELU:
-                return true;
+                return ggml_is_contiguous(op->src[0]);
             default:
                 return false;
         }
```
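The test-side files are not shown on this page (8 files changed, 6 diffs listed), but the commit title says non-contiguous unary cases were added. For illustration only, a case in the style of tests/test-backend-ops.cpp could force a non-contiguous src0 by building the op on a strided view of a larger tensor; the names ne and op below stand in for test-case members and are hypothetical:

```cpp
// Hypothetical sketch, in the style of tests/test-backend-ops.cpp:
// allocate twice the row length, then take a strided view so src0 is non-contiguous.
ggml_tensor * build_graph(ggml_context * ctx) {
    ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, ne[0]*2, ne[1], ne[2], ne[3]);
    a = ggml_view_4d(ctx, a, ne[0], ne[1], ne[2], ne[3],
                     a->nb[1], a->nb[2], a->nb[3], 0); // keeps the wide strides
    return ggml_unary(ctx, a, op); // op: the GGML_UNARY_OP under test
}
```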
