Skip to content

Commit deb15e3

Browse files
vulkan: implement GGML_OP_ARGMAX
1 parent abf4c2e commit deb15e3

File tree

3 files changed

+75
-0
lines changed

3 files changed

+75
-0
lines changed

ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,7 @@ struct vk_device_struct {
252252
vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
253253
vk_pipeline pipeline_argsort_f32;
254254
vk_pipeline pipeline_sum_rows_f32;
255+
vk_pipeline pipeline_argmax_f32;
255256
vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16;
256257
vk_pipeline pipeline_timestep_embedding_f32;
257258
vk_pipeline pipeline_pool2d_f32;
@@ -2149,6 +2150,8 @@ static void ggml_vk_load_shaders(vk_device& device) {
21492150

21502151
ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
21512152

2153+
ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
2154+
21522155
ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
21532156

21542157
ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true);
@@ -5282,6 +5285,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
52825285
return ctx->device->pipeline_sum_rows_f32;
52835286
}
52845287
return nullptr;
5288+
case GGML_OP_ARGMAX:
5289+
if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
5290+
return ctx->device->pipeline_argmax_f32;
5291+
}
5292+
return nullptr;
52855293
case GGML_OP_IM2COL:
52865294
if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
52875295
return ctx->device->pipeline_im2col_f32;
@@ -5545,6 +5553,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
55455553
case GGML_OP_RMS_NORM:
55465554
case GGML_OP_SOFT_MAX:
55475555
case GGML_OP_SUM_ROWS:
5556+
case GGML_OP_ARGMAX:
55485557
{
55495558
const uint32_t nr = ggml_nrows(src0);
55505559
if (nr > 262144) {
@@ -6149,6 +6158,10 @@ static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx,
61496158
ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f }, dryrun);
61506159
}
61516160

6161+
// Record a GGML_OP_ARGMAX dispatch for src0 -> dst.
// The pipeline (see ggml_vk_op_get_pipeline) only exists for F32 input and
// I32 output; one workgroup reduces one row to the index of its maximum.
// Push constants: KX = row length (src0->ne[0]); remaining fields unused here.
// NOTE(review): `dryrun` is forwarded to ggml_vk_op_f32 — presumably it only
// records pipeline usage without submitting work; confirm against ggml_vk_op_f32.
static void ggml_vk_argmax(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGMAX, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f }, dryrun);
}
6164+
61526165
static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
61536166
const int32_t s0 = dst->op_params[0];
61546167
const int32_t s1 = dst->op_params[1];
@@ -7040,6 +7053,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod
70407053
case GGML_OP_ARGSORT:
70417054
case GGML_OP_SUM:
70427055
case GGML_OP_SUM_ROWS:
7056+
case GGML_OP_ARGMAX:
70437057
case GGML_OP_IM2COL:
70447058
case GGML_OP_TIMESTEP_EMBEDDING:
70457059
case GGML_OP_POOL_2D:
@@ -7092,6 +7106,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod
70927106
case GGML_OP_ARGSORT:
70937107
case GGML_OP_SUM:
70947108
case GGML_OP_SUM_ROWS:
7109+
case GGML_OP_ARGMAX:
70957110
case GGML_OP_IM2COL:
70967111
case GGML_OP_TIMESTEP_EMBEDDING:
70977112
case GGML_OP_POOL_2D:
@@ -7219,6 +7234,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod
72197234
case GGML_OP_SUM_ROWS:
72207235
ggml_vk_sum_rows(ctx, compute_ctx, src0, node, dryrun);
72217236

7237+
break;
7238+
case GGML_OP_ARGMAX:
7239+
ggml_vk_argmax(ctx, compute_ctx, src0, node, dryrun);
7240+
72227241
break;
72237242
case GGML_OP_IM2COL:
72247243
ggml_vk_im2col(ctx, compute_ctx, src0, src1, node, dryrun);
@@ -7331,6 +7350,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor *
73317350
case GGML_OP_ARGSORT:
73327351
case GGML_OP_SUM:
73337352
case GGML_OP_SUM_ROWS:
7353+
case GGML_OP_ARGMAX:
73347354
case GGML_OP_IM2COL:
73357355
case GGML_OP_TIMESTEP_EMBEDDING:
73367356
case GGML_OP_POOL_2D:
@@ -8266,6 +8286,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
82668286
case GGML_OP_ARGSORT:
82678287
case GGML_OP_SUM:
82688288
case GGML_OP_SUM_ROWS:
8289+
case GGML_OP_ARGMAX:
82698290
case GGML_OP_IM2COL:
82708291
case GGML_OP_TIMESTEP_EMBEDDING:
82718292
case GGML_OP_POOL_2D:
@@ -8840,6 +8861,8 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) {
88408861
tensor_clone = ggml_sum(ggml_ctx, src0_clone);
88418862
} else if (tensor->op == GGML_OP_SUM_ROWS) {
88428863
tensor_clone = ggml_sum_rows(ggml_ctx, src0_clone);
8864+
} else if (tensor->op == GGML_OP_ARGMAX) {
8865+
tensor_clone = ggml_argmax(ggml_ctx, src0_clone);
88438866
} else if (tensor->op == GGML_OP_IM2COL) {
88448867
const int32_t s0 = tensor->op_params[0];
88458868
const int32_t s1 = tensor->op_params[1];
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
#version 450

#include "generic_head.comp"
#include "types.comp"

#extension GL_EXT_control_flow_attributes : enable

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_d[];};

layout (constant_id = 0) const uint BLOCK_SIZE = 32;

// Per-workgroup scratch: each thread's running row maximum and the
// element index where that maximum was found.
shared FLOAT_TYPE tmpmax[BLOCK_SIZE];
shared uint tmp[BLOCK_SIZE];

// One workgroup per row of the input: each thread scans a BLOCK_SIZE-strided
// slice of the row, then a shared-memory tree reduction selects the index of
// the row maximum and writes it to data_d[row].
void main() {
    // Row index is decomposed over the 3D dispatch grid (512 groups per x/y
    // dimension, matching the 262144-row cap set on the host side).
    const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
    const uint col = gl_LocalInvocationID.x;

    // Do NOT return early when col >= p.KX: barrier() below must be reached
    // in uniform control flow by every invocation of the workgroup (GLSL
    // spec requirement), so out-of-range threads simply skip the scan and
    // still participate in the barriers.
    if (col < p.KX) {
        A_TYPE amax = data_a[row*p.KX + col];
        tmp[col] = col;

        for (uint i = col + BLOCK_SIZE; i < p.KX; i += BLOCK_SIZE) {
            const A_TYPE val = data_a[row*p.KX + i];
            // Strict '>' keeps the first (lowest) index within this thread's
            // slice on ties.
            if (val > amax) {
                amax = val;
                tmp[col] = i;
            }
        }
        tmpmax[col] = amax;
    }

    barrier();
    // Tree reduction over shared memory. The `col + s < p.KX` guard ensures
    // only slots initialized above are ever read when the row is shorter
    // than BLOCK_SIZE.
    [[unroll]] for (int s = int(BLOCK_SIZE) / 2; s > 0; s >>= 1) {
        if (col < s && col + s < p.KX) {
            if (tmpmax[col] < tmpmax[col + s]) {
                tmpmax[col] = tmpmax[col + s];
                tmp[col] = tmp[col + s];
            }
        }
        barrier();
    }

    // Thread 0 always has col < p.KX (rows are non-empty), so tmp[0] is valid.
    if (col == 0) {
        data_d[row] = D_TYPE(tmp[0]);
    }
}

ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -484,6 +484,7 @@ void process_shaders() {
484484

485485
string_to_spv("argsort_f32", "argsort.comp", {{"A_TYPE", "float"}});
486486

487+
string_to_spv("argmax_f32", "argmax.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "int"}}));
487488
string_to_spv("sum_rows_f32", "sum_rows.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
488489

489490
string_to_spv("im2col_f32", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));

0 commit comments

Comments
 (0)