Skip to content

Commit 79bc1ea

Browse files
balisujohn authored and ggerganov committed
ggml : add ggml_upscale_ext (ggml/814)
* initial commit with CPU implementation of upscale to shape and test, cuda implementation next * experimental commit to see if dst shape is correct * test version * test * removed unnecessary params * refactor * fixed tests * ggml : metal impl + cleanup + sycl dev warnings * patched ggml_upscale cuda op to handle non-contiguous tensors, added test for non-contiguous behavior * metal : fix upsacle op to support nb00 + style --------- Co-authored-by: Georgi Gerganov <[email protected]>
1 parent 53332ff commit 79bc1ea

File tree

7 files changed

+146
-60
lines changed

7 files changed

+146
-60
lines changed

ggml-cuda/upscale.cu

Lines changed: 33 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,36 @@
11
#include "upscale.cuh"
22

3-
static __global__ void upscale_f32(const float * x, float * dst, const int ne00, const int ne00xne01, const int scale_factor) {
4-
// blockIdx.z: idx of ne02*ne03
5-
// blockIdx.y: idx of ne01*scale_factor, aka ne1
6-
// blockIDx.x: idx of ne00*scale_factor / BLOCK_SIZE
7-
// ne00xne01: ne00 * ne01
8-
int ne0 = ne00 * scale_factor;
9-
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
10-
if (nidx >= ne0) {
3+
static __global__ void upscale_f32(const float * x, float * dst,
4+
const int nb00, const int nb01, const int nb02, const int nb03,
5+
const int ne10, const int ne11, const int ne12, const int ne13,
6+
const float sf0, const float sf1, const float sf2, const float sf3) {
7+
int index = threadIdx.x + blockIdx.x * blockDim.x;
8+
if (index >= ne10 * ne11 * ne12 * ne13) {
119
return;
1210
}
13-
// operation
14-
int i00 = nidx / scale_factor;
15-
int i01 = blockIdx.y / scale_factor;
16-
int offset_src =
17-
i00 +
18-
i01 * ne00 +
19-
blockIdx.z * ne00xne01;
20-
int offset_dst =
21-
nidx +
22-
blockIdx.y * ne0 +
23-
blockIdx.z * ne0 * gridDim.y;
24-
dst[offset_dst] = x[offset_src];
11+
12+
int i10 = index % ne10;
13+
int i11 = (index / ne10) % ne11;
14+
int i12 = (index / (ne10 * ne11)) % ne12;
15+
int i13 = (index / (ne10 * ne11 * ne12)) % ne13;
16+
17+
int i00 = i10 / sf0;
18+
int i01 = i11 / sf1;
19+
int i02 = i12 / sf2;
20+
int i03 = i13 / sf3;
21+
22+
dst[index] = *(float *)((char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
2523
}
2624

27-
static void upscale_f32_cuda(const float * x, float * dst, const int ne00, const int ne01, const int ne02, const int ne03,
28-
const int scale_factor, cudaStream_t stream) {
29-
int ne0 = (ne00 * scale_factor);
30-
int num_blocks = (ne0 + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
31-
dim3 gridDim(num_blocks, (ne01 * scale_factor), ne02*ne03);
32-
upscale_f32<<<gridDim, CUDA_UPSCALE_BLOCK_SIZE, 0, stream>>>(x, dst, ne00, ne00 * ne01, scale_factor);
25+
static void upscale_f32_cuda(const float * x, float * dst,
26+
const int nb00, const int nb01, const int nb02, const int nb03,
27+
const int ne10, const int ne11, const int ne12, const int ne13,
28+
const float sf0, const float sf1, const float sf2, const float sf3,
29+
cudaStream_t stream) {
30+
int dst_size = ne10 * ne11 * ne12 * ne13;
31+
int num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
32+
33+
upscale_f32<<<num_blocks, CUDA_UPSCALE_BLOCK_SIZE,0,stream>>>(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3);
3334
}
3435

3536
void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -39,10 +40,12 @@ void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
3940
cudaStream_t stream = ctx.stream();
4041

4142
GGML_ASSERT(src0->type == GGML_TYPE_F32);
42-
GGML_ASSERT(dst->type == GGML_TYPE_F32);
43-
GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
43+
GGML_ASSERT( dst->type == GGML_TYPE_F32);
4444

45-
const int scale_factor = dst->op_params[0];
45+
const float sf0 = (float)dst->ne[0]/src0->ne[0];
46+
const float sf1 = (float)dst->ne[1]/src0->ne[1];
47+
const float sf2 = (float)dst->ne[2]/src0->ne[2];
48+
const float sf3 = (float)dst->ne[3]/src0->ne[3];
4649

47-
upscale_f32_cuda(src0_d, dst_d, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], scale_factor, stream);
50+
upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream);
4851
}

ggml-metal.m

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2353,7 +2353,10 @@ static enum ggml_status ggml_metal_graph_compute(
23532353
{
23542354
GGML_ASSERT(src0->type == GGML_TYPE_F32);
23552355

2356-
const int sf = dst->op_params[0];
2356+
const float sf0 = (float)ne0/src0->ne[0];
2357+
const float sf1 = (float)ne1/src0->ne[1];
2358+
const float sf2 = (float)ne2/src0->ne[2];
2359+
const float sf3 = (float)ne3/src0->ne[3];
23572360

23582361
const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline;
23592362

@@ -2376,7 +2379,10 @@ static enum ggml_status ggml_metal_graph_compute(
23762379
[encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
23772380
[encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16];
23782381
[encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
2379-
[encoder setBytes:&sf length:sizeof(sf) atIndex:18];
2382+
[encoder setBytes:&sf0 length:sizeof(sf0) atIndex:18];
2383+
[encoder setBytes:&sf1 length:sizeof(sf1) atIndex:19];
2384+
[encoder setBytes:&sf2 length:sizeof(sf2) atIndex:20];
2385+
[encoder setBytes:&sf3 length:sizeof(sf3) atIndex:21];
23802386

23812387
const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
23822388

ggml-metal.metal

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1852,7 +1852,10 @@ kernel void kernel_upscale_f32(
18521852
constant uint64_t & nb1,
18531853
constant uint64_t & nb2,
18541854
constant uint64_t & nb3,
1855-
constant int32_t & sf,
1855+
constant float & sf0,
1856+
constant float & sf1,
1857+
constant float & sf2,
1858+
constant float & sf3,
18561859
uint3 tgpig[[threadgroup_position_in_grid]],
18571860
uint3 tpitg[[thread_position_in_threadgroup]],
18581861
uint3 ntg[[threads_per_threadgroup]]) {
@@ -1861,15 +1864,17 @@ kernel void kernel_upscale_f32(
18611864
const int64_t i2 = tgpig.y;
18621865
const int64_t i1 = tgpig.x;
18631866

1864-
const int64_t i03 = i3;
1865-
const int64_t i02 = i2;
1866-
const int64_t i01 = i1/sf;
1867-
1868-
device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01);
1869-
device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1);
1867+
const int64_t i03 = i3/sf3;
1868+
const int64_t i02 = i2/sf2;
1869+
const int64_t i01 = i1/sf1;
18701870

18711871
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
1872-
dst_ptr[i0] = src0_ptr[i0/sf];
1872+
const int64_t i00 = i0/sf0;
1873+
1874+
device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
1875+
device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
1876+
1877+
dst_ptr[0] = src0_ptr[0];
18731878
}
18741879
}
18751880

ggml-sycl.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13987,6 +13987,10 @@ inline void ggml_sycl_op_upscale(const ggml_tensor *src0,
1398713987
GGML_ASSERT(dst->type == GGML_TYPE_F32);
1398813988
GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
1398913989

13990+
#pragma message("TODO: generalize upscale operator")
13991+
#pragma message(" https://github.com/ggerganov/ggml/pull/814")
13992+
GGML_ASSERT(false && "TODO: generalize upscale operator");
13993+
1399013994
const int scale_factor = dst->op_params[0];
1399113995

1399213996
upscale_f32_sycl(src0_dd, dst_dd, src0->ne[0], src0->ne[1], src0->ne[2], scale_factor, main_stream);

ggml.c

Lines changed: 47 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -6293,27 +6293,56 @@ struct ggml_tensor * ggml_pool_2d(
62936293
static struct ggml_tensor * ggml_upscale_impl(
62946294
struct ggml_context * ctx,
62956295
struct ggml_tensor * a,
6296-
int scale_factor) {
6296+
int ne0,
6297+
int ne1,
6298+
int ne2,
6299+
int ne3) {
62976300
bool is_node = false;
62986301

62996302
if (a->grad) {
63006303
GGML_ASSERT(false); // TODO: implement backward
63016304
is_node = true;
63026305
}
63036306

6307+
GGML_ASSERT(a->ne[0] <= ne0);
6308+
GGML_ASSERT(a->ne[1] <= ne1);
6309+
GGML_ASSERT(a->ne[2] <= ne2);
6310+
GGML_ASSERT(a->ne[3] <= ne3);
6311+
63046312
struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
6305-
a->ne[0] * scale_factor,
6306-
a->ne[1] * scale_factor,
6307-
a->ne[2], a->ne[3]);
6313+
ne0,
6314+
ne1,
6315+
ne2,
6316+
ne3
6317+
);
63086318

63096319
result->op = GGML_OP_UPSCALE;
6310-
result->op_params[0] = scale_factor;
6320+
63116321
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
63126322
result->src[0] = a;
63136323

63146324
return result;
63156325
}
63166326

6327+
struct ggml_tensor * ggml_upscale(
6328+
struct ggml_context * ctx,
6329+
struct ggml_tensor * a,
6330+
int scale_factor) {
6331+
return ggml_upscale_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3]);
6332+
}
6333+
6334+
struct ggml_tensor * ggml_upscale_ext(
6335+
struct ggml_context * ctx,
6336+
struct ggml_tensor * a,
6337+
int ne0,
6338+
int ne1,
6339+
int ne2,
6340+
int ne3) {
6341+
return ggml_upscale_impl(ctx, a, ne0, ne1, ne2, ne3);
6342+
}
6343+
6344+
// ggml_pad
6345+
63176346
struct ggml_tensor * ggml_pad(
63186347
struct ggml_context * ctx,
63196348
struct ggml_tensor * a,
@@ -6338,12 +6367,7 @@ struct ggml_tensor * ggml_pad(
63386367
return result;
63396368
}
63406369

6341-
struct ggml_tensor * ggml_upscale(
6342-
struct ggml_context * ctx,
6343-
struct ggml_tensor * a,
6344-
int scale_factor) {
6345-
return ggml_upscale_impl(ctx, a, scale_factor);
6346-
}
6370+
// ggml_arange
63476371

63486372
struct ggml_tensor * ggml_arange(
63496373
struct ggml_context * ctx,
@@ -6365,6 +6389,8 @@ struct ggml_tensor * ggml_arange(
63656389
return result;
63666390
}
63676391

6392+
// ggml_timestep_embedding
6393+
63686394
struct ggml_tensor * ggml_timestep_embedding(
63696395
struct ggml_context * ctx,
63706396
struct ggml_tensor * timesteps,
@@ -14820,25 +14846,28 @@ static void ggml_compute_forward_upscale_f32(
1482014846
return;
1482114847
}
1482214848

14823-
GGML_ASSERT(src0->nb[0] == sizeof(float));
14849+
GGML_ASSERT(src0->type == GGML_TYPE_F32);
1482414850

1482514851
const int ith = params->ith;
1482614852
const int nth = params->nth;
1482714853

1482814854
GGML_TENSOR_UNARY_OP_LOCALS
1482914855

14830-
const int scale_factor = dst->op_params[0];
14856+
const float sf0 = (float)ne0/src0->ne[0];
14857+
const float sf1 = (float)ne1/src0->ne[1];
14858+
const float sf2 = (float)ne2/src0->ne[2];
14859+
const float sf3 = (float)ne3/src0->ne[3];
1483114860

1483214861
// TODO: optimize
1483314862

1483414863
for (int64_t i3 = 0; i3 < ne3; i3++) {
14835-
const int64_t i03 = i3;
14864+
const int64_t i03 = i3 / sf3;
1483614865
for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
14837-
const int64_t i02 = i2;
14866+
const int64_t i02 = i2 / sf2;
1483814867
for (int64_t i1 = 0; i1 < ne1; i1++) {
14839-
const int64_t i01 = i1 / scale_factor;
14868+
const int64_t i01 = i1 / sf1;
1484014869
for (int64_t i0 = 0; i0 < ne0; i0++) {
14841-
const int64_t i00 = i0 / scale_factor;
14870+
const int64_t i00 = i0 / sf0;
1484214871

1484314872
const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
1484414873
float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
@@ -14868,6 +14897,7 @@ static void ggml_compute_forward_upscale(
1486814897
}
1486914898
}
1487014899

14900+
1487114901
// ggml_compute_forward_pad
1487214902

1487314903
static void ggml_compute_forward_pad_f32(

ggml.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1674,12 +1674,24 @@ extern "C" {
16741674
float p1);
16751675

16761676
// nearest interpolate
1677+
// multiplies ne0 and ne1 by scale factor
16771678
// used in stable-diffusion
16781679
GGML_API struct ggml_tensor * ggml_upscale(
16791680
struct ggml_context * ctx,
16801681
struct ggml_tensor * a,
16811682
int scale_factor);
16821683

1684+
// nearest interpolate
1685+
// nearest interpolate to specified dimensions
1686+
// used in tortoise.cpp
1687+
GGML_API struct ggml_tensor * ggml_upscale_ext(
1688+
struct ggml_context * ctx,
1689+
struct ggml_tensor * a,
1690+
int ne0,
1691+
int ne1,
1692+
int ne2,
1693+
int ne3);
1694+
16831695
// pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0]
16841696
GGML_API struct ggml_tensor * ggml_pad(
16851697
struct ggml_context * ctx,

tests/test-backend-ops.cpp

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1329,23 +1329,47 @@ struct test_upscale : public test_case {
13291329
const ggml_type type;
13301330
const std::array<int64_t, 4> ne;
13311331
const int32_t scale_factor;
1332+
const bool transpose;
13321333

13331334
std::string vars() override {
1334-
return VARS_TO_STR3(type, ne, scale_factor);
1335+
return VARS_TO_STR4(type, ne, scale_factor, transpose);
13351336
}
13361337

13371338
test_upscale(ggml_type type = GGML_TYPE_F32,
13381339
std::array<int64_t, 4> ne = {512, 512, 3, 1},
1339-
int32_t scale_factor = 2)
1340-
: type(type), ne(ne), scale_factor(scale_factor) {}
1340+
int32_t scale_factor = 2, bool transpose = false)
1341+
: type(type), ne(ne), scale_factor(scale_factor), transpose(transpose) {}
13411342

13421343
ggml_tensor * build_graph(ggml_context * ctx) override {
13431344
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
1345+
if (transpose) a = ggml_transpose(ctx, a);
13441346
ggml_tensor * out = ggml_upscale(ctx, a, scale_factor);
13451347
return out;
13461348
}
13471349
};
13481350

1351+
// GGML_OP_UPSCALE (ext)
1352+
struct test_upscale_ext : public test_case {
1353+
const ggml_type type;
1354+
const std::array<int64_t, 4> ne;
1355+
const std::array<int64_t, 4> ne_tgt;
1356+
1357+
std::string vars() override {
1358+
return VARS_TO_STR3(type, ne, ne_tgt);
1359+
}
1360+
1361+
test_upscale_ext(ggml_type type = GGML_TYPE_F32,
1362+
std::array<int64_t, 4> ne = {2, 5, 7, 11},
1363+
std::array<int64_t, 4> ne_tgt = {5, 7, 11, 13})
1364+
: type(type), ne(ne), ne_tgt(ne_tgt) {}
1365+
1366+
ggml_tensor * build_graph(ggml_context * ctx) override {
1367+
ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
1368+
ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1],ne_tgt[2], ne_tgt[3]);
1369+
return out;
1370+
}
1371+
};
1372+
13491373
// GGML_OP_GROUP_NORM
13501374
struct test_group_norm : public test_case {
13511375
const ggml_type type;
@@ -2169,6 +2193,8 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
21692193

21702194
test_cases.emplace_back(new test_sum_rows());
21712195
test_cases.emplace_back(new test_upscale());
2196+
test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true));
2197+
test_cases.emplace_back(new test_upscale_ext());
21722198
test_cases.emplace_back(new test_group_norm());
21732199
test_cases.emplace_back(new test_acc());
21742200
test_cases.emplace_back(new test_pad());

0 commit comments

Comments (0)