
Commit 4764842

change name of GGML_OP_ADD_AT to GGML_OP_ACC

1 parent: e0de09d

3 files changed: +36 -36 lines

ggml.c

Lines changed: 21 additions & 21 deletions
@@ -3960,7 +3960,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
     "DUP",
     "ADD",
     "ADD1",
-    "ADD_AT",
+    "ACC",
     "SUB",
     "MUL",
     "DIV",
@@ -4020,7 +4020,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "x",
     "x+y",
     "x+y",
-    "x[offset:]+y",
+    "view(x,nb,offset)+=y->x",
     "x-y",
     "x*y",
     "x/y",
@@ -5054,9 +5054,9 @@ struct ggml_tensor * ggml_add1_inplace(
     return ggml_add1_impl(ctx, a, b, true);
 }
 
-// ggml_add_at
+// ggml_acc
 
-struct ggml_tensor * ggml_add_at_impl(
+struct ggml_tensor * ggml_acc_impl(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
@@ -5084,7 +5084,7 @@ struct ggml_tensor * ggml_add_at_impl(
     ((int32_t *) c->data)[3] = offset;
     ((int32_t *) c->data)[4] = inplace ? 1 : 0;
 
-    result->op = GGML_OP_ADD_AT;
+    result->op = GGML_OP_ACC;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src0 = a;
     result->src1 = b;
@@ -5093,26 +5093,26 @@ struct ggml_tensor * ggml_add_at_impl(
     return result;
 }
 
-struct ggml_tensor * ggml_add_at(
+struct ggml_tensor * ggml_acc(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
         size_t nb1,
         size_t nb2,
         size_t nb3,
         size_t offset) {
-    return ggml_add_at_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
+    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
 }
 
-struct ggml_tensor * ggml_add_at_inplace(
+struct ggml_tensor * ggml_acc_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
         size_t nb1,
         size_t nb2,
         size_t nb3,
         size_t offset) {
-    return ggml_add_at_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
+    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
 }
 
 // ggml_sub
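Note: the new GGML_OP_SYMBOL entry, view(x,nb,offset)+=y->x, states the semantics the old name obscured: b is accumulated into the strided view of a described by the byte strides nb1, nb2, nb3 and the byte offset, and the op returns all of a with that region updated. A minimal plain-C sketch of this forward behavior for contiguous 2-D f32 data — the helper and its names are illustrative, not part of ggml:

#include <stddef.h>
#include <string.h>

// Illustrative sketch only: the effect of acc on contiguous 2-D f32 data.
// dst starts as a copy of src0, then src1 (ne10 x ne11) is added into the
// view of dst that starts "offset" bytes in and advances "nb1" bytes per row.
static void acc_2d_f32_sketch(float * dst, const float * src0, size_t n0,
                              const float * src1, int ne10, int ne11,
                              size_t nb1, size_t offset) {
    memcpy(dst, src0, n0*sizeof(float));           // non-inplace: copy src0 first
    for (int i1 = 0; i1 < ne11; i1++) {
        float * row = (float *)((char *) dst + offset + i1*nb1);
        for (int i0 = 0; i0 < ne10; i0++) {
            row[i0] += src1[i1*ne10 + i0];         // view(dst,nb,offset) += src1
        }
    }
}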
@@ -8215,9 +8215,9 @@ static void ggml_compute_forward_add1(
 }
 
 
-// ggml_compute_forward_add_at
+// ggml_compute_forward_acc
 
-static void ggml_compute_forward_add_at_f32(
+static void ggml_compute_forward_acc_f32(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
@@ -8229,7 +8229,7 @@ static void ggml_compute_forward_add_at_f32(
     GGML_ASSERT(opt0->type == GGML_TYPE_I32);
     GGML_ASSERT(ggml_nelements(opt0) == 5);
 
-    // view src0 and dst with these strides and data offset inbytes during add_at
+    // view src0 and dst with these strides and data offset inbytes during acc
     // nb0 is implicitely element_size because src0 and dst are contiguous
     size_t nb1 = ((int32_t *) opt0->data)[0];
     size_t nb2 = ((int32_t *) opt0->data)[1];
@@ -8266,7 +8266,7 @@ static void ggml_compute_forward_add_at_f32(
     const size_t nb12 = src1->nb[2];
     const size_t nb13 = src1->nb[3];
 
-    // src0 and dst as viewed during add_at
+    // src0 and dst as viewed during acc
     const size_t nb0 = ggml_element_size(src0);
 
     const size_t nb00 = nb0;
@@ -8307,7 +8307,7 @@ static void ggml_compute_forward_add_at_f32(
     }
 }
 
-static void ggml_compute_forward_add_at(
+static void ggml_compute_forward_acc(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
@@ -8317,7 +8317,7 @@ static void ggml_compute_forward_add_at(
     switch (src0->type) {
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_add_at_f32(params, src0, src1, opt0, dst);
+                ggml_compute_forward_acc_f32(params, src0, src1, opt0, dst);
             } break;
         case GGML_TYPE_F16:
         case GGML_TYPE_Q4_0:
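One detail worth noting in the hunks above: the compute kernel receives the view description through opt0, a small GGML_TYPE_I32 tensor holding exactly five values, written by ggml_acc_impl and unpacked by ggml_compute_forward_acc_f32. A self-contained sketch of that pack/unpack idiom, with plain arrays standing in for the tensor data:

#include <stdint.h>
#include <stddef.h>

// Illustrative pack/unpack of the five acc view parameters, mirroring the
// ((int32_t *) c->data)[0..4] writes and the opt0 reads shown above.
static void pack_view_params(int32_t p[5], size_t nb1, size_t nb2,
                             size_t nb3, size_t offset, int inplace) {
    p[0] = (int32_t) nb1;
    p[1] = (int32_t) nb2;
    p[2] = (int32_t) nb3;
    p[3] = (int32_t) offset;
    p[4] = inplace ? 1 : 0;
}

static void unpack_view_params(const int32_t p[5], size_t * nb1, size_t * nb2,
                               size_t * nb3, size_t * offset, int * inplace) {
    *nb1     = (size_t) p[0];
    *nb2     = (size_t) p[1];
    *nb3     = (size_t) p[2];
    *offset  = (size_t) p[3];
    *inplace = p[4] != 0;
}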
@@ -13168,9 +13168,9 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             {
                 ggml_compute_forward_add1(params, tensor->src0, tensor->src1, tensor);
             } break;
-        case GGML_OP_ADD_AT:
+        case GGML_OP_ACC:
             {
-                ggml_compute_forward_add_at(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+                ggml_compute_forward_acc(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
             } break;
         case GGML_OP_SUB:
             {
@@ -13404,7 +13404,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                     inplace);
             }
         } break;
-        case GGML_OP_ADD_AT:
+        case GGML_OP_ACC:
             {
                 if (src0->grad) {
                     src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
@@ -13767,7 +13767,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                 if (src0->grad) {
                     src0->grad = ggml_add_impl(ctx,
                         src0->grad,
-                        ggml_add_at_impl(ctx,
+                        ggml_acc_impl(ctx,
                             tensor->grad,
                             ggml_neg(ctx, tensor_grad_view),
                             nb1, nb2, nb3, offset, false),
@@ -13848,7 +13848,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                     nb3 = (nb3 / n0) * ng;
                 }
 
-                src0->grad = ggml_add_at_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
+                src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
             }
         } break;
         case GGML_OP_PERMUTE:
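The backward rule in the GGML_OP_ACC case above follows from the forward definition: every element of src0 reaches the output, so src0->grad accumulates the full incoming gradient (the src1->grad update, a view into tensor->grad, lies outside the shown context). The last two hunks show the converse use: ggml_acc_impl is precisely the tool for scattering a view's gradient back into src0->grad at the right strides and offset. A hedged end-to-end sketch of gradients flowing through the renamed op, following the pattern of test-grad0.c (sizes and values are illustrative):

#include "ggml.h"

int main(void) {
    struct ggml_init_params params = { 16*1024*1024, NULL, false };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 2);
    ggml_set_param(ctx, a); // request gradients for a and b
    ggml_set_param(ctx, b);

    // f = sum(acc(a, b, ...)): b is accumulated into the top-left 2x2 of a
    struct ggml_tensor * f = ggml_sum(ctx,
            ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], 0));

    struct ggml_cgraph gf = ggml_build_forward(f);
    struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);

    ggml_set_f32(a, 1.0f);
    ggml_set_f32(b, 2.0f);
    ggml_graph_compute(ctx, &gf);

    ggml_graph_reset(&gf);       // zero the gradients
    ggml_set_f32(f->grad, 1.0f); // seed df/df = 1
    ggml_graph_compute(ctx, &gb);
    // expected: a->grad and b->grad are all ones, since each element of a
    // and of b enters the sum exactly once

    ggml_free(ctx);
    return 0;
}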
@@ -14394,7 +14394,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
1439414394

1439514395
work_size = MAX(work_size, cur);
1439614396
} break;
14397-
case GGML_OP_ADD_AT:
14397+
case GGML_OP_ACC:
1439814398
{
1439914399
node->n_tasks = n_threads;
1440014400

ggml.h

Lines changed: 3 additions & 3 deletions
@@ -253,7 +253,7 @@ extern "C" {
         GGML_OP_DUP,
         GGML_OP_ADD,
         GGML_OP_ADD1,
-        GGML_OP_ADD_AT,
+        GGML_OP_ACC,
         GGML_OP_SUB,
         GGML_OP_MUL,
         GGML_OP_DIV,
@@ -496,7 +496,7 @@ extern "C" {
             struct ggml_tensor * a,
             struct ggml_tensor * b);
 
-    GGML_API struct ggml_tensor * ggml_add_at(
+    GGML_API struct ggml_tensor * ggml_acc(
             struct ggml_context * ctx,
             struct ggml_tensor * a,
             struct ggml_tensor * b,
@@ -505,7 +505,7 @@ extern "C" {
             size_t nb3,
             size_t offset);
 
-    GGML_API struct ggml_tensor * ggml_add_at_inplace(
+    GGML_API struct ggml_tensor * ggml_acc_inplace(
             struct ggml_context * ctx,
             struct ggml_tensor * a,
             struct ggml_tensor * b,
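For callers, only the names change; the signatures are untouched. A minimal forward-only usage sketch of the renamed entry point (sizes and values are illustrative):

#include "ggml.h"
#include <stdio.h>

// Forward-only sketch: accumulate a 2x2 tensor b into the top-left of a 4x4
// tensor a. ggml_acc returns a new tensor; ggml_acc_inplace would instead
// write the result into a's own buffer.
int main(void) {
    struct ggml_init_params params = { 16*1024*1024, NULL, false };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_set_f32(ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4), 1.0f);
    struct ggml_tensor * b = ggml_set_f32(ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 2), 2.0f);

    // view of a with a's own strides, starting at byte offset 0
    struct ggml_tensor * r = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], 0);

    struct ggml_cgraph gf = ggml_build_forward(r);
    ggml_graph_compute(ctx, &gf);

    printf("r[0,0] = %f\n", ggml_get_f32_1d(r, 0)); // expect 3.0 (= 1 + 2)

    ggml_free(ctx);
    return 0;
}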

tests/test-grad0.c

Lines changed: 12 additions & 12 deletions
@@ -697,7 +697,7 @@ int main(int argc, const char ** argv) {
             }
         }
 
-        // add_at 1d
+        // acc 1d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
 
@@ -718,13 +718,13 @@ int main(int argc, const char ** argv) {
                 const int max_offset = MAX(0, ggml_nelements(x[0]) - ggml_nelements(x[1]));
                 const int offset = irand(max_offset) * ggml_element_size(x[0]);
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 1d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 1d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
-        // add_at 2d
+        // acc 2d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
             int64_t max_offsets[4] = { 0, 0, 0, 0 };
@@ -750,13 +750,13 @@ int main(int argc, const char ** argv) {
                 offsets[1] = irand(max_offsets[1]) * x[0]->nb[1];
                 const int offset = offsets[0] + offsets[1];
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 2d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 2d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
-        // add_at 3d
+        // acc 3d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
             int64_t max_offsets[4] = { 0, 0, 0, 0 };
@@ -784,13 +784,13 @@ int main(int argc, const char ** argv) {
                 offsets[2] = irand(max_offsets[2]) * x[0]->nb[2];
                 const int offset = offsets[0] + offsets[1] + offsets[2];
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 3d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 3d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
-        // add_at 4d
+        // acc 4d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
             int64_t max_offsets[4] = { 0, 0, 0, 0 };
@@ -820,9 +820,9 @@ int main(int argc, const char ** argv) {
                 offsets[3] = irand(max_offsets[3]) * x[0]->nb[3];
                 const int offset = offsets[0] + offsets[1] + offsets[2] + offsets[3];
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 4d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 4d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
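All four tests follow the same recipe: wrap ggml_acc in ggml_sum so the output is scalar, then have check_gradient compare the analytic gradients from ggml's backward pass against numeric estimates. A minimal sketch of the central-difference idea behind such a check — the helper here is hypothetical, not the actual check_gradient:

#include <math.h>
#include <stdbool.h>

// Hypothetical finite-difference check: perturb each input element by
// +/- eps, re-evaluate the scalar loss f, and compare the central-difference
// slope against the analytic gradient g.
static bool grad_matches(float (*f)(const float * x, int n),
                         float * x, int n, const float * g,
                         float eps, float max_err) {
    for (int i = 0; i < n; i++) {
        const float x0 = x[i];
        x[i] = x0 + eps; const float fp = f(x, n);
        x[i] = x0 - eps; const float fm = f(x, n);
        x[i] = x0; // restore the input
        const float g_num = (fp - fm)/(2.0f*eps);
        if (fabsf(g_num - g[i]) > max_err) {
            return false;
        }
    }
    return true;
}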
