@@ -3960,7 +3960,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
     "DUP",
     "ADD",
     "ADD1",
-    "ADD_AT",
+    "ACC",
     "SUB",
     "MUL",
     "DIV",
@@ -4020,7 +4020,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "x",
     "x+y",
     "x+y",
-    "x[offset:]+y",
+    "view(x,nb,offset)+=y->x",
     "x-y",
     "x*y",
     "x/y",
@@ -5054,9 +5054,9 @@ struct ggml_tensor * ggml_add1_inplace(
     return ggml_add1_impl(ctx, a, b, true);
 }
 
-// ggml_add_at
+// ggml_acc
 
-struct ggml_tensor * ggml_add_at_impl(
+struct ggml_tensor * ggml_acc_impl(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
@@ -5084,7 +5084,7 @@ struct ggml_tensor * ggml_add_at_impl(
     ((int32_t *) c->data)[3] = offset;
     ((int32_t *) c->data)[4] = inplace ? 1 : 0;
 
-    result->op   = GGML_OP_ADD_AT;
+    result->op   = GGML_OP_ACC;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src0 = a;
     result->src1 = b;
@@ -5093,26 +5093,26 @@ struct ggml_tensor * ggml_add_at_impl(
     return result;
 }
 
-struct ggml_tensor * ggml_add_at(
+struct ggml_tensor * ggml_acc(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
         size_t nb1,
         size_t nb2,
         size_t nb3,
         size_t offset) {
-    return ggml_add_at_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
+    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
 }
 
-struct ggml_tensor * ggml_add_at_inplace(
+struct ggml_tensor * ggml_acc_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
         size_t nb1,
         size_t nb2,
         size_t nb3,
         size_t offset) {
-    return ggml_add_at_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
+    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
 }
 
 // ggml_sub
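For context (not part of the commit): a minimal usage sketch of the renamed `ggml_acc` API. It assumes the graph API of this vintage of ggml (`ggml_init`, `ggml_build_forward`, `ggml_graph_compute`); the tensor shapes, strides, and zero offset are illustrative only.

```c
#include "ggml.h"

int main(void) {
    struct ggml_init_params ip = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    // destination tensor and the values to accumulate into it
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 2);

    // ... fill a and b with data ...

    // add b into the view of a that starts at byte offset 0 and uses a's own
    // row/plane strides: this accumulates b onto the top-left 2x2 block of a
    struct ggml_tensor * c = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], 0);

    struct ggml_cgraph gf = ggml_build_forward(c);
    ggml_graph_compute(ctx, &gf);

    ggml_free(ctx);
    return 0;
}
```

The nb1/nb2/nb3 arguments are byte strides of the destination view, matching how they are packed into the `opt0` tensor by `ggml_acc_impl` above.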
@@ -8215,9 +8215,9 @@ static void ggml_compute_forward_add1(
 }
 
 
-// ggml_compute_forward_add_at
+// ggml_compute_forward_acc
 
-static void ggml_compute_forward_add_at_f32(
+static void ggml_compute_forward_acc_f32(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
@@ -8229,7 +8229,7 @@ static void ggml_compute_forward_add_at_f32(
     GGML_ASSERT(opt0->type == GGML_TYPE_I32);
     GGML_ASSERT(ggml_nelements(opt0) == 5);
 
-    // view src0 and dst with these strides and data offset inbytes during add_at
+    // view src0 and dst with these strides and data offset inbytes during acc
     // nb0 is implicitely element_size because src0 and dst are contiguous
     size_t nb1 = ((int32_t *) opt0->data)[0];
     size_t nb2 = ((int32_t *) opt0->data)[1];
@@ -8266,7 +8266,7 @@ static void ggml_compute_forward_add_at_f32(
     const size_t nb12 = src1->nb[2];
     const size_t nb13 = src1->nb[3];
 
-    // src0 and dst as viewed during add_at
+    // src0 and dst as viewed during acc
     const size_t nb0 = ggml_element_size(src0);
 
     const size_t nb00 = nb0;
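To make the addressing above concrete, here is a rough standalone sketch (illustrative names, not code from this commit) of what the forward pass computes for F32: every element of src1 is added into the view of dst described by the byte strides nb1/nb2/nb3 and the byte offset, with nb0 implicitly equal to the element size because dst is contiguous.

```c
#include <stdint.h>
#include <stddef.h>

// Illustrative only: accumulate a contiguous f32 tensor `src` (logical shape
// ne0 x ne1 x ne2 x ne3) into the view of `dst` given by byte strides
// nb1/nb2/nb3 and a byte offset; the innermost stride is sizeof(float).
static void acc_view_f32(float * dst, const float * src,
                         int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3,
                         size_t nb1, size_t nb2, size_t nb3, size_t offset) {
    for (int64_t i3 = 0; i3 < ne3; ++i3) {
        for (int64_t i2 = 0; i2 < ne2; ++i2) {
            for (int64_t i1 = 0; i1 < ne1; ++i1) {
                float * d = (float *)((char *) dst + offset + i3*nb3 + i2*nb2 + i1*nb1);
                const float * s = src + ((i3*ne2 + i2)*ne1 + i1)*ne0;
                for (int64_t i0 = 0; i0 < ne0; ++i0) {
                    d[i0] += s[i0];   // dst is assumed to already hold a copy of src0
                }
            }
        }
    }
}
```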
@@ -8307,7 +8307,7 @@ static void ggml_compute_forward_add_at_f32(
     }
 }
 
-static void ggml_compute_forward_add_at(
+static void ggml_compute_forward_acc(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
@@ -8317,7 +8317,7 @@ static void ggml_compute_forward_add_at(
     switch (src0->type) {
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_add_at_f32(params, src0, src1, opt0, dst);
+                ggml_compute_forward_acc_f32(params, src0, src1, opt0, dst);
             } break;
         case GGML_TYPE_F16:
         case GGML_TYPE_Q4_0:
@@ -13168,9 +13168,9 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
             {
                 ggml_compute_forward_add1(params, tensor->src0, tensor->src1, tensor);
             } break;
-        case GGML_OP_ADD_AT:
+        case GGML_OP_ACC:
             {
-                ggml_compute_forward_add_at(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+                ggml_compute_forward_acc(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
             } break;
         case GGML_OP_SUB:
             {
@@ -13404,7 +13404,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
                         inplace);
                 }
             } break;
-        case GGML_OP_ADD_AT:
+        case GGML_OP_ACC:
             {
                 if (src0->grad) {
                     src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
@@ -13767,7 +13767,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
                 if (src0->grad) {
                     src0->grad = ggml_add_impl(ctx,
                         src0->grad,
-                        ggml_add_at_impl(ctx,
+                        ggml_acc_impl(ctx,
                             tensor->grad,
                             ggml_neg(ctx, tensor_grad_view),
                             nb1, nb2, nb3, offset, false),
@@ -13848,7 +13848,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
                         nb3 = (nb3 / n0) * ng;
                     }
 
-                    src0->grad = ggml_add_at_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
+                    src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
                 }
             } break;
         case GGML_OP_PERMUTE:
@@ -14394,7 +14394,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
 
                         work_size = MAX(work_size, cur);
                     } break;
-                case GGML_OP_ADD_AT:
+                case GGML_OP_ACC:
                     {
                         node->n_tasks = n_threads;
 