
Commit dea9c93

c++ in baby-llama example
Use C++ includes instead of C includes; use std::min/std::max instead of the MIN/MAX macros.
1 parent: 1ecbece
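As a rough sketch of the kind of include/macro replacement the commit message describes (illustrative code only, not an actual hunk from baby-llama.cpp; the function and variable names here are made up):

    // Illustrative sketch, not taken from this commit.
    // C style: C headers plus function-like macros.
    //   #include <stdio.h>
    //   #define MIN(a, b) ((a) < (b) ? (a) : (b))
    //   #define MAX(a, b) ((a) > (b) ? (a) : (b))
    // C++ style: C++ headers and std::min/std::max from <algorithm>.
    #include <cstdio>
    #include <algorithm>

    // Hypothetical helper: clamp a batch size into [1, n_max].
    int clamp_batch(int n_batch, int n_max) {
        // std::min/std::max are type-checked and evaluate each argument once,
        // unlike the macro versions above.
        return std::max(1, std::min(n_batch, n_max));
    }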

1 file changed: +7 -7 lines changed

examples/baby-llama/baby-llama.cpp

Lines changed: 7 additions & 7 deletions
@@ -1225,12 +1225,12 @@ int main(int argc, char ** argv) {
         // struct ggml_tensor * e = cross_entropy_loss(ctx0, targets1, logits1);
         // struct ggml_tensor * e = square_error_loss(ctx0, targets1, logits1);
 
-        struct ggml_tensor * e = ggml_add(ctx0,
-                            square_error_loss(ctx0, targets1, logits1),
-                            square_error_loss(ctx0, targets2, logits2));
         // struct ggml_tensor * e = ggml_add(ctx0,
-        //                     cross_entropy_loss(ctx0, targets1, logits1),
-        //                     cross_entropy_loss(ctx0, targets2, logits2));
+        //                     square_error_loss(ctx0, targets1, logits1),
+        //                     square_error_loss(ctx0, targets2, logits2));
+        struct ggml_tensor * e = ggml_add(ctx0,
+                            cross_entropy_loss(ctx0, targets1, logits1),
+                            cross_entropy_loss(ctx0, targets2, logits2));
         // struct ggml_tensor * e = ggml_add(ctx0,
         //             ggml_add(ctx0,
         //                 cross_entropy_loss(ctx0, targets1, logits1),
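The first hunk swaps which loss is actually optimized: the active expression built from two square_error_loss terms (over targets1/logits1 and targets2/logits2) is commented out, and the previously commented pair of cross_entropy_loss terms becomes the live loss e. As a conceptual sketch of the two losses on plain float arrays (this is not ggml's square_error_loss or cross_entropy_loss; the names and the eps constant are illustrative, and probs is assumed to already be a probability distribution):

    #include <cmath>
    #include <cstddef>

    // Sum of squared differences between predictions and targets.
    float square_error(const float * targets, const float * probs, size_t n) {
        float sum = 0.0f;
        for (size_t i = 0; i < n; ++i) {
            const float d = probs[i] - targets[i];
            sum += d * d;
        }
        return sum;
    }

    // Cross entropy -sum(y * log(y_hat)), with a small eps to avoid log(0).
    float cross_entropy(const float * targets, const float * probs, size_t n) {
        const float eps = 1e-9f;
        float sum = 0.0f;
        for (size_t i = 0; i < n; ++i) {
            sum += -targets[i] * std::log(probs[i] + eps);
        }
        return sum;
    }

Cross entropy is the usual objective when predictions and targets represent probability distributions over a vocabulary, while square error compares the values element by element.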
@@ -1258,8 +1258,8 @@ int main(int argc, char ** argv) {
         opt_params_lbfgs.print_backward_graph = false;
         opt_params_adam.adam.n_iter = 16;
         opt_params_lbfgs.lbfgs.n_iter = 16;
-        // ggml_opt(ctx0, opt_params_adam, e);
-        ggml_opt(ctx0, opt_params_lbfgs, e);
+        ggml_opt(ctx0, opt_params_adam, e);
+        // ggml_opt(ctx0, opt_params_lbfgs, e);
         //
         ggml_build_forward_expand(&gf, e);
         ggml_graph_compute(ctx0, &gf);
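The second hunk flips which optimizer runs for those 16 iterations: the Adam call becomes active and the L-BFGS call is commented out, while both parameter structs remain initialized. As a rough sketch of what a single Adam update does to a flat parameter array (textbook Adam with the usual default hyper-parameters; this is not ggml's optimizer and the names here are illustrative):

    #include <cmath>
    #include <cstddef>

    // One Adam step over n parameters. m and v are the running first/second
    // moment estimates; t is the 1-based iteration count.
    void adam_step(float * w, const float * grad, float * m, float * v,
                   size_t n, int t,
                   float alpha = 1e-3f, float beta1 = 0.9f,
                   float beta2 = 0.999f, float eps = 1e-8f) {
        for (size_t i = 0; i < n; ++i) {
            m[i] = beta1 * m[i] + (1.0f - beta1) * grad[i];
            v[i] = beta2 * v[i] + (1.0f - beta2) * grad[i] * grad[i];
            const float mhat = m[i] / (1.0f - std::pow(beta1, (float) t));  // bias correction
            const float vhat = v[i] / (1.0f - std::pow(beta2, (float) t));
            w[i] -= alpha * mhat / (std::sqrt(vhat) + eps);
        }
    }

The n_iter = 16 setting in the hunk above roughly corresponds to running 16 such update passes over the graph's parameters.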
