Commit 0d72207

c++ in baby-llama example

Use C++ includes instead of C includes; use std::min and std::max instead of the MIN and MAX macros.
1 parent dea9c93 commit 0d72207

File tree

1 file changed with 9 additions and 14 deletions

examples/baby-llama/baby-llama.cpp

Lines changed: 9 additions & 14 deletions
@@ -1,13 +1,8 @@
 #include "ggml.h"
 #include <vector>
-#include <assert.h>
+#include <cassert>
 #include <random>
-#include <string.h>
-
-#undef MIN
-#undef MAX
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#include <cstring>
 
 float frand() {
     return (float)rand()/(float)RAND_MAX;
@@ -1068,7 +1063,7 @@ void get_example_targets(int example_id, struct ggml_tensor * tokens_input, stru
     float z = (y+1.0f)*0.5f; // scale to [0..1]
     z += (frand()-0.5f)*(randomness/n_vocab);
     z = (z < 0.0f) ? 0.0f : (z > 1.0f) ? 1.0f : z; // clamp to [0..1]
-    int token = MAX(1,MIN(1+(int)(z*(float)(n_vocab-1)), n_vocab-1));
+    int token = std::max(1,std::min(1+(int)(z*(float)(n_vocab-1)), n_vocab-1));
     ggml_set_f32_1d(targets, (i-1)*n_vocab + token, +1.0f);
     if (i<n_tokens) {
         ggml_set_i32_1d(tokens_input, i, token);
@@ -1119,7 +1114,7 @@ int main(int argc, char ** argv) {
     model.hparams.n_mult = 2;
     model.hparams.n_head = 8;
     model.hparams.n_layer = 1;
-    model.hparams.n_rot = MIN(16, model.hparams.n_embd / model.hparams.n_head);
+    model.hparams.n_rot = std::min(16u, model.hparams.n_embd / model.hparams.n_head);
 
     // model.hparams.n_embd = 32;
     // model.hparams.n_mult = 2;
@@ -1225,12 +1220,12 @@
     // struct ggml_tensor * e = cross_entropy_loss(ctx0, targets1, logits1);
     // struct ggml_tensor * e = square_error_loss(ctx0, targets1, logits1);
 
-    // struct ggml_tensor * e = ggml_add(ctx0,
-    //     square_error_loss(ctx0, targets1, logits1),
-    //     square_error_loss(ctx0, targets2, logits2));
     struct ggml_tensor * e = ggml_add(ctx0,
-        cross_entropy_loss(ctx0, targets1, logits1),
-        cross_entropy_loss(ctx0, targets2, logits2));
+        square_error_loss(ctx0, targets1, logits1),
+        square_error_loss(ctx0, targets2, logits2));
+    // struct ggml_tensor * e = ggml_add(ctx0,
+    //     cross_entropy_loss(ctx0, targets1, logits1),
+    //     cross_entropy_loss(ctx0, targets2, logits2));
     // struct ggml_tensor * e = ggml_add(ctx0,
     //     ggml_add(ctx0,
     //         cross_entropy_loss(ctx0, targets1, logits1),
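One detail worth noting about the MIN/MAX to std::min/std::max change: the macros accept operands of different types, while the std:: templates deduce a single type for both arguments, which is why the n_rot line gains the 16u suffix. Below is a minimal standalone sketch of that difference, assuming the hparams fields are uint32_t (as the 16u suffix suggests); it is an illustration, not code from this commit.

#include <algorithm>   // std::min
#include <cstdint>
#include <cstdio>

#define MIN(a, b) ((a) < (b) ? (a) : (b))   // the old macro, kept here only for comparison

int main() {
    uint32_t n_embd = 256, n_head = 8;                // hypothetical values for illustration
    uint32_t a = MIN(16, n_embd / n_head);            // macro: mixing int and uint32_t compiles
    uint32_t b = std::min(16u, n_embd / n_head);      // template: both arguments must have the same type
    // std::min(16, n_embd / n_head) would fail to compile: deduction conflict between int and uint32_t
    printf("%u %u\n", (unsigned)a, (unsigned)b);      // prints "16 16"
    return 0;
}

The macro silently mixes signed and unsigned operands; the std:: versions turn that mismatch into a compile error, which is the usual motivation for this kind of cleanup.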
