Skip to content

Commit 81844fb

Browse files
authored
tests : Fix compilation warnings (Linux/GCC) (#2451)
* fix hellaswag print format, cast away warning in test-double-float
* c++11 cannot use designated initializers
* add static to test-grad0.c internal functions
* use memcpy in test-double-float.c
* port c tests to c++
* use initializer list for ggml_init_params
1 parent a312193 commit 81844fb

File tree

7 files changed

+40
-37
lines changed

7 files changed

+40
-37
lines changed

Makefile

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -411,13 +411,13 @@ benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o
411411
vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
412412
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
413413

414-
tests/test-double-float: tests/test-double-float.c build-info.h ggml.o llama.o common.o $(OBJS)
414+
tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS)
415415
$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
416416

417-
tests/test-grad0: tests/test-grad0.c build-info.h ggml.o llama.o common.o $(OBJS)
417+
tests/test-grad0: tests/test-grad0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
418418
$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
419419

420-
tests/test-opt: tests/test-opt.c build-info.h ggml.o llama.o common.o $(OBJS)
420+
tests/test-opt: tests/test-opt.cpp build-info.h ggml.o llama.o common.o $(OBJS)
421421
$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
422422

423423
tests/test-quantize-fns: tests/test-quantize-fns.cpp build-info.h ggml.o llama.o common.o $(OBJS)

examples/common.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -572,7 +572,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
572572
fprintf(stdout, " --temp N temperature (default: %.1f)\n", (double)params.temp);
573573
fprintf(stdout, " --perplexity compute perplexity over each ctx window of the prompt\n");
574574
fprintf(stdout, " --hellaswag compute HellaSwag score over random tasks from datafile supplied with -f\n");
575-
fprintf(stdout, " --hellaswag-tasks N number of tasks to use when computing the HellaSwag score (default: %d)\n", params.hellaswag_tasks);
575+
fprintf(stdout, " --hellaswag-tasks N number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks);
576576
fprintf(stdout, " --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
577577
fprintf(stdout, " --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
578578
if (llama_mlock_supported()) {

scripts/sync-ggml.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,5 +10,5 @@ cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m
1010
cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
1111
cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h
1212

13-
cp -rpv ../ggml/tests/test-opt.c ./tests/test-opt.c
14-
cp -rpv ../ggml/tests/test-grad0.c ./tests/test-grad0.c
13+
cp -rpv ../ggml/tests/test-opt.cpp ./tests/test-opt.cpp
14+
cp -rpv ../ggml/tests/test-grad0.cpp ./tests/test-grad0.cpp

tests/CMakeLists.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,10 @@ function(llama_add_test source)
66
add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
77
endfunction()
88

9-
# llama_add_test(test-double-float.c) # SLOW
9+
# llama_add_test(test-double-float.cpp) # SLOW
1010
llama_add_test(test-quantize-fns.cpp)
1111
llama_add_test(test-quantize-perf.cpp)
1212
llama_add_test(test-sampling.cpp)
1313
llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
14-
llama_add_test(test-grad0.c) # SLOW
15-
# llama_add_test(test-opt.c) # SLOW
14+
llama_add_test(test-grad0.cpp) # SLOW
15+
# llama_add_test(test-opt.cpp) # SLOW

tests/test-double-float.c renamed to tests/test-double-float.cpp

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,11 @@
33
// This is done by checking all finite (non-NaN, non-infinite) floats.
44

55
#undef NDEBUG
6-
#include <assert.h>
6+
#include <cassert>
77
#include <immintrin.h>
8-
#include <math.h>
9-
#include <stdint.h>
8+
#include <cmath>
9+
#include <cstdint>
10+
#include <cstring>
1011

1112
#pragma GCC diagnostic push
1213
#pragma GCC diagnostic ignored "-Wdouble-promotion"
@@ -32,8 +33,9 @@ inline static float silu_float(float x) {
3233
int main(void) {
3334
uint32_t x = UINT32_MAX;
3435
do {
35-
float f = *(float *)&x;
36-
assert(!isfinite(f) || (round_orig(f) == round_float(f)));
36+
float f;
37+
memcpy(&f, &x, sizeof(x));
38+
assert(!std::isfinite(f) || (round_orig(f) == round_float(f)));
3739
} while (x--);
3840

3941
#ifdef __F16C__

tests/test-grad0.c renamed to tests/test-grad0.cpp

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
22
#include "ggml.h"
33

4-
#include <math.h>
5-
#include <stdio.h>
6-
#include <stdlib.h>
7-
#include <assert.h>
4+
#include <cmath>
5+
#include <cstdio>
6+
#include <cstdlib>
7+
#include <cassert>
88

99
#if defined(_MSC_VER)
1010
#pragma warning(disable: 4244 4267) // possible loss of data
@@ -47,24 +47,24 @@
4747

4848
#define GGML_PRINT(...) printf(__VA_ARGS__)
4949

50-
float frand(void) {
50+
static float frand(void) {
5151
return (float)rand()/(float)RAND_MAX;
5252
}
5353

54-
int irand(int n) {
54+
static int irand(int n) {
5555
if (n == 0) return 0;
5656
return rand()%n;
5757
}
5858

59-
void get_random_dims(int64_t * dims, int ndims) {
59+
static void get_random_dims(int64_t * dims, int ndims) {
6060
dims[0] = dims[1] = dims[2] = dims[3] = 1;
6161

6262
for (int i = 0; i < ndims; i++) {
6363
dims[i] = 1 + irand(4);
6464
}
6565
}
6666

67-
struct ggml_tensor * get_random_tensor_f32(
67+
static struct ggml_tensor * get_random_tensor_f32(
6868
struct ggml_context * ctx0,
6969
int ndims,
7070
int64_t ne[],
@@ -112,7 +112,7 @@ struct ggml_tensor * get_random_tensor_f32(
112112
return result;
113113
}
114114

115-
struct ggml_tensor * get_random_tensor_f16(
115+
static struct ggml_tensor * get_random_tensor_f16(
116116
struct ggml_context * ctx0,
117117
int ndims,
118118
int64_t ne[],
@@ -160,7 +160,7 @@ struct ggml_tensor * get_random_tensor_f16(
160160
return result;
161161
}
162162

163-
struct ggml_tensor * get_random_tensor_i32(
163+
static struct ggml_tensor * get_random_tensor_i32(
164164
struct ggml_context * ctx0,
165165
int ndims,
166166
int64_t ne[],
@@ -208,7 +208,7 @@ struct ggml_tensor * get_random_tensor_i32(
208208
return result;
209209
}
210210

211-
void print_elements(const char* label, const struct ggml_tensor * t) {
211+
static void print_elements(const char* label, const struct ggml_tensor * t) {
212212
if (!t) {
213213
printf("%s: %s = null\n", __func__, label);
214214
return;
@@ -228,7 +228,7 @@ void print_elements(const char* label, const struct ggml_tensor * t) {
228228

229229
}
230230

231-
bool check_gradient(
231+
static bool check_gradient(
232232
const char * op_name,
233233
struct ggml_context * ctx0,
234234
struct ggml_tensor * x[],
@@ -310,7 +310,7 @@ bool check_gradient(
310310
}
311311

312312
// TODO: clean-up this ..
313-
bool check_mat_mul(
313+
static bool check_mat_mul(
314314
const struct ggml_tensor * y,
315315
const struct ggml_tensor * x0,
316316
const struct ggml_tensor * x1) {
@@ -373,9 +373,9 @@ bool check_mat_mul(
373373

374374
int main(int argc, const char ** argv) {
375375
struct ggml_init_params params = {
376-
.mem_size = 128*1024*1024,
377-
.mem_buffer = NULL,
378-
.no_alloc = false,
376+
/* .mem_size = */ 128*1024*1024,
377+
/* .mem_buffer = */ NULL,
378+
/* .no_alloc = */ false,
379379
};
380380

381381
int64_t ne[4];

tests/test-opt.c renamed to tests/test-opt.cpp

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
#include "ggml.h"
22

3-
#include <math.h>
4-
#include <stdio.h>
5-
#include <stdlib.h>
6-
#include <assert.h>
3+
#include <cmath>
4+
#include <cstdio>
5+
#include <cstdlib>
6+
#include <cassert>
77

88
#define MAX_NARGS 2
99

@@ -119,10 +119,11 @@ void set_element(struct ggml_tensor * t, int idx, float value) {
119119

120120
int main(void) {
121121
struct ggml_init_params params = {
122-
.mem_size = 1024*1024*1024,
123-
.mem_buffer = NULL,
124-
.no_alloc = false,
122+
/* .mem_size = */ 1024*1024*1024,
123+
/* .mem_buffer = */ NULL,
124+
/* .no_alloc = */ false,
125125
};
126+
126127
struct ggml_context * ctx = ggml_init(params);
127128

128129
int64_t ne1[4] = {4, 128, 1, 1};

0 commit comments

Comments
 (0)