
Commit 9cbf50c

build : fix and ignore MSVC warnings (#1889)
1 parent 3d01122 commit 9cbf50c

File tree: 16 files changed, +89 −38 lines

examples/baby-llama/baby-llama.cpp

Lines changed: 5 additions & 1 deletion

@@ -4,6 +4,10 @@
 #include <random>
 #include <cstring>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 float frand() {
     return (float)rand()/(float)RAND_MAX;
 }
@@ -1470,7 +1474,7 @@ struct ggml_tensor * square_error_loss(struct ggml_context * ctx, struct ggml_te
 }
 
 struct ggml_tensor * cross_entropy_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
-    const float eps = 1e-3;
+    const float eps = 1e-3f;
     return
         ggml_sum(ctx,
             ggml_neg(ctx,
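
The `1e-3` → `1e-3f` edit here is the pattern behind most of the literal changes in this commit: an unsuffixed floating-point literal is a `double`, so initializing a `float` from it narrows and trips MSVC's "possible loss of data" diagnostics (C4244/C4305, depending on context). A minimal standalone sketch of the difference, with made-up values:

    #include <cstdio>

    int main() {
        const float a = 1e-3;  // double literal narrowed to float: MSVC warns
        const float b = 1e-3f; // float literal: no conversion, no warning
        printf("%g %g\n", a, b);
        return 0;
    }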

examples/benchmark/benchmark-matmult.cpp

Lines changed: 7 additions & 3 deletions

@@ -16,6 +16,10 @@
 #include <iterator>
 #include <algorithm>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 float tensor_sum_elements(const ggml_tensor * tensor) {
     float sum = 0;
     if (tensor->type==GGML_TYPE_F32) {
@@ -29,9 +33,9 @@ float tensor_sum_elements(const ggml_tensor * tensor) {
 }
 
 void tensor_dump(const ggml_tensor * tensor, const char * name) {
-    printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", name,
+    printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name,
         tensor->type, ggml_type_name(tensor->type),
-        (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
+        tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
     float sum = tensor_sum_elements(tensor);
     printf("Sum of tensor %s is %6.2f\n", name, sum);
 }
@@ -120,7 +124,7 @@ int main(int argc, char ** argv) {
     ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
     ctx_size += 1024*1024*16;
 
-    printf("Allocating Memory of size %li bytes, %li MB\n",ctx_size, (ctx_size/1024/1024));
+    printf("Allocating Memory of size %zi bytes, %zi MB\n",ctx_size, (ctx_size/1024/1024));
 
     struct ggml_init_params params = {
         /*.mem_size =*/ ctx_size,
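
The `PRIi64` macros come from `<cinttypes>` and expand to the correct conversion specifier for `int64_t` on each compiler, which matters because `long` (the old `%li`) is only 32 bits on 64-bit Windows; `%zi`/`%zu` similarly match the platform's size type. A minimal sketch of the portable pattern, with made-up values:

    #include <cinttypes>
    #include <cstdio>

    int main() {
        int64_t ne = 4096; // ggml tensor dimensions are 64-bit
        size_t  nb = 16;   // strides are size_t
        // PRIi64 picks the right length modifier per platform; %zu matches size_t
        printf("ne = %" PRIi64 ", nb = %zu\n", ne, nb);
        return 0;
    }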

examples/common.cpp

Lines changed: 5 additions & 1 deletion

@@ -28,6 +28,10 @@
 #include <wchar.h>
 #endif
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 int32_t get_num_physical_cores() {
 #ifdef __linux__
     // enumerate the set of thread siblings, num entries is num cores
@@ -373,7 +377,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         } else {
             throw std::exception();
         }
-    } catch (const std::exception &e) {
+    } catch (const std::exception&) {
         invalid_param = true;
         break;
     }
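
Dropping the name from the `catch` parameter is the usual way to silence MSVC's unreferenced-local warning when the exception object is never inspected; the handler still fires. A self-contained sketch (the `parse_arg` helper is hypothetical):

    #include <stdexcept>

    void parse_arg() {
        throw std::runtime_error("bad argument"); // hypothetical failure
    }

    int main() {
        bool invalid_param = false;
        try {
            parse_arg();
        } catch (const std::exception&) {
            // unnamed on purpose: naming it (e.g. `e`) without using it
            // draws an unreferenced-variable warning at high warning levels
            invalid_param = true;
        }
        return invalid_param ? 1 : 0;
    }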

examples/embedding/embedding.cpp

Lines changed: 4 additions & 0 deletions

@@ -4,6 +4,10 @@
 
 #include <ctime>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 int main(int argc, char ** argv) {
     gpt_params params;

examples/main/main.cpp

Lines changed: 5 additions & 1 deletion

@@ -28,6 +28,10 @@
 #include <signal.h>
 #endif
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 static console_state con_st;
 static llama_context ** g_ctx;
 
@@ -348,7 +352,7 @@ int main(int argc, char ** argv) {
         if ((int)embd.size() > max_embd_size) {
             auto skipped_tokens = embd.size() - max_embd_size;
             console_set_color(con_st, CONSOLE_COLOR_ERROR);
-            printf("<<input too long: skipped %ld token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
+            printf("<<input too long: skipped %" PRIu64 " token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
             console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
             fflush(stdout);
             embd.resize(max_embd_size);

examples/perplexity/perplexity.cpp

Lines changed: 4 additions & 0 deletions

@@ -5,6 +5,10 @@
 #include <cmath>
 #include <ctime>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 std::vector<float> softmax(const std::vector<float>& logits) {
     std::vector<float> probs(logits.size());
     float max_logit = logits[0];

examples/quantize-stats/quantize-stats.cpp

Lines changed: 4 additions & 0 deletions

@@ -19,6 +19,10 @@
 #include <thread>
 #include <mutex>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 struct quantize_stats_params {
     std::string model = "models/7B/ggml-model-f16.bin";
     bool verbose = false;

examples/save-load-state/save-load-state.cpp

Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ int main(int argc, char ** argv) {
     // init
     auto ctx = llama_init_from_file(params.model.c_str(), lparams);
     auto tokens = std::vector<llama_token>(params.n_ctx);
-    auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), tokens.size(), true);
+    auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
 
     if (n_prompt_tokens < 1) {
         fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
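
`llama_tokenize` takes its buffer size as `int`, while `std::vector::size()` returns `size_t`, so the call narrows; the explicit `int(...)` cast keeps the same behavior but marks the narrowing as intentional, silencing C4267 at the call site rather than file-wide. A sketch of the idiom with a stand-in API (`consume` is hypothetical):

    #include <vector>

    int consume(const int * data, int count) { // hypothetical int-sized API
        return data == nullptr ? 0 : count;
    }

    int main() {
        std::vector<int> buf(512);
        // size() is size_t; the cast makes the size_t -> int narrowing explicit
        return consume(buf.data(), int(buf.size()));
    }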

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 10 additions & 8 deletions

@@ -12,6 +12,9 @@
 #include <algorithm>
 #include <string>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
 
 struct random_normal_distribution {
     std::mt19937 gen;
@@ -20,7 +23,6 @@ struct random_normal_distribution {
     float max;
 };
 
-
 struct random_uniform_distribution {
     std::mt19937 gen;
     std::uniform_real_distribution<float> rd;
@@ -2366,7 +2368,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
         file->write_u32(0);
         file->write_u32(0);
         file->write_u32(GGML_TYPE_F32);
-        file->seek(-file->tell() & 31, SEEK_CUR);
+        file->seek(0-file->tell() & 31, SEEK_CUR);
         return;
     }
     const char * name = ggml_get_name(tensor);
@@ -2381,7 +2383,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
     file->write_u32(tensor->type);
     file->write_raw(ne, sizeof(ne[0]) * nd);
     file->write_raw(name, name_len);
-    file->seek(-file->tell() & 31, SEEK_CUR);
+    file->seek(0-file->tell() & 31, SEEK_CUR);
     file->write_raw(tensor->data, ggml_nbytes(tensor));
 }
 
@@ -2402,7 +2404,7 @@ void read_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
     std::string name = file->read_string(name_len);
     GGML_ASSERT(strncmp(ggml_get_name(tensor), name.c_str(), sizeof(tensor->name)-1) == 0);
 
-    file->seek(-file->tell() & 31, SEEK_CUR);
+    file->seek(0-file->tell() & 31, SEEK_CUR);
     file->read_raw(tensor->data, ggml_nbytes(tensor));
 }
 
@@ -2756,8 +2758,8 @@ struct train_params get_default_train_params() {
 
     params.lbfgs_n_iter = 16;
     params.adam_n_iter = 16;
-    params.adam_alpha = 1e-3;
-    params.adam_decay = 1e-3;
+    params.adam_alpha = 1e-3f;
+    params.adam_decay = 1e-3f;
 
     params.mem_model_gb = 2;
     params.mem_compute_gb = 24;
@@ -3331,8 +3333,8 @@ int main(int argc, char ** argv) {
     int n_gen = params.n_predict;
    int sample_ctx = n_tokens - n_tokens/8;
 
-    sampler.params.temp = 0.2;
-    sampler.params.repeat_penalty = 1.1;
+    sampler.params.temp = 0.2f;
+    sampler.params.repeat_penalty = 1.1f;
     sampler.params.mirostat = 2;
     init_sampler(&sampler, lctx);
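
The `-file->tell()` → `0-file->tell()` rewrites compute the same value, the padding needed to reach the next 32-byte boundary, but avoid MSVC's C4146 ("unary minus operator applied to unsigned type, result still unsigned"). A standalone sketch of the identity, assuming an unsigned file position as in `llama_file`:

    #include <cstddef>
    #include <cstdio>

    // padding that rounds pos up to a 32-byte boundary:
    // (0 - pos) & 31 == (32 - pos % 32) % 32 for unsigned pos
    static size_t pad32(size_t pos) {
        return (0 - pos) & 31;
    }

    int main() {
        const size_t positions[] = {0, 1, 31, 32, 33};
        for (size_t pos : positions) {
            printf("pos=%zu pad=%zu\n", pos, pad32(pos)); // pads: 0 31 1 0 31
        }
        return 0;
    }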

ggml.c

Lines changed: 6 additions & 0 deletions

@@ -35,6 +35,12 @@
 #define static_assert(cond, msg) struct global_scope_noop_trick
 #endif
 
+#if defined(_MSC_VER)
+// disable "possible loss of data" to avoid hundreds of casts
+// we should just be careful :)
+#pragma warning(disable: 4244 4267)
+#endif
+
 #if defined(_WIN32)
 
 #include <windows.h>
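
Disabling 4244/4267 for the whole translation unit is the deliberate trade-off the comment describes. When only a small region is noisy, MSVC also supports scoping the suppression; a push/pop sketch of that alternative (not what this commit does):

    #if defined(_MSC_VER)
    #pragma warning(push)
    #pragma warning(disable: 4244 4267) // possible loss of data
    #endif

    // ... conversion-heavy code here ...

    #if defined(_MSC_VER)
    #pragma warning(pop) // restore the previous warning state
    #endif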

llama.cpp

Lines changed: 4 additions & 0 deletions

@@ -40,6 +40,10 @@
 #include <sstream>
 #include <numeric>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 #define LLAMA_USE_SCRATCH
 #define LLAMA_MAX_SCRATCH_BUFFERS 16

pocs/vdot/vdot.cpp

Lines changed: 4 additions & 0 deletions

@@ -10,6 +10,10 @@
 
 #include <ggml.h>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 constexpr int kVecSize = 1 << 18;
 
 float drawFromGaussianPdf(std::mt19937& rndm) {

tests/test-quantize-fns.cpp

Lines changed: 9 additions & 6 deletions

@@ -9,12 +9,15 @@
 #include <string>
 #include <vector>
 
-
-const float MAX_QUANTIZATION_REFERENCE_ERROR = 0.0001;
-const float MAX_QUANTIZATION_TOTAL_ERROR = 0.002;
-const float MAX_QUANTIZATION_TOTAL_ERROR_2BITS = 0.0075;
-const float MAX_QUANTIZATION_TOTAL_ERROR_3BITS = 0.0040;
-const float MAX_DOT_PRODUCT_ERROR = 0.02;
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+const float MAX_QUANTIZATION_REFERENCE_ERROR = 0.0001f;
+const float MAX_QUANTIZATION_TOTAL_ERROR = 0.002f;
+const float MAX_QUANTIZATION_TOTAL_ERROR_2BITS = 0.0075f;
+const float MAX_QUANTIZATION_TOTAL_ERROR_3BITS = 0.0040f;
+const float MAX_DOT_PRODUCT_ERROR = 0.02f;
 
 const char* RESULT_STR[] = {"ok", "FAILED"};

tests/test-quantize-perf.cpp

Lines changed: 4 additions & 0 deletions

@@ -13,6 +13,10 @@
 #include <string>
 #include <vector>
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 #define MAX_ALIGNMENT 64
 #define QK 32
 #define WARMUP 5

tests/test-sampling.cpp

Lines changed: 16 additions & 16 deletions

@@ -176,27 +176,27 @@ void test_frequency_presence_penalty(
 int main(void) {
     ggml_time_init();
 
-    test_top_k({0.1, 0.2, 0.3, 0.4}, {0.4}, 1);
-    test_top_k({0.1, 0.2, 0.3, 0.4}, {0.4, 0.3, 0.2}, 3);
+    test_top_k({0.1f, 0.2f, 0.3f, 0.4f}, {0.4f}, 1);
+    test_top_k({0.1f, 0.2f, 0.3f, 0.4f}, {0.4f, 0.3f, 0.2f}, 3);
 
-    test_top_p({0.1, 0.2, 0.3, 0.4}, {0.4}, 0);
-    test_top_p({0.1, 0.2, 0.3, 0.4}, {0.4, 0.3}, 0.7);
-    test_top_p({0.1, 0.2, 0.3, 0.4}, {0.4, 0.3, 0.2, 0.1}, 1);
+    test_top_p({0.1f, 0.2f, 0.3f, 0.4f}, {0.4f}, 0);
+    test_top_p({0.1f, 0.2f, 0.3f, 0.4f}, {0.4f, 0.3f}, 0.7f);
+    test_top_p({0.1f, 0.2f, 0.3f, 0.4f}, {0.4f, 0.3f, 0.2f, 0.1f}, 1);
 
-    test_tfs({0.1, 0.15, 0.2, 0.25, 0.3}, {0.3}, 0.25);
-    test_tfs({0.1, 0.15, 0.2, 0.25, 0.3}, {0.3, 0.25}, 0.75);
-    test_tfs({0.1, 0.15, 0.2, 0.25, 0.3}, {0.3, 0.25}, 0.99);
+    test_tfs({0.1f, 0.15f, 0.2f, 0.25f, 0.3f}, {0.3f}, 0.25f);
+    test_tfs({0.1f, 0.15f, 0.2f, 0.25f, 0.3f}, {0.3f, 0.25f}, 0.75f);
+    test_tfs({0.1f, 0.15f, 0.2f, 0.25f, 0.3f}, {0.3f, 0.25f}, 0.99f);
 
-    test_typical({0.97, 0.01, 0.01, 0.01}, {0.97}, 0.5);
-    test_typical({0.4, 0.2, 0.2, 0.2}, {0.2, 0.2, 0.2}, 0.5);
+    test_typical({0.97f, 0.01f, 0.01f, 0.01f}, {0.97f}, 0.5f);
+    test_typical({0.4f, 0.2f, 0.2f, 0.2f}, {0.2f, 0.2f, 0.2f}, 0.5f);
 
-    test_repetition_penalty({0.2, 0.2, 0.2, 0.2, 0.2}, {0}, {0.25, 0.25, 0.25, 0.25, 0}, 50.0);
-    test_repetition_penalty({0.2, 0.2, 0.2, 0.2, 0.2}, {0, 1, 2}, {0.5, 0.5, 0, 0, 0}, 50.0);
-    test_repetition_penalty({0.2, 0.2, 0.2, 0.2, 0.2}, {0, 1, 2, 0, 0}, {0.5, 0.5, 0, 0, 0}, 50.0);
+    test_repetition_penalty({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0}, {0.25f, 0.25f, 0.25f, 0.25f, 0}, 50.0f);
+    test_repetition_penalty({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2}, {0.5f, 0.5f, 0, 0, 0}, 50.0f);
+    test_repetition_penalty({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2, 0, 0}, {0.5f, 0.5f, 0, 0, 0}, 50.0f);
 
-    test_frequency_presence_penalty({0.2, 0.2, 0.2, 0.2, 0.2}, {0}, {0.249997, 0.249997, 0.249997, 0.249997, 0.000011}, 5.0, 5.0);
-    test_frequency_presence_penalty({0.2, 0.2, 0.2, 0.2, 0.2}, {0, 1, 2}, {0.499966, 0.499966, 0.000023, 0.000023, 0.000023}, 5.0, 5.0);
-    test_frequency_presence_penalty({0.2, 0.2, 0.2, 0.2, 0.2}, {0, 1, 2, 0, 0}, {0.499977, 0.499977, 0.000023, 0.000023, 0.000000}, 5.0, 5.0);
+    test_frequency_presence_penalty({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0}, {0.249997f, 0.249997f, 0.249997f, 0.249997f, 0.000011f}, 5.0f, 5.0f);
+    test_frequency_presence_penalty({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2}, {0.499966f, 0.499966f, 0.000023f, 0.000023f, 0.000023f}, 5.0f, 5.0f);
+    test_frequency_presence_penalty({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2, 0, 0}, {0.499977f, 0.499977f, 0.000023f, 0.000023f, 0.000000f}, 5.0f, 5.0f);
 
     printf("OK\n");
 }

tests/test-tokenizer-0.cpp

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@ int main(int argc, char **argv) {
 
     for (const auto & test_kv : k_tests()) {
         std::vector<llama_token> res(test_kv.first.size());
-        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), res.size(), true);
+        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), int(res.size()), true);
         res.resize(n);
 
         bool correct = res.size() == test_kv.second.size();
