Commit 503db28

maximegmd authored (co-authored by github-actions[bot] and Georgi Gerganov)
llama : fix name shadowing and C4146 (#1526)
* Fix name shadowing and C4146

* Fix if macros not using defined when required

* Update llama-util.h

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* Update llama-util.h

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* Code style

Co-authored-by: Georgi Gerganov <[email protected]>

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Georgi Gerganov <[email protected]>
1 parent 8a203f9 commit 503db28

File tree

3 files changed: +26 -25 lines

ggml.c

Lines changed: 2 additions & 2 deletions

@@ -512,7 +512,7 @@ static inline int hsum_i32_4(const __m128i a) {
     return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
 }
 
-#if __AVX2__ || __AVX512F__
+#if defined(__AVX2__) || defined(__AVX512F__)
 // spread 32 bits to 32 bytes { 0x00, 0xFF }
 static inline __m256i bytes_from_bits_32(const uint8_t * x) {
     uint32_t x32;
@@ -688,7 +688,7 @@ static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128
 #endif // __AVX__ || __AVX2__ || __AVX512F__
 #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
 
-#if __ARM_NEON
+#if defined(__ARM_NEON)
 
 #if !defined(__aarch64__)
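
Why the defined() form: an identifier that is not defined as a macro evaluates to 0 inside #if, so "#if __AVX2__ || __AVX512F__" compiles but draws warnings such as MSVC C4668 or GCC/Clang -Wundef when a feature macro is absent. A minimal standalone sketch of the pattern (not taken from ggml.c; FEATURE_A and FEATURE_B are made-up stand-ins for the CPU feature macros):

// Hypothetical example; compile with e.g. -Wundef (GCC/Clang) or /W4 (MSVC).
#include <cstdio>

// "#if FEATURE_A || FEATURE_B" would evaluate the undefined identifiers as 0
// and trigger C4668 / -Wundef. The defined() form never evaluates them:
#if defined(FEATURE_A) || defined(FEATURE_B)
static const int simd_width = 32;   // wide path, mirrors the AVX2/AVX-512 branch
#else
static const int simd_width = 16;   // fallback path
#endif

int main() {
    std::printf("simd_width = %d\n", simd_width);
    return 0;
}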

llama-util.h

Lines changed: 20 additions & 20 deletions

@@ -101,12 +101,12 @@ struct llama_file {
         LLAMA_ASSERT(ret == 0); // same
     }
 
-    void read_raw(void * ptr, size_t size) {
-        if (size == 0) {
+    void read_raw(void * ptr, size_t len) const {
+        if (len == 0) {
             return;
         }
         errno = 0;
-        std::size_t ret = std::fread(ptr, size, 1, fp);
+        std::size_t ret = std::fread(ptr, len, 1, fp);
         if (ferror(fp)) {
             throw std::runtime_error(format("read error: %s", strerror(errno)));
         }
@@ -127,12 +127,12 @@ struct llama_file {
         return std::string(chars.data(), len);
     }
 
-    void write_raw(const void * ptr, size_t size) {
-        if (size == 0) {
+    void write_raw(const void * ptr, size_t len) const {
+        if (len == 0) {
             return;
         }
         errno = 0;
-        size_t ret = std::fwrite(ptr, size, 1, fp);
+        size_t ret = std::fwrite(ptr, len, 1, fp);
         if (ret != 1) {
             throw std::runtime_error(format("write error: %s", strerror(errno)));
         }
@@ -267,9 +267,9 @@ struct llama_mlock {
         }
     }
 
-    void init(void * addr) {
-        LLAMA_ASSERT(this->addr == NULL && this->size == 0);
-        this->addr = addr;
+    void init(void * ptr) {
+        LLAMA_ASSERT(addr == NULL && size == 0);
+        addr = ptr;
     }
 
     void grow_to(size_t target_size) {
@@ -340,14 +340,14 @@ struct llama_mlock {
         return (size_t) si.dwPageSize;
     }
 
-    bool raw_lock(void * addr, size_t size) {
+    bool raw_lock(void * ptr, size_t len) {
         for (int tries = 1; ; tries++) {
-            if (VirtualLock(addr, size)) {
+            if (VirtualLock(ptr, len)) {
                 return true;
             }
             if (tries == 2) {
                 fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
-                    size, this->size, llama_format_win_err(GetLastError()).c_str());
+                    len, size, llama_format_win_err(GetLastError()).c_str());
                 return false;
             }
 
@@ -363,7 +363,7 @@ struct llama_mlock {
             // is equal to the number of pages in its minimum working set minus
             // a small overhead."
             // Hopefully a megabyte is enough overhead:
-            size_t increment = size + 1048576;
+            size_t increment = len + 1048576;
             // The minimum must be <= the maximum, so we need to increase both:
             min_ws_size += increment;
             max_ws_size += increment;
@@ -375,8 +375,8 @@ struct llama_mlock {
         }
     }
 
-    void raw_unlock(void * addr, size_t size) {
-        if (!VirtualUnlock(addr, size)) {
+    void raw_unlock(void * ptr, size_t len) {
+        if (!VirtualUnlock(ptr, len)) {
             fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
                 llama_format_win_err(GetLastError()).c_str());
         }
@@ -388,12 +388,12 @@ struct llama_mlock {
         return (size_t) 65536;
     }
 
-    bool raw_lock(const void * addr, size_t size) {
+    bool raw_lock(const void * addr, size_t len) {
         fprintf(stderr, "warning: mlock not supported on this system\n");
         return false;
     }
 
-    void raw_unlock(const void * addr, size_t size) {}
+    void raw_unlock(const void * addr, size_t len) {}
 #endif
 };
 
@@ -404,10 +404,10 @@ struct llama_buffer {
 
     llama_buffer() = default;
 
-    void resize(size_t size) {
+    void resize(size_t len) {
         delete[] addr;
-        addr = new uint8_t[size];
-        this->size = size;
+        addr = new uint8_t[len];
+        size = len;
     }
 
     ~llama_buffer() {
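
All of the llama-util.h edits follow the same pattern: parameters named size and addr shadowed the struct members of the same name (which trips -Wshadow on GCC/Clang and C4458 on MSVC), so they are renamed to len and ptr, the now-unnecessary this-> qualifications are dropped, and read_raw/write_raw are additionally marked const. A rough sketch of the before/after, using a made-up buffer struct rather than the real llama_file or llama_buffer:

// Hypothetical illustration of member shadowing; not code from llama.cpp.
#include <cstddef>
#include <cstdint>
#include <cstring>

struct buffer {
    uint8_t * addr = nullptr;
    size_t    size = 0;

    // Before: the parameter hides the member, so "size" in the body means the
    // argument; the member is only reachable as this->size, and -Wshadow /
    // MSVC C4458 warn about the hidden name.
    //   void resize(size_t size) { ... this->size = size; }
    //
    // After: a distinct parameter name needs no this-> and cannot be confused
    // with the member.
    void resize(size_t len) {
        delete[] addr;
        addr = new uint8_t[len];
        size = len;
    }

    // Member functions that do not modify the struct's own state can be
    // const, as read_raw and write_raw now are.
    size_t capacity() const { return size; }

    ~buffer() { delete[] addr; }
};

int main() {
    buffer b;
    b.resize(64);
    std::memset(b.addr, 0, b.capacity());
    return 0;
}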

llama.cpp

Lines changed: 4 additions & 3 deletions

@@ -45,6 +45,7 @@ enum e_model {
     MODEL_65B,
 };
 
+
 static const size_t MB = 1024*1024;
 
 // computed for n_ctx == 2048
@@ -110,7 +111,7 @@ struct llama_hparams {
     enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;
 
     bool operator!=(const llama_hparams & other) const {
-        return memcmp(this, &other, sizeof(llama_hparams));
+        return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams)));
     }
 };
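
A note on the operator!= change: memcmp returns an int, and returning it straight from a bool function relies on an implicit int-to-bool conversion that some MSVC warning levels flag (C4800 in certain toolsets); the static_cast<bool> makes the narrowing explicit without changing behavior. A small self-contained sketch of the same pattern (not taken from llama.cpp):

// Hypothetical struct; mirrors the memcmp-based comparison shown above.
#include <cstdio>
#include <cstring>

struct hparams {
    int n_ctx  = 512;
    int n_embd = 4096;

    bool operator!=(const hparams & other) const {
        // memcmp yields <0, 0 or >0; the cast states the int -> bool
        // conversion explicitly.
        return static_cast<bool>(std::memcmp(this, &other, sizeof(hparams)));
    }
};

int main() {
    hparams a, b;
    b.n_ctx = 2048;
    std::printf("a != a: %d\n", a != a ? 1 : 0);   // prints 0
    std::printf("a != b: %d\n", a != b ? 1 : 0);   // prints 1
    return 0;
}

As in the original code, a memcmp-based comparison assumes padding bytes match; the two-int struct above has none.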

@@ -502,7 +503,7 @@ struct llama_file_loader {
 
         if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
             // skip to the next multiple of 32 bytes
-            file.seek(-file.tell() & 31, SEEK_CUR);
+            file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
         }
         shard.file_idx = file_idx;
         shard.file_off = file.tell();
@@ -577,7 +578,7 @@ struct llama_file_saver {
         file.write_u32(new_type);
         file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
         file.write_raw(tensor.name.data(), tensor.name.size());
-        file.seek(-file.tell() & 31, SEEK_CUR);
+        file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
         LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type));
         file.write_raw(new_data, new_size);
     }
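
The two seek changes are the actual C4146 fix: file.tell() returns an unsigned size_t, and MSVC's C4146 ("unary minus operator applied to unsigned type, result still unsigned") fires on negating it directly. Casting to the signed ptrdiff_t before negation silences the warning while leaving the low bits unchanged, so "-offset & 31" still gives the number of padding bytes needed to reach the next 32-byte boundary. A standalone sketch of that arithmetic (the helper name is made up, not from the repository):

// Hypothetical demonstration of the "-offset & 31" padding computation.
#include <cassert>
#include <cstddef>
#include <cstdio>

// Number of bytes to skip so that offset lands on a multiple of 32.
static size_t pad_to_32(size_t offset) {
    // Negating the unsigned value directly would trigger MSVC C4146;
    // go through a signed type first, as the diff above does.
    return static_cast<size_t>(-static_cast<ptrdiff_t>(offset)) & 31;
}

int main() {
    assert(pad_to_32(0)  == 0);
    assert(pad_to_32(1)  == 31);
    assert(pad_to_32(32) == 0);
    assert(pad_to_32(45) == 19);   // 45 + 19 == 64, the next multiple of 32
    std::printf("padding checks passed\n");
    return 0;
}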
