
Commit 776b2cb

Add enum llama_ftype, sync ggml_type to model files
1 parent 180b693 commit 776b2cb

File tree

examples/quantize/quantize.cpp
ggml.c
ggml.h
llama.cpp
llama.h

5 files changed: +74 -57 lines changed


examples/quantize/quantize.cpp

Lines changed: 5 additions & 5 deletions
@@ -5,15 +5,15 @@
 #include <string>

 // usage:
-//  ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
+//  ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
 //
 int main(int argc, char ** argv) {
     ggml_time_init();

     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
-        fprintf(stderr, "  type = 2 - q4_0\n");
-        fprintf(stderr, "  type = 3 - q4_1\n");
+        fprintf(stderr, "  type = %d - q4_0\n", LLAMA_FTYPE_MOSTLY_Q4_0);
+        fprintf(stderr, "  type = %d - q4_1\n", LLAMA_FTYPE_MOSTLY_Q4_1);
         return 1;
     }

@@ -27,7 +27,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];

-    const int itype = atoi(argv[3]);
+    const enum llama_ftype ftype = (enum llama_ftype)atoi(argv[3]);

     const int64_t t_main_start_us = ggml_time_us();

@@ -37,7 +37,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();

-        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype)) {
+        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype)) {
             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
             return 1;
         }

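Note that quantize.cpp still reads the file type as a bare integer and casts it straight to the enum; out-of-range values are only rejected later, inside llama_model_quantize_internal. Purely as an illustration (the parse_ftype helper below is hypothetical, not part of this commit), a stricter mapping could validate the argument against the enumerators declared in llama.h first:

#include <cstdlib>

#include "llama.h"

// hypothetical helper, not in the commit: reject values outside the known
// llama_ftype enumerators before handing them to llama_model_quantize
static bool parse_ftype(const char * arg, enum llama_ftype & out) {
    const int v = atoi(arg);
    if (v < LLAMA_FTYPE_ALL_F32 || v > LLAMA_FTYPE_MOSTLY_Q4_1) {
        return false;
    }
    out = (enum llama_ftype) v;
    return true;
}
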
ggml.c

Lines changed: 16 additions & 19 deletions
@@ -2567,29 +2567,26 @@ inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x
 //

 static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = {
-    QK,
-    QK,
-    1,
-    1,
-    1,
-    1,
-    1,
+    [GGML_TYPE_F32]  = 1,
+    [GGML_TYPE_F16]  = 1,
+    [GGML_TYPE_Q4_0] = QK,
+    [GGML_TYPE_Q4_1] = QK,
+    [GGML_TYPE_I8]   = 1,
+    [GGML_TYPE_I16]  = 1,
+    [GGML_TYPE_I32]  = 1,
 };
-
-static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 5");
+static_assert(GGML_TYPE_COUNT == 7, "GGML_BLCK_SIZE is outdated");

 static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
-    sizeof(block_q4_0),
-    sizeof(block_q4_1),
-    sizeof(int8_t ),
-    sizeof(int16_t),
-    sizeof(int32_t),
-    sizeof(ggml_fp16_t),
-    sizeof(float ),
+    [GGML_TYPE_F32]  = sizeof(float),
+    [GGML_TYPE_F16]  = sizeof(ggml_fp16_t),
+    [GGML_TYPE_Q4_0] = sizeof(block_q4_0),
+    [GGML_TYPE_Q4_1] = sizeof(block_q4_1),
+    [GGML_TYPE_I8]   = sizeof(int8_t),
+    [GGML_TYPE_I16]  = sizeof(int16_t),
+    [GGML_TYPE_I32]  = sizeof(int32_t),
 };
-
-// don't forget to update the array above when adding new types
-static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 5");
+static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_SIZE is outdated");

 static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
     "NONE",

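For context, ggml sizes tensor data from these two tables: ne elements of type t occupy roughly ne / GGML_BLCK_SIZE[t] * GGML_TYPE_SIZE[t] bytes, because the quantized types pack QK elements into each block. A minimal sketch of that calculation, assuming it sits next to the tables above in ggml.c (the helper name is illustrative, not from this commit); it shows why each entry must stay attached to its enumerator now that the enum order changed:

// illustrative only: bytes occupied by `ne` elements of `type`,
// assuming `ne` is a multiple of the block size for quantized types
static size_t example_row_size(enum ggml_type type, int ne) {
    return ((size_t) ne / GGML_BLCK_SIZE[type]) * GGML_TYPE_SIZE[type];
}
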
ggml.h

Lines changed: 5 additions & 4 deletions
@@ -198,13 +198,14 @@ struct ggml_object;
 struct ggml_context;

 enum ggml_type {
-    GGML_TYPE_Q4_0,
-    GGML_TYPE_Q4_1,
+    // explicitly numbered values are used in llama.cpp files
+    GGML_TYPE_F32  = 0,
+    GGML_TYPE_F16  = 1,
+    GGML_TYPE_Q4_0 = 2,
+    GGML_TYPE_Q4_1 = 3,
     GGML_TYPE_I8,
     GGML_TYPE_I16,
     GGML_TYPE_I32,
-    GGML_TYPE_F16,
-    GGML_TYPE_F32,
     GGML_TYPE_COUNT,
 };

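The comment in the diff is the crux of the commit: the first four ggml_type values are now fixed at 0-3, which is exactly the encoding the model files use for per-tensor types, and it lines up with the file-level llama_ftype values declared in llama.h below. Purely as an illustration (these asserts are not in the commit), the intended correspondence could be pinned down at compile time like this:

#include "ggml.h"
#include "llama.h"

// not in the commit: document that the on-disk codes 0..3 mean the same thing
// whether read as a per-tensor ggml_type or as a whole-file llama_ftype
static_assert((int) GGML_TYPE_F32  == (int) LLAMA_FTYPE_ALL_F32,     "type codes out of sync");
static_assert((int) GGML_TYPE_F16  == (int) LLAMA_FTYPE_MOSTLY_F16,  "type codes out of sync");
static_assert((int) GGML_TYPE_Q4_0 == (int) LLAMA_FTYPE_MOSTLY_Q4_0, "type codes out of sync");
static_assert((int) GGML_TYPE_Q4_1 == (int) LLAMA_FTYPE_MOSTLY_Q4_1, "type codes out of sync");
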
llama.cpp

Lines changed: 39 additions & 28 deletions
@@ -77,7 +77,7 @@ struct llama_hparams {
     uint32_t n_head  = 32;
     uint32_t n_layer = 32;
     uint32_t n_rot   = 64;
-    uint32_t f16     = 1;
+    enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;

     bool operator!=(const llama_hparams & other) const {
         return memcmp(this, &other, sizeof(llama_hparams));
@@ -427,7 +427,7 @@ struct llama_file_loader {
         hparams.n_head = file.read_u32();
         hparams.n_layer = file.read_u32();
         hparams.n_rot = file.read_u32();
-        hparams.f16 = file.read_u32();
+        hparams.ftype = (enum llama_ftype) file.read_u32();
     }
     void read_vocab() {
         vocab.id_to_token.resize(hparams.n_vocab);
@@ -453,20 +453,21 @@ struct llama_file_loader {
             llama_load_tensor_shard shard;
             uint32_t n_dims = file.read_u32();
             uint32_t name_len = file.read_u32();
-            uint32_t ftype = file.read_u32();
+            shard.type = (enum ggml_type) file.read_u32();
             shard.ne.resize(n_dims);
             file.read_raw(shard.ne.data(), sizeof(shard.ne[0]) * n_dims);
             std::string name = file.read_string(name_len);
             if (n_dims < 1 || n_dims > 2) {
                 throw format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims);
             }
-            switch (ftype) {
-                case 0: shard.type = GGML_TYPE_F32; break;
-                case 1: shard.type = GGML_TYPE_F16; break;
-                case 2: shard.type = GGML_TYPE_Q4_0; break;
-                case 3: shard.type = GGML_TYPE_Q4_1; break;
+            switch (shard.type) {
+                case GGML_TYPE_F32:
+                case GGML_TYPE_F16:
+                case GGML_TYPE_Q4_0:
+                case GGML_TYPE_Q4_1:
+                    break;
                 default: {
-                    throw format("unrecognized ftype %u\n", ftype);
+                    throw format("unrecognized tensor type %u\n", shard.type);
                 }
             }

@@ -497,26 +498,26 @@ struct llama_file_loader {
 struct llama_file_saver {
     llama_file file;
     llama_file_loader * any_file_loader;
-    llama_file_saver(const char * fname, llama_file_loader * any_file_loader, uint32_t new_f16)
+    llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
         : file(fname, "wb"), any_file_loader(any_file_loader) {
         fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
         write_magic();
-        write_hparams(new_f16);
+        write_hparams(new_ftype);
         write_vocab();
     }
     void write_magic() {
         file.write_u32('ggjt'); // magic
         file.write_u32(1); // version
     }
-    void write_hparams(uint32_t new_f16) {
+    void write_hparams(enum llama_ftype new_ftype) {
         const llama_hparams & hparams = any_file_loader->hparams;
         file.write_u32(hparams.n_vocab);
         file.write_u32(hparams.n_embd);
         file.write_u32(hparams.n_mult);
         file.write_u32(hparams.n_head);
         file.write_u32(hparams.n_layer);
         file.write_u32(hparams.n_rot);
-        file.write_u32(new_f16);
+        file.write_u32(new_ftype);
     }
     void write_vocab() {
         if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
@@ -531,17 +532,17 @@ struct llama_file_saver {
         }
     }
     void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
-        uint32_t ftype;
         switch (new_type) {
-            case GGML_TYPE_F32: ftype = 0; break;
-            case GGML_TYPE_F16: ftype = 1; break;
-            case GGML_TYPE_Q4_0: ftype = 2; break;
-            case GGML_TYPE_Q4_1: ftype = 3; break;
+            case GGML_TYPE_F32:
+            case GGML_TYPE_F16:
+            case GGML_TYPE_Q4_0:
+            case GGML_TYPE_Q4_1:
+                break;
             default: LLAMA_ASSERT(false);
         }
         file.write_u32((uint32_t) tensor.ne.size());
         file.write_u32((uint32_t) tensor.name.size());
-        file.write_u32(ftype);
+        file.write_u32(new_type);
         file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
         file.write_raw(tensor.name.data(), tensor.name.size());
         file.seek(-file.tell() & 31, SEEK_CUR);
@@ -815,6 +816,16 @@ static const char *llama_file_version_name(llama_file_version version) {
     }
 }

+static const char *llama_ftype_name(enum llama_ftype ftype) {
+    switch (ftype) {
+        case LLAMA_FTYPE_ALL_F32:     return "all F32";
+        case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
+        case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
+        case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
+        default: LLAMA_ASSERT(false);
+    }
+}
+
 static const char *llama_model_type_name(e_model type) {
     switch (type) {
         case MODEL_7B: return "7B";
@@ -867,7 +878,7 @@ static void llama_model_load_internal(
         fprintf(stderr, "%s: n_head     = %u\n", __func__, hparams.n_head);
         fprintf(stderr, "%s: n_layer    = %u\n", __func__, hparams.n_layer);
         fprintf(stderr, "%s: n_rot      = %u\n", __func__, hparams.n_rot);
-        fprintf(stderr, "%s: f16        = %u\n", __func__, hparams.f16);
+        fprintf(stderr, "%s: ftype      = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
         fprintf(stderr, "%s: n_ff       = %u\n", __func__, n_ff);
         fprintf(stderr, "%s: n_parts    = %zu\n", __func__, ml->file_loaders.size());
         fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
@@ -1539,17 +1550,17 @@ static llama_vocab::id llama_sample_top_p_top_k(
 // quantization
 //

-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, int itype) {
+static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, enum llama_ftype ftype) {
     ggml_type quantized_type;
-    switch (itype) {
-        case 2: quantized_type = GGML_TYPE_Q4_0; break;
-        case 3: quantized_type = GGML_TYPE_Q4_1; break;
-        default: throw format("invalid quantization type %d\n", itype);
+    switch (ftype) {
+        case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
+        case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
+        default: throw format("invalid output file type %d\n", ftype);
     };

     std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp.c_str(), /*use_mmap*/ false,
                                                                             /*vocab_only*/ false));
-    llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), (uint32_t) itype);
+    llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), ftype);

     size_t total_size_org = 0;
     size_t total_size_new = 0;
@@ -1740,9 +1751,9 @@ void llama_free(struct llama_context * ctx) {
 int llama_model_quantize(
         const char * fname_inp,
         const char * fname_out,
-        int itype) {
+        enum llama_ftype ftype) {
     try {
-        llama_model_quantize_internal(fname_inp, fname_out, itype);
+        llama_model_quantize_internal(fname_inp, fname_out, ftype);
         return 0;
     } catch (const std::string & err) {
         fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.c_str());

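Both the loader and the saver now keep the raw u32 from the file as the enum value itself and only check that it is one they understand. Pulled out as a hypothetical standalone helper (not part of the commit), the validation pattern they share is simply:

#include <cstdint>

#include "ggml.h"

// hypothetical helper mirroring the switch used by llama_file_loader and
// llama_file_saver above: accept only the tensor types the format defines
static bool is_supported_tensor_type(uint32_t raw) {
    switch ((enum ggml_type) raw) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
            return true;
        default:
            return false;
    }
}
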
llama.h

Lines changed: 9 additions & 1 deletion
@@ -65,6 +65,14 @@ extern "C" {
         void * progress_callback_user_data;
     };

+    // model file types
+    enum llama_ftype {
+        LLAMA_FTYPE_ALL_F32     = 0,
+        LLAMA_FTYPE_MOSTLY_F16  = 1, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
+    };
+
     LLAMA_API struct llama_context_params llama_context_default_params();

     LLAMA_API bool llama_mmap_supported();
@@ -85,7 +93,7 @@ extern "C" {
     LLAMA_API int llama_model_quantize(
             const char * fname_inp,
             const char * fname_out,
-            int itype);
+            enum llama_ftype ftype);

     // Returns the KV cache that will contain the context for the
     // ongoing prediction with the model.

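With the header change, callers pass an enumerator instead of a magic integer. A minimal caller of the updated API might look like this (the file names are placeholders, and error reporting is left to the library):

#include "llama.h"

int main() {
    // quantize an F16 model to Q4_0; llama_model_quantize returns non-zero on failure
    if (llama_model_quantize("ggml-model-f16.bin",
                             "ggml-model-q4_0.bin",
                             LLAMA_FTYPE_MOSTLY_Q4_0) != 0) {
        return 1;
    }
    return 0;
}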