
Commit 7b92973

add command line switch to use f16 instead of f32 for memory k+v
1 parent 23f5f54 commit 7b92973

File tree

3 files changed (+11 additions, −6 deletions)


main.cpp

Lines changed: 7 additions & 6 deletions

@@ -86,7 +86,7 @@ struct llama_model {
 };
 
 // load the model's weights from a file
-bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx) {
+bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx, ggml_type memory_type = GGML_TYPE_F32) {
     fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
 
     std::vector<char> f_buf(1024*1024);

@@ -209,8 +209,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
         ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
 
@@ -296,8 +296,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         const int n_mem = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
-        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
+        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);

@@ -819,8 +819,9 @@ int main(int argc, char ** argv) {
 
     // load the model
     {
+        const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
         const int64_t t_start_us = ggml_time_us();
-        if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
+        if (!llama_model_load(params.model, model, vocab, params.n_ctx, memory_type)) {
             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
             return 1;
         }
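
For a sense of what the switch saves, here is a standalone back-of-the-envelope sketch (not part of the commit) of the KV memory computed by the formula above. The n_layer and n_embd values are assumed 7B-model shapes; n_ctx is the default from utils.h.

    // Standalone sketch (not in the commit): size of memory_k + memory_v,
    // using the same n_ctx*n_layer*n_embd element count as llama_model_load.
    #include <cstdio>

    int main() {
        const long long n_ctx   = 512;  // default context size (utils.h)
        const long long n_layer = 32;   // assumption: 7B model
        const long long n_embd  = 4096; // assumption: 7B model

        const long long n_elements = n_ctx*n_layer*n_embd; // per tensor (k or v)
        const long long f32_bytes  = 2*n_elements*4;       // both tensors, 4 bytes/elem
        const long long f16_bytes  = 2*n_elements*2;       // both tensors, 2 bytes/elem

        printf("f32 memory k+v: %lld MiB\n", f32_bytes/(1024*1024)); // 512 MiB
        printf("f16 memory k+v: %lld MiB\n", f16_bytes/(1024*1024)); // 256 MiB
        return 0;
    }

Halving the element size halves the KV memory, and the saving grows linearly with the context size.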

utils.cpp

Lines changed: 3 additions & 0 deletions

@@ -51,6 +51,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.top_k = std::stoi(argv[++i]);
         } else if (arg == "-c" || arg == "--ctx_size") {
             params.n_ctx = std::stoi(argv[++i]);
+        } else if (arg == "--memory_f16") {
+            params.memory_f16 = true;
         } else if (arg == "--top_p") {
             params.top_p = std::stof(argv[++i]);
         } else if (arg == "--temp") {

@@ -107,6 +109,7 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --repeat_last_n N     last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
     fprintf(stderr, "  --repeat_penalty N    penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty);
     fprintf(stderr, "  -c N, --ctx_size N    size of the prompt context (default: %d)\n", params.n_ctx);
+    fprintf(stderr, "  --memory_f16          use f16 instead of f32 for memory key+value\n");
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
     fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stderr, "  -m FNAME, --model FNAME\n");

utils.h

Lines changed: 1 addition & 0 deletions

@@ -18,6 +18,7 @@ struct gpt_params {
     int32_t n_predict = 128; // new tokens to predict
     int32_t repeat_last_n = 64; // last n tokens to penalize
     int32_t n_ctx = 512; // context size
+    bool memory_f16 = false; // use f16 instead of f32 for memory kv
 
     // sampling parameters
     int32_t top_k = 40;
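
Taken together, the three files form a small pipeline: the parser sets the new gpt_params field, and main.cpp maps it to a ggml type before loading. A minimal sketch of that mapping, using only declarations shown in this commit:

    // Minimal sketch mirroring the main.cpp change above.
    gpt_params params;
    params.memory_f16 = true; // what parsing "--memory_f16" sets

    // f16 halves the KV memory; f32 stays the default for full precision.
    const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
    // memory_type is then passed through to llama_model_load(..., memory_type).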
