Skip to content

Commit 23f5f54

Browse files
ty-everett authored and Green-Sky committed
Use F16 for memory_k and memory_v
1 parent 22213a1 commit 23f5f54

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

main.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -209,8 +209,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
209209
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
210210
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3
211211

212-
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
213-
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
212+
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
213+
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
214214

215215
ctx_size += (5 + 10*n_layer)*256; // object overhead
216216

@@ -296,8 +296,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
296296
const int n_mem = n_layer*n_ctx;
297297
const int n_elements = n_embd*n_mem;
298298

299-
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
300-
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
299+
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
300+
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
301301

302302
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
303303

0 commit comments

Comments
 (0)