
Commit 616d9e8

CausalLM authored and arthw committed
llama : fix ChatGLM4 wrong shape (ggml-org#9194)
This should fix THUDM/glm-4-9b-chat-1m and CausalLM/miniG
1 parent dbaf7e6 · commit 616d9e8

File tree: 1 file changed (+2, −2 lines)


src/llama.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -8120,8 +8120,8 @@ static bool llm_load_tensors(
 
             layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
 
-            layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + (hparams.n_embd_head_k << 2)});
-            layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + (hparams.n_embd_head_k << 2)});
+            layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
+            layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
 
             layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
 
```
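For context on the shape change: the fused QKV projection stores the Q rows plus the grouped K and V rows, so its row count is n_embd + 2*n_embd_gqa, where n_embd_gqa is the per-layer K/V width (n_embd_head_k * n_head_kv when K and V heads have the same size). The old expression n_embd + (n_embd_head_k << 2) hard-coded four heads' worth of K/V and only happens to match when n_head_kv == 2, which is why GLM-4 variants with a different KV-head count loaded the tensor with the wrong shape. The sketch below is not from the commit; it just works through that arithmetic with assumed GLM-4-9B-style hyperparameters (n_embd = 4096, n_embd_head_k = 128) and a hypothetical second KV-head count for contrast.

```cpp
// Standalone sketch (not from the commit): reproduces the row-count arithmetic
// for the fused QKV tensor under grouped-query attention. Hyperparameter
// values below are assumptions for illustration, not read from any model file.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_embd        = 4096; // hidden size (GLM-4-9B-style assumption)
    const int64_t n_embd_head_k = 128;  // per-head K dimension (assumption)

    for (int64_t n_head_kv : {2, 4}) {  // number of KV heads; 4 is hypothetical
        const int64_t n_embd_gqa = n_embd_head_k * n_head_kv; // width of K (or V)

        // old expression: hard-codes 4 * n_embd_head_k worth of K+V rows
        const int64_t rows_old = n_embd + (n_embd_head_k << 2);
        // fixed expression: Q rows + K rows + V rows
        const int64_t rows_new = n_embd + 2 * n_embd_gqa;

        std::printf("n_head_kv=%2lld  old=%lld  new=%lld  %s\n",
                    (long long) n_head_kv, (long long) rows_old, (long long) rows_new,
                    rows_old == rows_new ? "match" : "mismatch");
    }
    return 0;
}
```

With n_head_kv = 2 both expressions give 4608 rows, so the original GLM-4-9B loaded fine; with any other KV-head count they diverge, matching the models named in the commit message as the ones this change should fix.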
