
Commit 73cf442

llama : fix Gemma-2 Query scaling factors (#8473)
* 9B - query_pre_attn_scalar = 256 not 224

  See google/gemma_pytorch@03e6575

  Gemma 9B should use 256 and not 224 (self.config.hidden_size // self.config.num_attention_heads)

* llama : fix Gemma-2 Query scaling factor

ggml-ci

Co-authored-by: Daniel Han <[email protected]>
1 parent e236528 commit 73cf442
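As context for the fix, here is a minimal sketch of the arithmetic, assuming the published Gemma-2 config values (the hidden_size, num_attention_heads, head_dim, and query_pre_attn_scalar numbers below are taken from the Hugging Face model configs, not from this commit):

```python
import math

# Assumed Gemma-2 config values (from the published HF configs, not from this commit).
configs = {
    "gemma-2-9b":  {"hidden_size": 3584, "num_attention_heads": 16, "head_dim": 256, "query_pre_attn_scalar": 256},
    "gemma-2-27b": {"hidden_size": 4608, "num_attention_heads": 32, "head_dim": 128, "query_pre_attn_scalar": 144},
}

for name, cfg in configs.items():
    # What the old code used: hidden_size // num_attention_heads (i.e. n_embd / n_head).
    old_scalar = cfg["hidden_size"] // cfg["num_attention_heads"]
    # What the reference implementation uses: query_pre_attn_scalar.
    new_scalar = cfg["query_pre_attn_scalar"]
    print(f"{name}: n_embd/n_head = {old_scalar}, query_pre_attn_scalar = {new_scalar}, "
          f"query scale = {1.0 / math.sqrt(new_scalar):.6f}")
```

Under these assumed values, 9B has n_embd/n_head = 224 but query_pre_attn_scalar = 256 (the head dimension), so the old 1/sqrt(224) scale was wrong for 9B, while for 27B the two coincide at 144 and nothing changes.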

File tree

2 files changed: +6 −6 lines changed

convert_hf_to_gguf.py

Lines changed: 0 additions & 5 deletions
```diff
@@ -2504,11 +2504,6 @@ def set_gguf_parameters(self):
         )
         self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
 
-        # sanity check
-        attn_scalar = self.hparams["query_pre_attn_scalar"]
-        if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
-            raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
-
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
 
```

src/llama.cpp

Lines changed: 6 additions & 1 deletion
```diff
@@ -11680,7 +11680,12 @@ struct llm_build_context {
                         ext_factor, attn_factor, beta_fast, beta_slow);
                 cb(Qcur, "Qcur", il);
 
-                Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head)));
+                // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
+                switch (model.type) {
+                    case e_model::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
+                    case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+                    default: GGML_ASSERT(false);
+                };
                 cb(Qcur, "Qcur_scaled", il);
 
                 Kcur = ggml_rope_ext(
```
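For illustration only (this is not llama.cpp code), a small NumPy sketch of where the scale enters: the query is pre-scaled by 1/sqrt(query_pre_attn_scalar) before the QKᵀ softmax, which is what the ggml_scale call above does, using n_embd_head_k for 9B and n_embd/n_head for 27B.

```python
import math
import numpy as np

def attention_weights(q: np.ndarray, k: np.ndarray, query_pre_attn_scalar: float) -> np.ndarray:
    """Toy sketch: pre-scale Q by 1/sqrt(query_pre_attn_scalar), then softmax(Q K^T)."""
    q = q * (1.0 / math.sqrt(query_pre_attn_scalar))   # mirrors the ggml_scale call in the diff
    logits = q @ k.T                                    # [n_tokens, n_tokens] attention logits
    logits -= logits.max(axis=-1, keepdims=True)        # subtract row max for numerical stability
    w = np.exp(logits)
    return w / w.sum(axis=-1, keepdims=True)

# e.g. 9B-style scaling uses the head dimension (256); 27B-style uses n_embd/n_head (144).
q = np.random.randn(4, 256).astype(np.float32)
k = np.random.randn(4, 256).astype(np.float32)
print(attention_weights(q, k, query_pre_attn_scalar=256.0).sum(axis=-1))  # each row sums to ~1
```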
