Skip to content

Commit 9651dc8

Browse files
robbiemuarthw
authored and committed
llama : add 128k yarn context for Qwen (ggml-org#10698)
* add 128k yarn context for Qwen * added property for model tensors * removing useless line
1 parent c5ad0bf commit 9651dc8

File tree

2 files changed

+9
-0
lines changed

2 files changed

+9
-0
lines changed

convert_hf_to_gguf.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1992,6 +1992,14 @@ def set_vocab(self):
19921992
except FileNotFoundError:
19931993
self._set_vocab_gpt2()
19941994

1995+
def set_gguf_parameters(self):
    """Write standard GGUF hyperparameters, plus YaRN RoPE-scaling metadata.

    After delegating to the base implementation, inspect ``rope_scaling`` in
    the HF config: when it is present, carries a ``factor``, and declares
    type ``"yarn"``, emit the scaling type, factor, and original context
    length (e.g. the 128k-context Qwen variants) to the GGUF writer.
    """
    super().set_gguf_parameters()
    rope_scaling = self.hparams.get("rope_scaling")
    # Only act on a YaRN-style config that actually specifies a factor.
    if rope_scaling is not None and "factor" in rope_scaling and rope_scaling.get("type") == "yarn":
        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
        self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
        # NOTE(review): raises KeyError if "original_max_position_embeddings"
        # is absent — same behavior as the original code.
        self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
2002+
19952003

19962004
@Model.register("Qwen2MoeForCausalLM")
19972005
class Qwen2MoeModel(Model):

gguf-py/gguf/constants.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -761,6 +761,7 @@ class MODEL_TENSOR(IntEnum):
761761
MODEL_TENSOR.TOKEN_EMBD,
762762
MODEL_TENSOR.OUTPUT_NORM,
763763
MODEL_TENSOR.OUTPUT,
764+
MODEL_TENSOR.ROPE_FREQS,
764765
MODEL_TENSOR.ATTN_NORM,
765766
MODEL_TENSOR.ATTN_Q,
766767
MODEL_TENSOR.ATTN_K,

0 commit comments

Comments
 (0)