We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 9070e33 · commit 7a7d1ba (copy full SHA for 7a7d1ba)
convert-llama-hf-to-gguf.py
@@ -126,7 +126,7 @@ def count_model_parts(dir_model: str) -> int:
126
gguf_writer.add_head_count_kv(head_count_kv)
127
gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
128
129
-if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+if "rope_scaling" in hparams and hparams["rope_scaling"] is not None and "factor" in hparams["rope_scaling"]:
130
if "type" in hparams["rope_scaling"]:
131
if hparams["rope_scaling"]["type"] == "linear":
132
gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
0 commit comments