1 parent c082b9f commit 9070e33
convert-llama-7b-pth-to-gguf.py
@@ -118,7 +118,7 @@ def count_model_parts(dir_model: str) -> int:
 gguf_writer.add_head_count_kv(head_count_kv)
 gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
-if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
     if "type" in hparams["rope_scaling"]:
         if hparams["rope_scaling"]["type"] == "linear":
             gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
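
The added check guards against model configs where the "rope_scaling" key is present but set to null, which JSON parsing turns into None; in that case the old membership test "factor" in hparams["rope_scaling"] raises a TypeError. A minimal sketch of the failure mode follows, using hypothetical config values (not taken from the commit):

import json

# Hypothetical HF-style config where rope_scaling is present but null.
hparams = json.loads('{"rms_norm_eps": 1e-06, "rope_scaling": null}')

# Old condition: the first test passes, but the second does a membership
# check against None and raises
# TypeError: argument of type 'NoneType' is not iterable.
try:
    if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
        pass
except TypeError as e:
    print("old check fails:", e)

# Patched condition: short-circuits on the None value, so the block is skipped.
if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
    print("rope scaling factor:", hparams["rope_scaling"]["factor"])
else:
    print("no rope scaling configured")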