Skip to content

Commit 49d4309

Browse files
committed
convert : update Falcon script for new HF config
1 parent 1c84003 commit 49d4309

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

convert-falcon-hf-to-gguf.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ def parse_args() -> argparse.Namespace:
9999
with open(dir_model / "config.json", "r", encoding="utf-8") as f:
100100
hparams = json.load(f)
101101

102-
if hparams["architectures"][0] != "RWForCausalLM":
102+
if hparams["architectures"][0] != "FalconForCausalLM":
103103
print("Model architecture not supported: " + hparams["architectures"][0])
104104

105105
sys.exit(1)
@@ -112,15 +112,15 @@ def parse_args() -> argparse.Namespace:
112112

113113
print("gguf: get model metadata")
114114

115-
block_count = hparams["n_layer"]
115+
block_count = hparams["num_hidden_layers"]
116116

117117
gguf_writer.add_name("Falcon")
118118
gguf_writer.add_context_length(2048) # not in config.json
119119
gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform
120120
gguf_writer.add_embedding_length(hparams["hidden_size"])
121121
gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"])
122122
gguf_writer.add_block_count(block_count)
123-
gguf_writer.add_head_count(hparams["n_head"])
123+
gguf_writer.add_head_count(hparams["num_attention_heads"])
124124
if "n_head_kv" in hparams:
125125
gguf_writer.add_head_count_kv(hparams["n_head_kv"])
126126
else:
@@ -179,7 +179,7 @@ def parse_args() -> argparse.Namespace:
179179
tensor_map = gguf.get_tensor_name_map(ARCH,block_count)
180180

181181
# params for qkv transform
182-
n_head = hparams["n_head"]
182+
n_head = hparams["num_attention_heads"]
183183
n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1
184184

185185
head_dim = hparams["hidden_size"] // n_head

0 commit comments

Comments (0)