Skip to content

Commit 9932158

Browse files
ymckimglambda
authored and committed
convert : fix Llama-3_1-Nemotron-51B rope settings (ggml-org#11008)
* conflict resolution
* move comments after bracket to its own line
* DeciLMCausalModel now reads rope_theta from config.json properly
1 parent fa96cae commit 9932158

File tree

1 file changed

+3
-9
lines changed

1 file changed

+3
-9
lines changed

convert_hf_to_gguf.py

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1764,25 +1764,19 @@ def set_vocab(self):
17641764
self.gguf_writer.add_token_list(tokens)
17651765
self.gguf_writer.add_token_types(toktypes)
17661766

1767-
special_vocab = gguf.SpecialVocab(
1768-
self.dir_model, load_merges=True,
1769-
special_token_types = ['bos', 'eos', 'eom', 'eot']
1770-
)
1771-
special_vocab._set_special_token("bos", 128000)
1772-
special_vocab._set_special_token("eos", 128001)
1773-
special_vocab._set_special_token("eom", 128008)
1774-
special_vocab._set_special_token("eot", 128009)
1767+
special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
17751768
special_vocab.add_to_gguf(self.gguf_writer)
17761769
else:
17771770
# DeciLM-7B
17781771
self._set_vocab_llama_hf()
1779-
# self._set_vocab_gpt2()
17801772

17811773
def set_gguf_parameters(self):
17821774
if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B
17831775
assert self.block_count == len(self._num_kv_heads)
17841776
assert self.block_count == len(self._num_heads)
17851777
assert self.block_count == len(self._ffn_dims)
1778+
if (rope_theta := self.hparams.get("rope_theta")) is not None:
1779+
self.gguf_writer.add_rope_freq_base(rope_theta)
17861780
self.gguf_writer.add_head_count_kv(self._num_kv_heads)
17871781
self.gguf_writer.add_head_count(self._num_heads)
17881782
self.gguf_writer.add_feed_forward_length(self._ffn_dims)

0 commit comments

Comments
 (0)