We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 48a7ef6 · commit b8ff85e — Copy full SHA for b8ff85e
convert-hf-to-gguf.py
@@ -1763,9 +1763,7 @@ def get_tensors(self):
         for name, data in super().get_tensors():
             # Nomic Embed's token embeddings tensor is padded, but llama.cpp wants tensor sizes to match exactly.
             if name == 'embeddings.word_embeddings.weight' and data.shape[1] != self.vocab_size:
-                rounded_vocab_size = (self.vocab_size + 7) // 8 * 8
-                print(data.shape)
-                print(rounded_vocab_size, self.hparams["n_embd"])
+                rounded_vocab_size = (self.vocab_size + 63) // 64 * 64
                 assert data.shape == (rounded_vocab_size, self.hparams["n_embd"])
                 data = data[:self.vocab_size, :]
                 yield name, data
0 commit comments