
Commit 0988c58

kunal-vaishnavi authored and hodlen committed
gemma : fix bfloat16 -> float16 conversion issue (ggml-org#5810)
1 parent e667d98 · commit 0988c58

File tree

1 file changed: 3 additions and 4 deletions


convert-hf-to-gguf.py

Lines changed: 3 additions & 4 deletions
@@ -1811,16 +1811,15 @@ def write_tensors(self):
         tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
 
         for name, data_torch in self.get_tensors():
-            # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
-            if name.endswith("norm.weight"):
-                data_torch = data_torch + 1
-
             old_dtype = data_torch.dtype
 
             # convert any unsupported data types to float32
             if data_torch.dtype not in (torch.float16, torch.float32):
                 data_torch = data_torch.to(torch.float32)
 
+            # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
+            if name.endswith("norm.weight"):
+                data_torch = data_torch + 1
             data = data_torch.squeeze().numpy()
 
             # map tensor names
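
With this change, the Gemma norm-weight adjustment (data_torch + 1) runs only after any bfloat16 tensor has been widened to float32, so the addition is no longer performed at bfloat16 precision. The following is a minimal sketch, not part of the commit, of why the ordering can change the exported values; it assumes PyTorch is available, and the sample weight value 0.0012345 is invented purely for illustration.

import torch

# A made-up small norm weight, stored in bfloat16 as in the Gemma checkpoints.
w = torch.tensor([0.0012345], dtype=torch.bfloat16)

# Old order: "+ 1" is computed while the tensor is still bfloat16, where the
# spacing between representable values in [1, 2) is 2**-7 (~0.0078).
old_order = (w + 1).to(torch.float32)

# New order (this commit): widen to float32 first, then add 1.
new_order = w.to(torch.float32) + 1

print(old_order.item())  # 1.0      -- the small weight is rounded away
print(new_order.item())  # ~1.00124 -- the small weight survives

In both orderings the tensor handed to data_torch.squeeze().numpy() ends up float32 for a bfloat16 checkpoint; the reorder only changes the precision at which the "+ 1" is applied.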

0 commit comments
