1 parent 26e8f23 commit ce9413d
convert-hf-to-gguf.py
```diff
@@ -160,7 +160,7 @@ def write_tensors(self):
             data = data.astype(np.float32)

         # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+        if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
             data = data.astype(np.float32)

         # if f16 desired, convert any float32 2-dim weight tensors to float16
```
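For context, here is a minimal sketch of the dtype decision this hunk sits in, assuming the surrounding rules match the comments visible in the diff. The helper name `choose_output_dtype`, its signature, and the example tensor name are hypothetical and only illustrate the effect of the widened condition; they are not part of convert-hf-to-gguf.py.

```python
import numpy as np


def choose_output_dtype(data: np.ndarray, new_name: str, ftype: int) -> np.dtype:
    """Return the dtype a tensor would be stored with.

    Hypothetical helper for illustration only. ftype == 1 is assumed to
    mean "write f16", matching the comments shown in the hunk.
    """
    n_dims = data.ndim
    data_dtype = data.dtype

    # f16 output requested: float16 1-D tensors, and (after this commit)
    # tensors whose mapped name ends in "_norm.weight", are still stored
    # as float32.
    if ftype == 1 and data_dtype == np.float16 and (
        n_dims == 1 or new_name.endswith("_norm.weight")
    ):
        return np.dtype(np.float32)

    # f16 output requested: float32 2-dim weight tensors are downcast to
    # float16, per the trailing comment in the hunk.
    if ftype == 1 and data_dtype == np.float32 and n_dims == 2 and new_name.endswith(".weight"):
        return np.dtype(np.float16)

    return np.dtype(data_dtype)


# Example: a hypothetical norm weight arriving as float16 with more than
# one dimension now stays float32 in an f16 (ftype == 1) conversion.
norm_w = np.ones((2, 64), dtype=np.float16)
print(choose_output_dtype(norm_w, "blk.0.attn_norm.weight", ftype=1))  # float32
```

The practical effect of the added clause is presumably that norm weights keep full float32 precision in f16 output files even when they are not strictly 1-D, which the old `n_dims == 1` check alone did not cover.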