
Commit 9e22bb7

fix: lint
1 parent f7d2e91 commit 9e22bb7

File tree

1 file changed: +3, -3 lines


convert_hf_to_gguf.py

Lines changed: 3 additions & 3 deletions
@@ -2731,7 +2731,7 @@ def set_vocab(self):
         else:
             # Use the GPT-NeoX tokenizer when no tokenizer files are present
             self._set_vocab_builtin("gpt-neox", vocab_size)
-
+
     def set_gguf_parameters(self):
         d_model = self.find_hparam(["hidden_size", "d_model"])
         d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
@@ -2741,7 +2741,7 @@ def set_gguf_parameters(self):
         # ref: https://stackoverflow.com/a/17511341/22827863
         # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
         dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
-        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
+        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
         use_dt_b_c_norm = False
         # For falconmamba we do apply RMS norm on B / DT and C layers
         if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
@@ -3858,7 +3858,7 @@ def prepare_tensors(self):
                 self.gguf_writer.add_tensor(self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), np.array(rope_factors, dtype=np.float32))

         super().prepare_tensors()
-
+

###### CONVERSION LOGIC ######

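Aside on the dt_rank fallback visible as context in the second hunk: the expression -(d_model // -16) is a ceiling division written with integer floor division, which is what the StackOverflow reference in the diff points at, and the trailing `or` supplies a default when the hyperparameter lookup comes back empty. A minimal standalone sketch of both idioms, using a made-up d_model value purely for illustration:

import math

d_model = 2560                   # hypothetical hidden size, for illustration only
dt_rank = -(d_model // -16)      # equals ceil(d_model / 16) without any float math
assert dt_rank == math.ceil(d_model / 16)

# the `value or default` fallback pattern used on the rms_norm_eps line
rms_norm_eps = None or 1e-5      # picks 1e-5 when the lookup yields None
print(dt_rank, rms_norm_eps)     # 160 1e-05

Note that `or` also replaces a legitimate falsy value such as 0, which is acceptable here because neither a rank nor an epsilon is meaningfully zero.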