Skip to content

Commit d637bb9

Browse files
committed
fix: change name
1 parent 4553502 commit d637bb9

File tree

2 files changed

+4
-4
lines changed

2 files changed

+4
-4
lines changed

convert_hf_to_gguf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2759,7 +2759,7 @@ def set_gguf_parameters(self):
27592759
self.gguf_writer.add_ssm_state_size(d_state)
27602760
self.gguf_writer.add_ssm_time_step_rank(dt_rank)
27612761
self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
2762-
self.gguf_writer.add_mamba_dt_b_c_rms(use_dt_b_c_norm) # For classic Mamba we don't apply rms norm on B / DT layers
2762+
self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm) # For classic Mamba we don't apply rms norm on B / DT layers
27632763
self.gguf_writer.add_file_type(self.ftype)
27642764

27652765
_tok_embd = None

gguf-py/gguf/gguf_writer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -714,9 +714,6 @@ def add_rope_scaling_orig_ctx_len(self, value: int) -> None:
714714

715715
def add_rope_scaling_finetuned(self, value: bool) -> None:
716716
self.add_bool(Keys.Rope.SCALING_FINETUNED.format(arch=self.arch), value)
717-
718-
def add_ssm_dt_b_c_rms(self, value: bool) -> None:
719-
self.add_bool(Keys.SSM.DT_B_C_RMS.format(arch=self.arch), value)
720717

721718
def add_rope_scaling_yarn_log_mul(self, value: float) -> None:
722719
self.add_float32(Keys.Rope.SCALING_YARN_LOG_MUL.format(arch=self.arch), value)
@@ -733,6 +730,9 @@ def add_ssm_state_size(self, value: int) -> None:
733730
def add_ssm_time_step_rank(self, value: int) -> None:
734731
self.add_uint32(Keys.SSM.TIME_STEP_RANK.format(arch=self.arch), value)
735732

733+
def add_ssm_dt_b_c_rms(self, value: bool) -> None:
734+
self.add_bool(Keys.SSM.DT_B_C_RMS.format(arch=self.arch), value)
735+
736736
def add_tokenizer_model(self, model: str) -> None:
737737
self.add_string(Keys.Tokenizer.MODEL, model)
738738

0 commit comments

Comments (0)