Skip to content

Commit 698f0b3

Browse files
committed
convert-hf : remove unused n_dims in extra_*_tensors
1 parent c33775b commit 698f0b3

File tree

2 files changed: +10 additions, −12 deletions

convert-hf-to-gguf.py

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -165,10 +165,10 @@ def set_gguf_parameters(self):
165165
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
166166
return [(self.map_tensor_name(name), data_torch)]
167167

168-
def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool:
168+
def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool:
169169
return False
170170

171-
def extra_f16_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool:
171+
def extra_f16_tensors(self, name: str, new_name: str, bid: int | None) -> bool:
172172
return False
173173

174174
def write_tensors(self):
@@ -199,8 +199,8 @@ def write_tensors(self):
199199
data = data.astype(np.float32)
200200

201201
# when both are true, the tensor keeps its original type
202-
extra_f32 = self.extra_f32_tensors(n_dims, name, new_name, bid)
203-
extra_f16 = self.extra_f16_tensors(n_dims, name, new_name, bid)
202+
extra_f32 = self.extra_f32_tensors(name, new_name, bid)
203+
extra_f16 = self.extra_f16_tensors(name, new_name, bid)
204204

205205
# 1d tensors need to be converted to float32
206206
if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or extra_f32) and not extra_f16:
@@ -1038,8 +1038,8 @@ def set_vocab(self):
10381038
# self.gguf_writer.add_bos_token_id(71013)
10391039
# self.gguf_writer.add_eos_token_id(71013)
10401040

1041-
def extra_f32_tensors(self, n_dims: int, name: str, new_name: str) -> bool:
1042-
del n_dims, name, new_name # unused
1041+
def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool:
1042+
del name, new_name, bid # unused
10431043

10441044
# TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
10451045
return True
@@ -2152,8 +2152,8 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
21522152

21532153
return [(self.map_tensor_name(name), data_torch)]
21542154

2155-
def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool:
2156-
del n_dims, new_name, bid # unused
2155+
def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool:
2156+
del new_name, bid # unused
21572157

21582158
# not used with get_rows, must be F32
21592159
return name == "embeddings.token_type_embeddings.weight"
@@ -2345,9 +2345,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
23452345

23462346
return [(new_name, data_torch)]
23472347

2348-
def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool:
2349-
del n_dims # unused
2350-
2348+
def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool:
23512349
return new_name in (self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [
23522350
gguf.MODEL_TENSOR.SSM_CONV1D,
23532351
gguf.MODEL_TENSOR.SSM_X,

gguf-py/scripts/gguf-new-metadata.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from pathlib import Path
88

99
import numpy as np
10-
from typing import Any, Mapping, Sequence
10+
from typing import Any, Sequence
1111

1212
# Necessary to load the local gguf package
1313
if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():

Commit comments: 0