Commit 62303e7

convert-hf : minor changes for consistency
1 parent bc78bf4 commit 62303e7

File tree

1 file changed: +5 -3 lines changed


convert-hf-to-gguf.py

Lines changed: 5 additions & 3 deletions
@@ -2079,6 +2079,8 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_file_type(self.ftype)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
         # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
         # To prevent errors, skip loading lm_head.weight.
         if name == "lm_head.weight":
@@ -2089,7 +2091,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         if name.endswith("norm.weight"):
             data_torch = data_torch + 1
 
-        return super().modify_tensors(data_torch, name, bid)
+        return [(self.map_tensor_name(name), data_torch)]
 
 
 @Model.register("Starcoder2ForCausalLM")
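
Taken together, the two hunks above tighten modify_tensors: with bid deleted as unused at the top, the old return super().modify_tensors(data_torch, name, bid) would have referenced a removed local, so the method now builds the (mapped name, tensor) pair itself via map_tensor_name. A self-contained sketch of that flow under stated assumptions: the class name, the stubbed map_tensor_name, and plain Python numbers in place of real Tensors are illustrative, and the lm_head branch body lies outside the diff, so returning an empty list there is a guess.

from __future__ import annotations
from typing import Iterable


class ConverterSketch:
    # Hypothetical stand-in for the converter class touched by this commit.
    def map_tensor_name(self, name: str) -> str:
        # The real converter maps HF tensor names to GGUF names; a trivial stub here.
        return name.removeprefix("model.")

    def modify_tensors(self, data_torch, name: str, bid: int | None) -> Iterable[tuple[str, object]]:
        del bid  # unused

        # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
        # To prevent errors, skip loading lm_head.weight.
        if name == "lm_head.weight":
            return []  # assumption: the actual skip logic is outside the hunks

        if name.endswith("norm.weight"):
            data_torch = data_torch + 1  # norm weights are shifted by one, as in the diff

        return [(self.map_tensor_name(name), data_torch)]


# Usage: norm weights get +1 and every kept tensor comes back as a (mapped name, data) pair.
sketch = ConverterSketch()
print(sketch.modify_tensors(0.5, "model.norm.weight", bid=0))  # [('norm.weight', 1.5)]
print(sketch.modify_tensors(1.0, "lm_head.weight", bid=0))     # []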
@@ -2277,7 +2279,7 @@ def __init__(self, *, meta: Tensor, data: Tensor | None = None, args: tuple = ()
 
     @staticmethod
     def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
-        # TODO: dicts
+        # TODO: dict and set
         if isinstance(o, (list, tuple)):
             L = []
             for item in o:
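
The hunk only shows the head of _recurse_apply, plus the updated TODO noting that dict and set are still unhandled. A self-contained sketch of the recursion pattern those visible lines imply, written as a free function; how the real method treats non-container leaves is outside the hunk, so handing them straight to fn is an assumption.

from typing import Any, Callable


def recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
    # TODO: dict and set  (mirroring the updated comment in the diff)
    if isinstance(o, (list, tuple)):
        L = []
        for item in o:
            L.append(recurse_apply(item, fn))
        # assumption: tuples come back as tuples rather than lists
        return tuple(L) if isinstance(o, tuple) else L
    return fn(o)  # assumption: leaves are handed to fn directly


# Usage: container shapes are preserved, every leaf is doubled.
print(recurse_apply(((1, 2), [3]), lambda x: x * 2))  # ((2, 4), [6])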
@@ -2379,7 +2381,7 @@ def __neg__(self, *args):  # mamba
     def __add__(self, *args):  # gemma
         return self._wrap_fn(torch.Tensor.__add__)(self, *args)
 
-    def __getitem__(self, *args):  # bloom falcon internlm2
+    def __getitem__(self, *args):  # bloom falcon refact internlm2
         return self._wrap_fn(torch.Tensor.__getitem__)(self, *args)
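
The trailing comments on these dunders list the model architectures whose conversion hits each operator (this commit adds refact to the __getitem__ list). Structurally, each operator is forwarded through a single _wrap_fn helper around the matching torch.Tensor method. A hypothetical, eagerly evaluating illustration of that forwarding shape; the real wrapper exists to defer evaluation for lazy conversion, which this sketch deliberately does not reproduce.

import torch


class TensorProxySketch:
    # Hypothetical wrapper: every dunder routes through one generic _wrap_fn,
    # mirroring the structure shown in the diff (the real class is lazy, this one is eager).
    def __init__(self, tensor: torch.Tensor):
        self._tensor = tensor

    def _wrap_fn(self, fn):
        def wrapped(proxy, *args):
            # Unwrap the proxy and call the underlying torch.Tensor method.
            return fn(proxy._tensor, *args)
        return wrapped

    def __add__(self, *args):  # gemma
        return self._wrap_fn(torch.Tensor.__add__)(self, *args)

    def __getitem__(self, *args):  # bloom falcon refact internlm2
        return self._wrap_fn(torch.Tensor.__getitem__)(self, *args)


# Usage: indexing and addition on the proxy operate on the wrapped tensor.
p = TensorProxySketch(torch.arange(6).reshape(2, 3))
print(p[0])    # tensor([0, 1, 2])
print(p + 10)  # 2x3 tensor with values 10..15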
