Skip to content

Commit 21ec0ff

Browse files
committed
*.py: Convert logger error and sys.exit() into a raise exception (for atypical error)
1 parent d74613e commit 21ec0ff

File tree

2 files changed

+30
-57
lines changed

2 files changed

+30
-57
lines changed

convert-hf-to-gguf.py

Lines changed: 28 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -153,8 +153,7 @@ def write_tensors(self):
153153
# map tensor names
154154
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
155155
if new_name is None:
156-
logger.error(f"Can not map tensor {name!r}")
157-
sys.exit()
156+
raise ValueError(f"Can not map tensor {name!r}")
158157

159158
n_dims = len(data.shape)
160159
data_dtype = data.dtype
@@ -486,8 +485,7 @@ def write_tensors(self):
486485
# map tensor names
487486
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
488487
if new_name is None:
489-
logger.error(f"Can not map tensor {name!r}")
490-
sys.exit()
488+
raise ValueError(f"Can not map tensor {name!r}")
491489

492490
n_dims = len(data.shape)
493491
data_dtype = data.dtype
@@ -570,8 +568,7 @@ def write_tensors(self):
570568
else:
571569
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
572570
if new_name is None:
573-
logger.error(f"Can not map tensor {name!r}")
574-
sys.exit()
571+
raise ValueError(f"Can not map tensor {name!r}")
575572

576573
n_dims = len(data.shape)
577574
data_dtype = data.dtype
@@ -614,8 +611,7 @@ def set_gguf_parameters(self):
614611
elif "model_max_length" in self.hparams:
615612
ctx_length = self.hparams["model_max_length"]
616613
else:
617-
logger.error("gguf: can not find ctx length parameter.")
618-
sys.exit()
614+
raise ValueError("gguf: can not find ctx length parameter.")
619615

620616
self.gguf_writer.add_file_type(self.ftype)
621617
self.gguf_writer.add_name(self.dir_model.name)
@@ -653,8 +649,7 @@ def write_tensors(self):
653649
# map tensor names
654650
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
655651
if new_name is None:
656-
logger.error(f"Can not map tensor {name!r}")
657-
sys.exit()
652+
raise ValueError(f"Can not map tensor {name!r}")
658653

659654
n_dims = len(data.shape)
660655
data_dtype = data.dtype
@@ -696,8 +691,7 @@ def set_gguf_parameters(self):
696691
elif "model_max_length" in self.hparams:
697692
ctx_length = self.hparams["model_max_length"]
698693
else:
699-
logger.error("gguf: can not find ctx length parameter.")
700-
sys.exit()
694+
raise ValueError("gguf: can not find ctx length parameter.")
701695

702696
self.gguf_writer.add_name(self.dir_model.name)
703697
self.gguf_writer.add_source_hf_repo(hf_repo)
@@ -751,8 +745,7 @@ def write_tensors(self):
751745
# map tensor names
752746
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
753747
if new_name is None:
754-
logger.error(f"Can not map tensor {name!r}")
755-
sys.exit()
748+
raise ValueError(f"Can not map tensor {name!r}")
756749

757750
n_dims = len(data.shape)
758751
data_dtype = data.dtype
@@ -853,8 +846,7 @@ def set_gguf_parameters(self):
853846
elif "model_max_length" in self.hparams:
854847
ctx_length = self.hparams["model_max_length"]
855848
else:
856-
logger.error("gguf: can not find ctx length parameter.")
857-
sys.exit()
849+
raise ValueError("gguf: can not find ctx length parameter.")
858850

859851
self.gguf_writer.add_name(self.dir_model.name)
860852
self.gguf_writer.add_source_hf_repo(hf_repo)
@@ -903,8 +895,7 @@ def write_tensors(self):
903895
# map tensor names
904896
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
905897
if new_name is None:
906-
logger.error(f"Can not map tensor {name!r}")
907-
sys.exit()
898+
raise ValueError(f"Can not map tensor {name!r}")
908899

909900
n_dims = len(data.shape)
910901
data_dtype = data.dtype
@@ -1008,8 +999,7 @@ def write_tensors(self):
1008999
# map tensor names
10091000
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
10101001
if new_name is None:
1011-
logger.error(f"Can not map tensor {name!r}")
1012-
sys.exit()
1002+
raise ValueError(f"Can not map tensor {name!r}")
10131003

10141004
n_dims = len(data.shape)
10151005
data_dtype = data.dtype
@@ -1111,10 +1101,9 @@ def write_tensors(self):
11111101
data = data_torch.squeeze().numpy()
11121102

11131103
# map tensor names
1114-
new_name = tensor_map.get_name(name, try_suffixes=(".weight",))
1104+
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
11151105
if new_name is None:
1116-
logger.error(f"Can not map tensor {name!r}")
1117-
sys.exit()
1106+
raise ValueError(f"Can not map tensor {name!r}")
11181107

11191108
n_dims = len(data.shape)
11201109
data_dtype = data.dtype
@@ -1180,8 +1169,7 @@ def write_tensors(self):
11801169
data = data_torch.to(torch.float32).squeeze().numpy()
11811170
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
11821171
if new_name is None:
1183-
logger.error(f"Can not map tensor {name!r}")
1184-
sys.exit()
1172+
raise ValueError(f"Can not map tensor {name!r}")
11851173
n_dims = len(data.shape)
11861174
logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
11871175
self.gguf_writer.add_tensor(new_name, data)
@@ -1293,8 +1281,7 @@ def write_tensors(self):
12931281

12941282
new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
12951283
if new_name is None:
1296-
logger.error(f"Can not map tensor {name!r}")
1297-
sys.exit()
1284+
raise ValueError(f"Can not map tensor {name!r}")
12981285

12991286
logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
13001287

@@ -1304,8 +1291,7 @@ def write_tensors(self):
13041291
# map tensor names
13051292
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
13061293
if new_name is None:
1307-
logger.error(f"Can not map tensor {name!r}")
1308-
sys.exit()
1294+
raise ValueError(f"Can not map tensor {name!r}")
13091295

13101296
n_dims = len(data.shape)
13111297
data_dtype = data.dtype
@@ -1397,8 +1383,7 @@ def write_tensors(self):
13971383

13981384
new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
13991385
if new_name is None:
1400-
logger.error(f"Can not map tensor {name!r}")
1401-
sys.exit()
1386+
raise ValueError(f"Can not map tensor {name!r}")
14021387

14031388
logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
14041389

@@ -1408,8 +1393,7 @@ def write_tensors(self):
14081393
# map tensor names
14091394
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
14101395
if new_name is None:
1411-
logger.error(f"Can not map tensor {name!r}")
1412-
sys.exit()
1396+
raise ValueError(f"Can not map tensor {name!r}")
14131397

14141398
n_dims = len(data.shape)
14151399
data_dtype = data.dtype
@@ -1502,17 +1486,15 @@ def write_tensors(self):
15021486
# https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
15031487
new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
15041488
if new_name is None:
1505-
logger.error(f"Can not map tensor {name!r}")
1506-
sys.exit()
1489+
raise ValueError(f"Can not map tensor {name!r}")
15071490

15081491
n_dims = len(data.shape)
15091492
data_dtype = data.dtype
15101493

15111494
# Most of the codebase that takes in 1D tensors only handles F32 tensors
15121495
# and most of the outputs tensors are F32.
15131496
if data_dtype != np.float32 and n_dims == 1:
1514-
logger.error(f"Can not map tensor {name!r}: all 1D tensors must be F32")
1515-
sys.exit()
1497+
raise ValueError(f"Can not map tensor {name!r}: all 1D tensors must be F32")
15161498

15171499
# if f32 desired, convert any float16 to float32
15181500
if self.ftype == 0 and data_dtype == np.float16:
@@ -1584,8 +1566,7 @@ def write_tensors(self):
15841566
# map tensor names
15851567
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
15861568
if new_name is None:
1587-
logger.error(f"Can not map tensor {name!r}")
1588-
sys.exit()
1569+
raise ValueError(f"Can not map tensor {name!r}")
15891570

15901571
n_dims = len(data.shape)
15911572
data_dtype = data.dtype
@@ -1668,8 +1649,7 @@ def write_tensors(self):
16681649
# map tensor names
16691650
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
16701651
if new_name is None:
1671-
logger.error(f"Can not map tensor {name!r}")
1672-
sys.exit()
1652+
raise ValueError(f"Can not map tensor {name!r}")
16731653

16741654
n_dims = len(data.shape)
16751655
data_dtype = data.dtype
@@ -1732,8 +1712,7 @@ def write_tensors(self):
17321712
# map tensor names
17331713
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
17341714
if new_name is None:
1735-
logger.error(f"Can not map tensor {name!r}")
1736-
sys.exit()
1715+
raise ValueError(f"Can not map tensor {name!r}")
17371716

17381717
n_dims = len(data.shape)
17391718
data_dtype = data.dtype
@@ -1830,8 +1809,7 @@ def write_tensors(self):
18301809
# map tensor names
18311810
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
18321811
if new_name is None:
1833-
logger.error(f"Can not map tensor {name!r}")
1834-
sys.exit()
1812+
raise ValueError(f"Can not map tensor {name!r}")
18351813

18361814
# shuffle for broadcasting of gqa in ggml_mul_mat
18371815
if new_name.endswith("attn_q.weight"):
@@ -1908,8 +1886,7 @@ def write_tensors(self):
19081886
# map tensor names
19091887
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
19101888
if new_name is None:
1911-
logger.error(f"Can not map tensor {name!r}")
1912-
sys.exit()
1889+
raise ValueError(f"Can not map tensor {name!r}")
19131890

19141891
n_dims = len(data.shape)
19151892
data_dtype = data.dtype
@@ -2054,8 +2031,7 @@ def post_write_tensors(self, tensor_map, name, data_torch):
20542031
# map tensor names
20552032
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
20562033
if new_name is None:
2057-
logger.error(f"Can not map tensor {name!r}")
2058-
sys.exit()
2034+
raise ValueError(f"Can not map tensor {name!r}")
20592035

20602036
n_dims = len(data.shape)
20612037
data_dtype = data.dtype
@@ -2182,8 +2158,7 @@ def write_tensors(self):
21822158
# map tensor names
21832159
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
21842160
if new_name is None:
2185-
logger.error(f"Can not map tensor {name!r}")
2186-
sys.exit()
2161+
raise ValueError(f"Can not map tensor {name!r}")
21872162

21882163
data = data_torch.squeeze().numpy()
21892164
n_dims = len(data.shape)
@@ -2279,8 +2254,7 @@ def write_tensors(self):
22792254
# map tensor names
22802255
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
22812256
if new_name is None:
2282-
logger.error(f"Can not map tensor {name!r}")
2283-
sys.exit()
2257+
raise ValueError(f"Can not map tensor {name!r}")
22842258

22852259
n_dims = len(data.shape)
22862260
data_dtype = data.dtype
@@ -2382,8 +2356,7 @@ def write_tensors(self):
23822356
# map tensor names
23832357
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
23842358
if new_name is None:
2385-
logger.error(f"Can not map tensor {name!r}")
2386-
sys.exit()
2359+
raise ValueError(f"Can not map tensor {name!r}")
23872360

23882361
if name.endswith(".A_log"):
23892362
logger.debug("A_log --> A ==> " + new_name)

convert-persimmon-to-gguf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,8 +121,8 @@ def main():
121121
data = data_torch.to(torch.float32).squeeze().numpy()
122122
new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
123123
if new_name is None:
124-
logger.error(f"Can not map tensor '{name}'")
125-
sys.exit()
124+
raise ValueError(f"Can not map tensor '{name}'")
125+
126126
n_dims = len(data.shape)
127127
logger.debug(f"{new_name}, n_dims = {str(n_dims)}, {str(old_dtype)} --> {str(data.dtype)}")
128128
gguf_writer.add_tensor(new_name, data)

0 commit comments

Comments (0)