Skip to content

Commit 566daa5

Browse files
authored
*.py: Stylistic adjustments for python (#8233)
* Superfluous parens in conditionals were removed. * Unused args in a function were removed. * Replaced unused `idx` var with `_` * Initializing file_format and format_version attributes * Renaming constant to capitals * Preventing redefinition of the `f` var Signed-off-by: Jiri Podivin <[email protected]>
1 parent 6f11a83 commit 566daa5

File tree

3 files changed

+17
-13
lines changed

3 files changed

+17
-13
lines changed

convert_hf_to_gguf.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -737,7 +737,7 @@ def _create_vocab_sentencepiece(self):
737737
added_tokens_json = json.load(f)
738738
for key in added_tokens_json:
739739
token_id = added_tokens_json[key]
740-
if (token_id >= vocab_size):
740+
if token_id >= vocab_size:
741741
logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
742742
continue
743743

@@ -2005,7 +2005,7 @@ def set_vocab(self):
20052005

20062006
for key in added_tokens_json:
20072007
token_id = added_tokens_json[key]
2008-
if (token_id >= vocab_size):
2008+
if token_id >= vocab_size:
20092009
logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
20102010
continue
20112011

@@ -2081,7 +2081,7 @@ def set_gguf_parameters(self):
20812081

20822082
# write rope scaling for long context (128k) model
20832083
rope_scaling = self.find_hparam(['rope_scaling'], True)
2084-
if (rope_scaling is None):
2084+
if rope_scaling is None:
20852085
return
20862086

20872087
scale = max_pos_embds / orig_max_pos_embds
@@ -2728,7 +2728,7 @@ def get_tensors(self):
27282728

27292729
yield name, data
27302730

2731-
def set_vocab(self, *args, **kwargs):
2731+
def set_vocab(self):
27322732
tokenizer_class = 'BertTokenizer'
27332733
with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
27342734
tokenizer_class = json.load(f)['tokenizer_class']
@@ -2876,7 +2876,7 @@ def set_vocab(self):
28762876
added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"]
28772877
for token_id, token_json in added_tokens_decoder.items():
28782878
token_id = int(token_id)
2879-
if (token_id >= vocab_size):
2879+
if token_id >= vocab_size:
28802880
logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
28812881
continue
28822882

@@ -3125,7 +3125,7 @@ def set_vocab(self):
31253125
added_tokens_json = json.load(f)
31263126
for key in added_tokens_json:
31273127
token_id = added_tokens_json[key]
3128-
if (token_id >= vocab_size):
3128+
if token_id >= vocab_size:
31293129
logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
31303130
continue
31313131

convert_hf_to_gguf_update.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ class TOKENIZER_TYPE(IntEnum):
5050

5151
# TODO: this string has to exercise as much pre-tokenizer functionality as possible
5252
# will be updated with time - contributions welcome
53-
chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
53+
CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
5454

5555
if len(sys.argv) == 2:
5656
token = sys.argv[1]
@@ -100,8 +100,8 @@ def download_file_with_auth(url, token, save_path):
100100
response = sess.get(url, headers=headers)
101101
response.raise_for_status()
102102
os.makedirs(os.path.dirname(save_path), exist_ok=True)
103-
with open(save_path, 'wb') as f:
104-
f.write(response.content)
103+
with open(save_path, 'wb') as downloaded_file:
104+
downloaded_file.write(response.content)
105105
logger.info(f"File {save_path} downloaded successfully")
106106

107107

@@ -160,7 +160,7 @@ def download_model(model):
160160
logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
161161
continue # Skip to the next model if the tokenizer can't be loaded
162162

163-
chktok = tokenizer.encode(chktxt)
163+
chktok = tokenizer.encode(CHK_TXT)
164164
chkhsh = sha256(str(chktok).encode()).hexdigest()
165165

166166
logger.info(f"model: {name}")
@@ -192,7 +192,7 @@ def get_vocab_base_pre(self, tokenizer) -> str:
192192
# we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
193193
# use in llama.cpp to implement the same pre-tokenizer
194194
195-
chktxt = {repr(chktxt)}
195+
chktxt = {repr(CHK_TXT)}
196196
197197
chktok = tokenizer.encode(chktxt)
198198
chkhsh = sha256(str(chktok).encode()).hexdigest()
@@ -288,7 +288,7 @@ def get_vocab_base_pre(self, tokenizer) -> str:
288288
"333333333",
289289
"Cửa Việt", # llama-bpe fails on this
290290
" discards",
291-
chktxt,
291+
CHK_TXT,
292292
]
293293

294294
# write the tests to ./models/ggml-vocab-{name}.gguf.inp

convert_llama_ggml_to_gguf.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,10 @@ def load(self, data, offset):
132132

133133

134134
class GGMLModel:
135+
136+
file_format: GGMLFormat
137+
format_version: int
138+
135139
def __init__(self):
136140
self.hyperparameters = None
137141
self.vocab = None
@@ -290,7 +294,7 @@ def add_vocab(self, gguf_writer):
290294
if self.vocab_override is not None:
291295
vo = self.vocab_override
292296
logger.info('* Adding vocab item(s)')
293-
for (idx, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
297+
for (_, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
294298
tokens.append(vbytes)
295299
scores.append(score)
296300
toktypes.append(ttype)

0 commit comments

Comments
 (0)