Skip to content

Commit 29e6edc

Browse files
committed
chore: Remove rebase artifacts
1 parent 45f29bf commit 29e6edc

7 files changed

+7
-310
lines changed

convert_hf_to_gguf_update.py

File mode changed: 100644 → 100755
Lines changed: 7 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -50,7 +50,7 @@ class TOKENIZER_TYPE(IntEnum):
5050

5151
# TODO: this string has to exercise as much pre-tokenizer functionality as possible
5252
# will be updated with time - contributions welcome
53-
chktxt = "\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````\"\"\"\"......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL"
53+
chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
5454

5555
if len(sys.argv) == 2:
5656
token = sys.argv[1]
@@ -99,7 +99,7 @@ def download_file_with_auth(url, token, save_path):
9999
response = sess.get(url, headers=headers)
100100
response.raise_for_status()
101101
os.makedirs(os.path.dirname(save_path), exist_ok=True)
102-
with open(save_path, "wb") as f:
102+
with open(save_path, 'wb') as f:
103103
f.write(response.content)
104104
logger.info(f"File {save_path} downloaded successfully")
105105

@@ -156,9 +156,7 @@ def download_model(model):
156156
else:
157157
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
158158
except OSError as e:
159-
logger.error(
160-
f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}"
161-
)
159+
logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
162160
continue # Skip to the next model if the tokenizer can't be loaded
163161

164162
chktok = tokenizer.encode(chktxt)
@@ -178,15 +176,13 @@ def download_model(model):
178176
pre_tokenizer = cfg["pre_tokenizer"]
179177
logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
180178
if "ignore_merges" in cfg["model"]:
181-
logger.info(
182-
"ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)
183-
)
179+
logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))
184180

185181
logger.info("")
186182

187-
src_ifs += f' if chkhsh == "{chkhsh}":\n'
183+
src_ifs += f" if chkhsh == \"{chkhsh}\":\n"
188184
src_ifs += f" # ref: {model['repo']}\n"
189-
src_ifs += f' res = "{name}"\n'
185+
src_ifs += f" res = \"{name}\"\n"
190186

191187
src_func = f"""
192188
def get_vocab_base_pre(self, tokenizer) -> str:
@@ -347,8 +343,6 @@ def get_vocab_base_pre(self, tokenizer) -> str:
347343
for model in models:
348344
name = model["name"]
349345

350-
print(
351-
f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only"
352-
) # noqa: NP100
346+
print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100
353347

354348
logger.info("\n")

convert_lora_to_ggml.py

Lines changed: 0 additions & 149 deletions
This file was deleted.

convert_persimmon_to_gguf.py

Lines changed: 0 additions & 137 deletions
This file was deleted.

pyproject.toml

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -38,7 +38,4 @@ build-backend = "poetry.core.masonry.api"
3838
[tool.poetry.scripts]
3939
llama-convert-hf-to-gguf = "convert_hf_to_gguf:main"
4040
llama-convert-llama-ggml-to-gguf = "convert_llama_ggml_to_gguf:main"
41-
llama-convert-lora-to-ggml = "convert_lora_to_ggml:main"
42-
llama-convert-persimmon-to-gguf = "convert_persimmon_to_gguf:main"
43-
llama-convert = "convert:main"
4441
llama-ggml-vk-generate-shaders = "ggml_vk_generate_shaders:main"

requirements.txt

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -9,5 +9,3 @@
99
-r ./requirements/requirements-convert_hf_to_gguf.txt
1010
-r ./requirements/requirements-convert_hf_to_gguf_update.txt
1111
-r ./requirements/requirements-convert_llama_ggml_to_gguf.txt
12-
-r ./requirements/requirements-convert_lora_to_ggml.txt
13-
-r ./requirements/requirements-convert_persimmon_to_gguf.txt

requirements/requirements-convert_lora_to_ggml.txt

Lines changed: 0 additions & 3 deletions
This file was deleted.

requirements/requirements-convert_persimmon_to_gguf.txt

Lines changed: 0 additions & 3 deletions
This file was deleted.

0 commit comments

Comments (0)