Skip to content

Commit 4b05bf7

Browse files
committed
chore: Remove rebase artifacts
1 parent (3115494) · commit 4b05bf7

7 files changed: 7 additions, 310 deletions

convert_hf_to_gguf_update.py

File mode changed: 100644 → 100755
Lines changed: 7 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -49,7 +49,7 @@ class TOKENIZER_TYPE(IntEnum):
4949

5050
# TODO: this string has to exercise as much pre-tokenizer functionality as possible
5151
# will be updated with time - contributions welcome
52-
chktxt = "\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````\"\"\"\"......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL"
52+
chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
5353

5454
if len(sys.argv) == 2:
5555
token = sys.argv[1]
@@ -95,7 +95,7 @@ def download_file_with_auth(url, token, save_path):
9595
response = sess.get(url, headers=headers)
9696
response.raise_for_status()
9797
os.makedirs(os.path.dirname(save_path), exist_ok=True)
98-
with open(save_path, "wb") as f:
98+
with open(save_path, 'wb') as f:
9999
f.write(response.content)
100100
logger.info(f"File {save_path} downloaded successfully")
101101

@@ -145,9 +145,7 @@ def download_model(model):
145145
try:
146146
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
147147
except OSError as e:
148-
logger.error(
149-
f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}"
150-
)
148+
logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
151149
continue # Skip to the next model if the tokenizer can't be loaded
152150

153151
chktok = tokenizer.encode(chktxt)
@@ -167,15 +165,13 @@ def download_model(model):
167165
pre_tokenizer = cfg["pre_tokenizer"]
168166
logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
169167
if "ignore_merges" in cfg["model"]:
170-
logger.info(
171-
"ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)
172-
)
168+
logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))
173169

174170
logger.info("")
175171

176-
src_ifs += f' if chkhsh == "{chkhsh}":\n'
172+
src_ifs += f" if chkhsh == \"{chkhsh}\":\n"
177173
src_ifs += f" # ref: {model['repo']}\n"
178-
src_ifs += f' res = "{name}"\n'
174+
src_ifs += f" res = \"{name}\"\n"
179175

180176
src_func = f"""
181177
def get_vocab_base_pre(self, tokenizer) -> str:
@@ -331,8 +327,6 @@ def get_vocab_base_pre(self, tokenizer) -> str:
331327
for model in models:
332328
name = model["name"]
333329

334-
print(
335-
f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only"
336-
) # noqa: NP100
330+
print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100
337331

338332
logger.info("\n")

convert_lora_to_ggml.py

Lines changed: 0 additions & 149 deletions
This file was deleted.

convert_persimmon_to_gguf.py

Lines changed: 0 additions & 137 deletions
This file was deleted.

pyproject.toml

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -38,7 +38,4 @@ build-backend = "poetry.core.masonry.api"
3838
[tool.poetry.scripts]
3939
llama-convert-hf-to-gguf = "convert_hf_to_gguf:main"
4040
llama-convert-llama-ggml-to-gguf = "convert_llama_ggml_to_gguf:main"
41-
llama-convert-lora-to-ggml = "convert_lora_to_ggml:main"
42-
llama-convert-persimmon-to-gguf = "convert_persimmon_to_gguf:main"
43-
llama-convert = "convert:main"
4441
llama-ggml-vk-generate-shaders = "ggml_vk_generate_shaders:main"

requirements.txt

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -9,5 +9,3 @@
99
-r ./requirements/requirements-convert_hf_to_gguf.txt
1010
-r ./requirements/requirements-convert_hf_to_gguf_update.txt
1111
-r ./requirements/requirements-convert_llama_ggml_to_gguf.txt
12-
-r ./requirements/requirements-convert_lora_to_ggml.txt
13-
-r ./requirements/requirements-convert_persimmon_to_gguf.txt

requirements/requirements-convert_lora_to_ggml.txt

Lines changed: 0 additions & 3 deletions
This file was deleted.

requirements/requirements-convert_persimmon_to_gguf.txt

Lines changed: 0 additions & 3 deletions
This file was deleted.

0 commit comments

Comments (0)