Skip to content

Commit 7392f1c

Browse files
authored
Improved quantize script (abetlen#222)
* Improved quantize script I improved the quantize script by adding error handling and by allowing the user to select multiple models for quantization at once on the command line. I also converted it to Python for generalization as well as extensibility. * Fixes and improvements based on Matt's observations Fixed and improved many things in the script based on the reviews made by @mattsta. The parallelization suggestion is still to be revised, but code for it was still added (commented out). * Small fixes to the previous commit * Corrected to use the original glob pattern The original Bash script uses a glob pattern to match files that have endings such as ...bin.0, ...bin.1, etc. That has now been translated correctly to Python. * Added support for Windows and updated README to use this script New code to set the name of the quantize script binary depending on the platform has been added (quantize.exe when working on Windows), and the README.md file has been updated to use this script instead of the Bash one. * Fixed a typo and removed shell=True in the subprocess.run call Fixed a typo in the new filenames of the quantized models and removed the shell=True parameter in the subprocess.run call, as it was conflicting with the list of parameters. * Corrected previous commit * Small tweak: changed the name of the program in argparse This was causing the automatically generated help message to present the program's usage as literally "$ Quantization Script [arguments]". It should now read something like "$ python3 quantize.py [arguments]".
1 parent ad5fd5b commit 7392f1c

File tree

3 files changed

+127
-16
lines changed

3 files changed

+127
-16
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ python3 -m pip install torch numpy sentencepiece
147147
python3 convert-pth-to-ggml.py models/7B/ 1
148148

149149
# quantize the model to 4-bits
150-
./quantize.sh 7B
150+
python3 quantize.py 7B
151151

152152
# run the inference
153153
./main -m ./models/7B/ggml-model-q4_0.bin -n 128

quantize.py

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
#!/usr/bin/env python3
2+
3+
"""Script to execute the "quantize" script on a given set of models."""
4+
5+
import subprocess
6+
import argparse
7+
import glob
8+
import sys
9+
import os
10+
11+
12+
def main():
    """Parse command-line arguments and quantize the requested models.

    Selects the platform-appropriate name of the "quantize" binary,
    validates that the binary and the f16 model part files exist, then
    runs the binary on every part of every selected model. Exits with
    status 1 on any validation failure.
    """
    if "linux" in sys.platform or "darwin" in sys.platform:
        quantize_script_binary = "quantize"

    elif "win32" in sys.platform or "cygwin" in sys.platform:
        quantize_script_binary = "quantize.exe"

    else:
        print("WARNING: Unknown platform. Assuming a UNIX-like OS.\n")
        quantize_script_binary = "quantize"

    parser = argparse.ArgumentParser(
        prog='python3 quantize.py',
        description='This script quantizes the given models by applying the '
        f'"{quantize_script_binary}" script on them.'
    )
    parser.add_argument(
        'models', nargs='+', choices=('7B', '13B', '30B', '65B'),
        help='The models to quantize.'
    )
    parser.add_argument(
        '-r', '--remove-16', action='store_true', dest='remove_f16',
        help='Remove the f16 model after quantizing it.'
    )
    parser.add_argument(
        '-m', '--models-path', dest='models_path',
        default=os.path.join(os.getcwd(), "models"),
        help='Specify the directory where the models are located.'
    )
    parser.add_argument(
        '-q', '--quantize-script-path', dest='quantize_script_path',
        default=os.path.join(os.getcwd(), quantize_script_binary),
        help='Specify the path to the "quantize" script.'
    )

    # TODO: Revise this code
    # parser.add_argument(
    #     '-t', '--threads', dest='threads', type='int',
    #     default=os.cpu_count(),
    #     help='Specify the number of threads to use to quantize many models at '
    #     'once. Defaults to os.cpu_count().'
    # )

    args = parser.parse_args()

    if not os.path.isfile(args.quantize_script_path):
        print(
            f'The "{quantize_script_binary}" script was not found in the '
            "current location.\nIf you want to use it from another location, "
            "set the --quantize-script-path argument from the command line."
        )
        sys.exit(1)

    for model in args.models:
        # The model is separated in various parts
        # (ggml-model-f16.bin, ggml-model-f16.bin.0, ggml-model-f16.bin.1...)
        f16_model_path_base = os.path.join(
            args.models_path, model, "ggml-model-f16.bin"
        )

        # glob.glob already returns the full matched paths relative to the
        # pattern, so they must NOT be joined onto f16_model_path_base again
        # (doing so corrupts the paths whenever --models-path is relative).
        f16_model_parts_paths = glob.glob(f"{f16_model_path_base}*")

        if not f16_model_parts_paths:
            # No parts at all means the model was never converted/downloaded;
            # failing loudly beats reporting a bogus success at the end.
            print(
                f"No f16 model files were found in "
                f"{args.models_path}{os.path.sep}{model}"
                ". If you want to use them from another location, set the "
                "--models-path argument from the command line."
            )
            sys.exit(1)

        for f16_model_part_path in f16_model_parts_paths:
            if not os.path.isfile(f16_model_part_path):
                print(
                    f"The f16 model {os.path.basename(f16_model_part_path)} "
                    f"was not found in {args.models_path}{os.path.sep}{model}"
                    ". If you want to use it from another location, set the "
                    "--models-path argument from the command line."
                )
                sys.exit(1)

            __run_quantize_script(
                args.quantize_script_path, f16_model_part_path
            )

            if args.remove_f16:
                os.remove(f16_model_part_path)
99+
# This was extracted to a top-level function for parallelization, if
100+
# implemented. See https://github.com/ggerganov/llama.cpp/pull/222/commits/f8db3d6cd91bf1a1342db9d29e3092bc12dd783c#r1140496406
101+
102+
def __run_quantize_script(script_path, f16_model_part_path):
    """Run the quantize binary on a single f16 model part.

    The output path is derived from the input by swapping the "f16"
    marker for "q4_0" in the FILENAME only, so a directory name that
    happens to contain "f16" is left untouched. Raises
    subprocess.CalledProcessError if the binary exits non-zero
    (check=True).
    """
    directory, filename = os.path.split(f16_model_part_path)
    new_quantized_model_path = os.path.join(
        directory, filename.replace("f16", "q4_0")
    )
    # Argument list (not a shell string) avoids quoting/injection issues;
    # "2" selects the q4_0 quantization type expected by the binary.
    subprocess.run(
        [script_path, f16_model_part_path, new_quantized_model_path, "2"],
        check=True
    )
112+
113+
114+
if __name__ == "__main__":
    try:
        main()

    except subprocess.CalledProcessError:
        # The quantize binary reported a failure (check=True raised).
        print("\nAn error occurred while trying to quantize the models.")
        sys.exit(1)

    except KeyboardInterrupt:
        # Ctrl-C is a normal way to abort; exit quietly and successfully.
        sys.exit(0)

    else:
        print("\nSuccessfully quantized all models.")

quantize.sh

Lines changed: 0 additions & 15 deletions
This file was deleted.

0 commit comments

Comments
 (0)