
Commit 667c501

ostix360 and prusnak authored
py : cast lora_alpha to int in convert-lora-to-ggml (#1170)
Co-authored-by: Pavol Rusnak <[email protected]>
1 parent bb98e77 commit 667c501
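
Note on the change: PEFT adapter configs sometimes store `lora_alpha` as a float (e.g. 16.0), while the ggml lora header stores it as an int, so the script now casts the value and asserts that the cast loses nothing. A quick illustration of that check, not taken from the commit and using hypothetical values:

    # lossless-cast check as used in the diff below (hypothetical values)
    print(int(16.0) == 16.0)  # True  -> header gets 16
    print(int(16.5) == 16.5)  # False -> the new assert rejects this value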

File tree

1 file changed (+7, -2 lines)


convert-lora-to-ggml.py

Lines changed: 7 additions & 2 deletions
@@ -49,7 +49,12 @@ def translate_tensor_name(t: str) -> str:
 def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
     fout.write(b"ggla"[::-1])  # magic (ggml lora)
     fout.write(struct.pack("i", 1))  # file version
-    fout.write(struct.pack("ii", params["r"], params["lora_alpha"]))
+    fout.write(struct.pack("i", params["r"]))
+    # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
+    # but some models ship a float value instead
+    # let's convert to int, but fail if lossless conversion is not possible
+    assert int(params["lora_alpha"]) == params["lora_alpha"], "cannot convert float to int losslessly"
+    fout.write(struct.pack("i", int(params["lora_alpha"])))
 
 
 def write_tensor_header(
@@ -89,7 +94,7 @@ def write_tensor_header(
     print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
     sys.exit(1)
 
-if params["fan_in_fan_out"] == True:
+if params["fan_in_fan_out"] is True:
     print("Error: param fan_in_fan_out is not supported")
     sys.exit(1)
 
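
For reference, a minimal sketch of reading back the header that write_file_header now emits: the reversed "ggla" magic followed by three native-endian 4-byte ints (file version, r, lora_alpha). The function name read_file_header and the dict return value are made up for illustration and are not part of the script:

    import struct

    def read_file_header(path: str) -> dict:
        # Header layout written by write_file_header in the diff above:
        # 4-byte magic, then int32 file version, int32 r, int32 lora_alpha
        with open(path, "rb") as fin:
            magic = fin.read(4)
            assert magic == b"ggla"[::-1], f"unexpected magic: {magic!r}"
            version, r, lora_alpha = struct.unpack("iii", fin.read(12))
            assert version == 1, f"unsupported file version: {version}"
            return {"version": version, "r": r, "lora_alpha": lora_alpha}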

0 commit comments
