Commit a43a0d6

Improve help text, expand warning
1 parent ad9f307 commit a43a0d6


convert-llama-ggmlv3-to-gguf.py

Lines changed: 4 additions & 4 deletions
@@ -227,19 +227,19 @@ def handle_args():
     parser.add_argument('--input', '-i', help = 'Input GGMLv3 filename')
     parser.add_argument('--output', '-o', help ='Output GGUF filename')
     parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)')
-    parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps (use 1e-5 for LLaMA2)')
-    parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length')
+    parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2')
+    parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096')
     return parser.parse_args()
 
 def main():
     cfg = handle_args()
     print(f'* Using config: {cfg}')
-    print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n')
+    print('\n=== WARNING === Be aware that this conversion script is best-effort. Special tokens may not be converted correctly. Use a native GGUF model if possible. === WARNING ===\n')
     data = np.memmap(cfg.input, mode = 'r')
     model = GGMLV3Model()
     print('* Scanning GGML input file')
     offset = model.load(data, 0)
-    print(model.hyperparameters)
+    print(f'* GGML model hyperparameters: {model.hyperparameters}')
     converter = GGMLToGGUF(model, data, cfg)
     converter.save()
     print(f'* Successful completion. Output saved to: {cfg.output}')
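
For illustration, an invocation using the flags touched by this commit might look like the following for a LLaMA2 70B GGMLv3 file (the input and output filenames are placeholders, not part of this commit):

    python convert-llama-ggmlv3-to-gguf.py --input llama2-70b.ggmlv3.q4_0.bin --output llama2-70b.gguf --gqa 8 --eps 1e-5 --context-length 4096

Here --gqa 8 and --eps 1e-5 follow the LLaMA2 guidance in the updated help text, and --context-length 4096 matches the typical LLaMA2 context length it mentions.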
