import sys, struct, math, argparse
from pathlib import Path

import numpy as np

import gguf

# Note: Does not support GGML_QKK_64
QK_K = 256
# Items here are (block size, type size)
GGML_QUANT_SIZES = {
    gguf.GGMLQuantizationType.F32  : (1, 4),
    gguf.GGMLQuantizationType.F16  : (1, 2),
    gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16),
    gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16),
    gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16),
    gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16),
    gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32),
    gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32),
    gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4),
    gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12),
    gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12),
    gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
    gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
    gguf.GGMLQuantizationType.Q8_K : (256, 4 + QK_K + QK_K // 8),
}

class Hyperparameters:
    def __init__(self):
        self.n_vocab = self.n_embd = self.n_mult = self.n_head = self.n_layer = self.n_rot = self.ftype = 0
        self.n_ff = 0

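    # The GGMLv3 header does not store n_ff directly; it is recovered from the shape of
    # the layer 0 feed-forward tensor after the tensors have been scanned.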
    def set_n_ff(self, model):
        ff_tensor_idx = model.tensor_map.get(b'layers.0.feed_forward.w1.weight')
        assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor'
        ff_tensor = model.tensors[ff_tensor_idx]
        self.n_ff = ff_tensor.dims[1]

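    # The hyperparameter block is seven consecutive little-endian uint32 values.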
    def load(self, data, offset):
        (
            self.n_vocab,
            self.n_embd,
            self.n_mult,
            self.n_head,
            self.n_layer,
            self.n_rot,
            self.ftype,
        ) = struct.unpack('<7I', data[offset:offset + (4 * 7)])
        return 4 * 7

    def __str__(self):
        return f'<Hyperparameters: n_vocab={self.n_vocab}, n_embd={self.n_embd}, n_mult={self.n_mult}, n_head={self.n_head}, n_layer={self.n_layer}, n_rot={self.n_rot}, n_ff={self.n_ff}, ftype={self.ftype}>'

class Vocab:
    def __init__(self):
        self.items = []

    def load(self, data, offset, n_vocab):
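        # Each entry is a uint32 byte length, the raw token bytes, then a float32 score.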
        orig_offset = offset
        for _ in range(n_vocab):
            itemlen = struct.unpack('<I', data[offset:offset + 4])[0]
            assert itemlen < 4096, 'Absurd vocab item length'
            offset += 4
            vocab = bytes(data[offset:offset + itemlen])
            offset += itemlen
            score = struct.unpack('<f', data[offset:offset + 4])[0]
            offset += 4
            self.items.append((vocab, score))
        return offset - orig_offset

class Tensor:
    def __init__(self):
        self.name = None
        self.dims = ()
        self.dtype = None
        self.start_offset = 0
        self.len_bytes = 0

    def load(self, data, offset):
        orig_offset = offset
        (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
        assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
        assert name_len < 4096, 'Absurd tensor name length'
        quant = GGML_QUANT_SIZES.get(dtype)
        assert quant is not None, 'Unknown tensor type'
        (blksize, tysize) = quant
        offset += 12
        self.dtype = dtype
        self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
        offset += 4 * n_dims
        self.name = bytes(data[offset:offset + name_len])
        offset += name_len
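        # GGJTv3 aligns tensor data on 32-byte boundaries; skip padding up to the next multiple of 32.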
        pad = ((offset + 31) & ~31) - offset
        offset += pad
        n_elems = np.prod(self.dims)
        n_bytes = (n_elems * tysize) // blksize
        self.start_offset = offset
        self.len_bytes = n_bytes
        offset += n_bytes
        # print(n_dims, name_len, dtype, self.dims, self.name, pad)
        return offset - orig_offset

class GGMLV3Model:
    def __init__(self):
        self.hyperparameters = None
        self.vocab = None
        self.tensor_map = {}
        self.tensors = []

    def validate_header(self, data, offset):
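        # A GGJT file begins with the magic b'tjgg' followed by a uint32 format version; only version 3 is supported.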
        if bytes(data[offset:offset + 4]) != b'tjgg' or struct.unpack('<I', data[offset + 4:offset + 8])[0] != 3:
            raise ValueError('Only GGJTv3 supported')
        return 8

    def load(self, data, offset):
        offset += self.validate_header(data, offset)
        hp = Hyperparameters()
        offset += hp.load(data, offset)
        vocab = Vocab()
        offset += vocab.load(data, offset, hp.n_vocab)
        tensors = []
        tensor_map = {}
        while offset < len(data):
            tensor = Tensor()
            offset += tensor.load(data, offset)
            tensor_map[tensor.name] = len(tensors)
            tensors.append(tensor)
        self.hyperparameters = hp
        self.vocab = vocab
        self.tensors = tensors
        self.tensor_map = tensor_map
        hp.set_n_ff(self)
        return offset

class GGMLToGGUF:
    def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None):
        hp = ggml_model.hyperparameters
        self.model = ggml_model
        self.data = data
        self.cfg = cfg
        self.params_override = params_override
        self.vocab_override = vocab_override
        if params_override is not None:
            n_kv_head = params_override.n_head_kv
        else:
            if cfg.gqa == 1:
                n_kv_head = hp.n_head
            else:
                gqa = float(cfg.gqa)
                n_kv_head = None
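                # The GQA factor is n_head / n_kv_head; brute-force small integers to find a matching n_kv_head.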
                for x in range(1, 256):
                    if float(hp.n_head) / float(x) == gqa:
                        n_kv_head = x
                assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
                print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
        self.n_kv_head = n_kv_head
        self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer)

    def save(self):
        print('* Preparing to save GGUF file')
        gguf_writer = gguf.GGUFWriter(self.cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
        self.add_params(gguf_writer)
        self.add_vocab(gguf_writer)
        self.add_tensors(gguf_writer)
        print(" gguf: write header")
        gguf_writer.write_header_to_file()
        print(" gguf: write metadata")
        gguf_writer.write_kv_data_to_file()
        print(" gguf: write tensors")
        gguf_writer.write_tensors_to_file()
        gguf_writer.close()

    def add_params(self, gguf_writer):
        hp = self.model.hyperparameters
        cfg = self.cfg
        desc = cfg.desc if cfg.desc is not None else 'converted from legacy GGJTv3 format'
        try:
            # Filenames aren't necessarily valid UTF8.
            name = cfg.name if cfg.name is not None else cfg.input.name
        except UnicodeDecodeError:
            name = None
        print('* Adding model parameters and KV items')
        if name is not None:
            gguf_writer.add_name(name)
        gguf_writer.add_description(desc)
        if self.params_override is not None:
            po = self.params_override
            assert po.n_embd == hp.n_embd, 'Model hyperparams mismatch'
            assert po.n_layer == hp.n_layer, 'Model hyperparams mismatch'
            assert po.n_head == hp.n_head, 'Model hyperparams mismatch'
            gguf_writer.add_context_length(po.n_ctx)
            gguf_writer.add_embedding_length(po.n_embd)
            gguf_writer.add_block_count(po.n_layer)
            gguf_writer.add_feed_forward_length(po.n_ff)
            gguf_writer.add_rope_dimension_count(po.n_embd // po.n_head)
            gguf_writer.add_head_count(po.n_head)
            gguf_writer.add_head_count_kv(po.n_head_kv)
            gguf_writer.add_layer_norm_rms_eps(po.f_norm_eps)
            return
        gguf_writer.add_context_length(cfg.context_length)
        gguf_writer.add_embedding_length(hp.n_embd)
        gguf_writer.add_block_count(hp.n_layer)
        gguf_writer.add_feed_forward_length(hp.n_ff)
        gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head)
        gguf_writer.add_head_count(hp.n_head)
        gguf_writer.add_head_count_kv(self.n_kv_head)
        gguf_writer.add_layer_norm_rms_eps(float(cfg.eps))

    def add_vocab(self, gguf_writer):
        hp = self.model.hyperparameters
        gguf_writer.add_tokenizer_model('llama')
        tokens = []
        scores = []
        toktypes = []
        if self.vocab_override is not None:
            vo = self.vocab_override
            print('* Adding vocab item(s)')
            for (idx, vitem) in enumerate(vo.all_tokens()):
                if len(vitem) == 3:
                    tokens.append(vitem[0])
                    scores.append(vitem[1])
                    toktypes.append(vitem[2])
                else:
                    # Maybe try to guess the token type here?
                    tokens.append(vitem[0])
                    scores.append(vitem[1])
            assert len(tokens) == hp.n_vocab, f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}'
            gguf_writer.add_token_list(tokens)
            gguf_writer.add_token_scores(scores)
            if len(toktypes) > 0:
                gguf_writer.add_token_types(toktypes)
            return
        print(f'* Adding {hp.n_vocab} vocab item(s)')
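        # Token type heuristic for the legacy vocab: empty tokens are treated as control
        # tokens, single-byte tokens with IDs 3-258 become byte tokens in '<0xXX>' form,
        # and spaces in normal tokens are replaced with U+2581 (the SentencePiece marker).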
        for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items):
            tt = 1 # Normal
            if len(vbytes) == 0:
                tt = 3 # Control
            elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1:
                hv = hex(vbytes[0])[2:].upper()
                vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8')
                tt = 6 # Byte
            else:
                vbytes = vbytes.replace(b' ', b'\xe2\x96\x81')
            toktypes.append(tt)
            tokens.append(vbytes)
            scores.append(vscore)
        gguf_writer.add_token_list(tokens)
        gguf_writer.add_token_scores(scores)
        gguf_writer.add_token_types(toktypes)

    def add_tensors(self, gguf_writer):
        nm = self.name_map
        data = self.data
        print(f'* Adding {len(self.model.tensors)} tensor(s)')
        for tensor in self.model.tensors:
            name = str(tensor.name, 'UTF-8')
            if name.endswith('.weight'):
                name = name[:-7]
                suffix = '.weight'
            elif name.endswith('.bias'):
                name = name[:-5]
                suffix = '.bias'
            else:
                # Avoid an UnboundLocalError below if a tensor name has no known suffix.
                suffix = ''
            mapped_name = nm.get(name)
            assert mapped_name is not None, f'Bad name {name}'
            mapped_name += suffix
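            # GGML stores dims fastest-varying first; swap the first two so raw_shape is
            # in the row-major order expected by the GGUF writer.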
            tempdims = list(tensor.dims[:])
            if len(tempdims) > 1:
                temp = tempdims[1]
                tempdims[1] = tempdims[0]
                tempdims[0] = temp
            # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}')
            gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype)

def handle_metadata(cfg, hp):
    import convert
    assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory'
    hf_config_path   = cfg.model_metadata_dir / "config.json"
    orig_config_path = cfg.model_metadata_dir / "params.json"
    # We pass a fake model here. "original" mode will check the shapes of some
    # tensors if information is missing in the .json file: other than that, the
    # model data isn't used so this should be safe (at least for now).
    fakemodel = {
        'tok_embeddings.weight': convert.LazyTensor.__new__(convert.LazyTensor),
        'layers.0.feed_forward.w1.weight': convert.LazyTensor.__new__(convert.LazyTensor),
    }
    fakemodel['tok_embeddings.weight'].shape = [hp.n_vocab]
    fakemodel['layers.0.feed_forward.w1.weight'].shape = [hp.n_ff]
    if hf_config_path.exists():
        params = convert.Params.loadHFTransformerJson(fakemodel, hf_config_path)
    elif orig_config_path.exists():
        params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path)
    else:
        raise ValueError('Unable to load metadata')
    vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype)
    convert.check_vocab_size(params, vocab)
    return (params, vocab)

def handle_args():
    parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF')
    parser.add_argument('--input', '-i', type = Path, required = True, help = 'Input GGMLv3 filename')
    parser.add_argument('--output', '-o', type = Path, required = True, help = 'Output GGUF filename')
    parser.add_argument('--name', help = 'Set model name')
    parser.add_argument('--desc', help = 'Set model description')
    parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)')
    parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2')
    parser.add_argument('--context-length', '-c', type = int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096')
    parser.add_argument('--model-metadata-dir', '-m', type = Path, help = 'Load HuggingFace/.pth vocab and metadata from the specified directory')
    parser.add_argument('--vocab-dir', type = Path, help = 'directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir')
    parser.add_argument('--vocabtype', choices = ['spm', 'bpe'], default = 'spm', help = 'vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm)')
    return parser.parse_args()

def main():
    cfg = handle_args()
    print(f'* Using config: {cfg}')
    print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n')
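    # Memory-map the input so large model files are scanned without loading them fully into RAM.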
    data = np.memmap(cfg.input, mode = 'r')
    model = GGMLV3Model()
    print('* Scanning GGML input file')
    offset = model.load(data, 0)
    print(f'* GGML model hyperparameters: {model.hyperparameters}')
    vocab_override = None
    params_override = None
    if cfg.model_metadata_dir is not None:
        (params_override, vocab_override) = handle_metadata(cfg, model.hyperparameters)
        print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.')
        print(f'* Overriding params: {params_override}')
        print(f'* Overriding vocab: {vocab_override}')
    else:
        print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n')
    converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override)
    converter.save()
    print(f'* Successful completion. Output saved to: {cfg.output}')

if __name__ == '__main__':
    main()
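
# Example invocation (hypothetical file names; adjust the script and model paths to your
# setup). Pass --model-metadata-dir when the original HF/.pth metadata is available so the
# vocab and special tokens convert correctly:
#   python convert-llama-ggmlv3-to-gguf.py --input llama-7b.ggmlv3.q4_0.bin --output llama-7b.q4_0.gguf --eps 1e-6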