Commit dc8feea

gguf-dump.py: add --markdown dump output
1 parent 42b53d1 commit dc8feea

File tree

1 file changed (+126, -1 lines)
gguf-py/scripts/gguf-dump.py

Lines changed: 126 additions & 1 deletion
@@ -101,25 +101,150 @@ def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None:
     json.dump(result, sys.stdout)
 
 
+def element_count_rounded_notation(count: int) -> str:
+    if count > 1e15 :
+        # Quadrillion
+        scaled_amount = count * 1e-15
+        scale_suffix = "Q"
+    elif count > 1e12 :
+        # Trillions
+        scaled_amount = count * 1e-12
+        scale_suffix = "T"
+    elif count > 1e9 :
+        # Billions
+        scaled_amount = count * 1e-9
+        scale_suffix = "B"
+    elif count > 1e6 :
+        # Millions
+        scaled_amount = count * 1e-6
+        scale_suffix = "M"
+    elif count > 1e3 :
+        # Thousands
+        scaled_amount = count * 1e-3
+        scale_suffix = "K"
+    else:
+        # Under Thousands
+        scaled_amount = count
+        scale_suffix = ""
+    return f"{'~' if count > 1e3 else ''}{round(scaled_amount)}{scale_suffix}"
+
+
+def translate_tensor_name(name):
+    import re
+    words = re.split(r"[._]", name)
+
+    abbreviation_dictionary = {
+        'ffn' : 'Feed Forward',
+        'attn' : 'Attention',
+        'blk' : 'Block',
+        'norm' : 'Normalization',
+        'embd' : 'Embedding',
+    }
+
+    expanded_words = []
+    for word in words:
+        word_norm = word.strip().lower()
+        if word_norm in abbreviation_dictionary:
+            expanded_words.append(abbreviation_dictionary[word_norm])
+        else:
+            expanded_words.append(word.title())
+
+    return ' '.join(expanded_words)
+
+
+def dump_markdown_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
+    host_endian, file_endian = get_file_host_endian(reader)
+    print(f'# {args.model} - GGUF Internal File Dump') # noqa: NP100
+    print(f'* Endian: {file_endian} endian') # noqa: NP100
+    print('') # noqa: NP100
+    print('## Key Value Metadata Store') # noqa: NP100
+    print(f'There is {len(reader.fields)} key/value pair(s) in this file') # noqa: NP100
+    print('') # noqa: NP100
+
+    print('| POS | TYPE | Elements | Key | Value |') # noqa: NP100
+    print('|-----|------------|----------|----------------------------------------|--------------------------------------------------------------------------------|') # noqa: NP100
+
+    for n, field in enumerate(reader.fields.values(), 1):
+        if not field.types:
+            pretty_type = 'N/A'
+        elif field.types[0] == GGUFValueType.ARRAY:
+            nest_count = len(field.types) - 1
+            pretty_type = '[' * nest_count + str(field.types[-1].name) + ']' * nest_count
+        else:
+            pretty_type = str(field.types[-1].name)
+
+        if len(field.types) == 1:
+            curr_type = field.types[0]
+            if curr_type == GGUFValueType.STRING:
+                value = repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60])
+            elif field.types[0] in reader.gguf_scalar_to_np:
+                value = field.parts[-1][0]
+        print(f'| {n:3} | {pretty_type:10} | {len(field.data):8} | {field.name:38} | {value:<78} |') # noqa: NP100
+
+    print("\n") # noqa: NP100
+
+    if not args.no_tensors:
+        # Group tensors by their prefix and maintain order
+        tensor_prefix_order = []
+        tensor_groups = {}
+        total_elements = sum(tensor.n_elements for tensor in reader.tensors)
+
+        for tensor in reader.tensors:
+            tensor_name = tensor.name.replace(".weight", "")
+            tensor_components = tensor_name.split('.')
+            tensor_prefix = tensor_components[0]
+            if tensor_prefix == 'blk':
+                tensor_prefix = f"{tensor_components[0]}.{tensor_components[1]}"
+
+            if tensor_prefix not in tensor_groups:
+                tensor_groups[tensor_prefix] = []
+                tensor_prefix_order.append(tensor_prefix)
+
+            tensor_groups[tensor_prefix].append(tensor)
+
+        # Generate Markdown metadata
+        for group in tensor_prefix_order:
+            tensors = tensor_groups[group]
+            group_elements = sum(tensor.n_elements for tensor in tensors)
+            group_percentage = group_elements / total_elements * 100
+
+            print(f"## {translate_tensor_name(group)} Tensor Group : {element_count_rounded_notation(group_elements)} Elements") # noqa: NP100
+            print("| Tensor Name | Human Friendly Name | Elements | Shape | Type |") # noqa: NP100
+            print("|----------------------|-------------------------------------|----------------|---------------------------------|------|") # noqa: NP100
+
+            for tensor in tensors:
+                tensor_name = tensor.name.replace(".weight", "")
+                human_friendly_name = translate_tensor_name(tensor.name.replace(".weight", ""))
+                prettydims = ' x '.join('{0:^5}'.format(d) for d in list(tensor.shape) + [1] * (4 - len(tensor.shape)))
+                print(f"| {tensor_name:20} | {human_friendly_name:35} | ({element_count_rounded_notation(tensor.n_elements):>4}) {tensor.n_elements:7} | [{prettydims:29}] | {tensor.tensor_type.name:4} |") # noqa: NP100
+            print("") # noqa: NP100
+            print(f"- Total elements in {group}: ({element_count_rounded_notation(group_elements):>4}) {group_elements}") # noqa: NP100
+            print(f"- Percentage of total elements: {group_percentage:.2f}%") # noqa: NP100
+            print("\n") # noqa: NP100
+
+
 def main() -> None:
     parser = argparse.ArgumentParser(description="Dump GGUF file metadata")
     parser.add_argument("model", type=str, help="GGUF format model filename")
     parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata")
     parser.add_argument("--json", action="store_true", help="Produce JSON output")
     parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)")
+    parser.add_argument("--markdown", action="store_true", help="Produce markdown output")
     parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
 
     args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
 
     logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
 
-    if not args.json:
+    if not args.json and not args.markdown:
         logger.info(f'* Loading: {args.model}')
 
     reader = GGUFReader(args.model, 'r')
 
     if args.json:
         dump_metadata_json(reader, args)
+    elif args.markdown:
+        dump_markdown_metadata(reader, args)
     else:
         dump_metadata(reader, args)
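For a quick sense of what the two new helpers produce, here is a small sanity sketch (not part of the commit). It assumes `element_count_rounded_notation` and `translate_tensor_name` have been pasted into a Python session, since the hyphenated script name makes a plain import awkward. The markdown dump itself would presumably be generated with something like `python3 gguf-py/scripts/gguf-dump.py --markdown model.gguf > model.md`, where `model.gguf` is a placeholder path.

```python
# Sanity sketch (not part of the commit): assumes the two helpers from the
# patch above have been pasted into the current Python session.

# Element counts above 1e3 are scaled, rounded and prefixed with '~'.
assert element_count_rounded_notation(500) == "500"
assert element_count_rounded_notation(4096) == "~4K"
assert element_count_rounded_notation(32000 * 4096) == "~131M"
assert element_count_rounded_notation(7_241_732_096) == "~7B"

# Tensor names are split on '.' and '_' and known abbreviations are expanded.
assert translate_tensor_name("token_embd") == "Token Embedding"
assert translate_tensor_name("blk.0.attn_norm") == "Block 0 Attention Normalization"
assert translate_tensor_name("blk.0.ffn_up") == "Block 0 Feed Forward Up"
```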

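The tensor section of the dump hinges on grouping tensors by name prefix, with per-layer tensors bucketed under their `blk.N` prefix. Below is a minimal standalone sketch of that grouping logic (not part of the commit), run over a few hypothetical tensor names instead of a real GGUFReader.

```python
# Standalone sketch of the prefix grouping used in dump_markdown_metadata above.
# The tensor names below are hypothetical examples, not taken from a real model.
names = [
    "token_embd.weight",
    "blk.0.attn_q.weight", "blk.0.ffn_up.weight",
    "blk.1.attn_q.weight", "blk.1.ffn_up.weight",
    "output_norm.weight",
]

prefix_order = []   # preserves first-seen order of groups
groups = {}         # prefix -> list of tensor base names

for name in names:
    base = name.replace(".weight", "")
    components = base.split('.')
    prefix = components[0]
    if prefix == 'blk':
        # per-layer tensors are grouped per block, e.g. "blk.0", "blk.1"
        prefix = f"{components[0]}.{components[1]}"
    if prefix not in groups:
        groups[prefix] = []
        prefix_order.append(prefix)
    groups[prefix].append(base)

for prefix in prefix_order:
    print(prefix, '->', groups[prefix])
# token_embd -> ['token_embd']
# blk.0 -> ['blk.0.attn_q', 'blk.0.ffn_up']
# blk.1 -> ['blk.1.attn_q', 'blk.1.ffn_up']
# output_norm -> ['output_norm']
```

Keeping `tensor_prefix_order` alongside the dict is what lets the generated markdown preserve the tensor order of the original file rather than dictionary or sorted order.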