Skip to content

Commit 62490f1

Browse files
committed
gguf : use UNIX line ending
1 parent 0c19ae7 commit 62490f1

File tree

5 files changed

+5606
-5606
lines changed

5 files changed

+5606
-5606
lines changed

constants.py

Lines changed: 54 additions & 54 deletions
Original file line number | Diff line number | Diff line change
@@ -1,54 +1,54 @@
1-
GGUF_MAGIC = 0x47475546
2-
GGUF_VERSION = 1
3-
GGUF_DEFAULT_ALIGNMENT = 32
4-
5-
# general
6-
KEY_GENERAL_ARCHITECTURE = "general.architecture"
7-
KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version"
8-
KEY_GENERAL_ALIGNMENT = "general.alignment"
9-
KEY_GENERAL_NAME = "general.name"
10-
KEY_GENERAL_AUTHOR = "general.author"
11-
KEY_GENERAL_URL = "general.url"
12-
KEY_GENERAL_DESCRIPTION = "general.description"
13-
KEY_GENERAL_FILE_TYPE = "general.file_type"
14-
KEY_GENERAL_LICENSE = "general.license"
15-
KEY_GENERAL_SOURCE_URL = "general.source.url"
16-
KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"
17-
18-
# LLM
19-
KEY_LLM_CONTEXT_LENGTH = "{llm}.context_length"
20-
KEY_LLM_EMBEDDING_LENGTH = "{llm}.embedding_length"
21-
KEY_LLM_BLOCK_COUNT = "{llm}.block_count"
22-
KEY_LLM_FEED_FORWARD_LENGTH = "{llm}.feed_forward_length"
23-
KEY_LLM_USE_PARALLEL_RESIDUAL = "{llm}.use_parallel_residual"
24-
KEY_LLM_TENSOR_DATA_LAYOUT = "{llm}.tensor_data_layout"
25-
26-
# attention
27-
KEY_ATTENTION_HEAD_COUNT = "{llm}.attention.head_count"
28-
KEY_ATTENTION_HEAD_COUNT_KV = "{llm}.attention.head_count_kv"
29-
KEY_ATTENTION_MAX_ALIBI_BIAS = "{llm}.attention.max_alibi_bias"
30-
KEY_ATTENTION_CLAMP_KQV = "{llm}.attention.clamp_kqv"
31-
KEY_ATTENTION_LAYERNORM_EPS = "{llm}.attention.layer_norm_epsilon"
32-
KEY_ATTENTION_LAYERNORM_RMS_EPS = "{llm}.attention.layer_norm_rms_epsilon"
33-
34-
# RoPE
35-
KEY_ROPE_DIMENSION_COUNT = "{llm}.rope.dimension_count"
36-
KEY_ROPE_SCALE = "{llm}.rope.scale"
37-
38-
# tokenization
39-
KEY_TOKENIZER_MODEL = "tokenizer.ggml.model"
40-
KEY_TOKENIZER_LIST = "tokenizer.ggml.tokens"
41-
KEY_TOKENIZER_SCORES = "tokenizer.ggml.scores"
42-
KEY_TOKENIZER_MERGES = "tokenizer.ggml.merges"
43-
KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id"
44-
KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id"
45-
KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id"
46-
KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.seperator_token_id"
47-
KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id"
48-
KEY_TOKENIZER_HF_JSON = "tokenizer.huggingface.json"
49-
KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world"
50-
KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id"
51-
KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id"
52-
KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id"
53-
KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.separator_token_id"
54-
KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id"
1+
GGUF_MAGIC = 0x47475546
2+
GGUF_VERSION = 1
3+
GGUF_DEFAULT_ALIGNMENT = 32
4+
5+
# general
6+
KEY_GENERAL_ARCHITECTURE = "general.architecture"
7+
KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version"
8+
KEY_GENERAL_ALIGNMENT = "general.alignment"
9+
KEY_GENERAL_NAME = "general.name"
10+
KEY_GENERAL_AUTHOR = "general.author"
11+
KEY_GENERAL_URL = "general.url"
12+
KEY_GENERAL_DESCRIPTION = "general.description"
13+
KEY_GENERAL_FILE_TYPE = "general.file_type"
14+
KEY_GENERAL_LICENSE = "general.license"
15+
KEY_GENERAL_SOURCE_URL = "general.source.url"
16+
KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"
17+
18+
# LLM
19+
KEY_LLM_CONTEXT_LENGTH = "{llm}.context_length"
20+
KEY_LLM_EMBEDDING_LENGTH = "{llm}.embedding_length"
21+
KEY_LLM_BLOCK_COUNT = "{llm}.block_count"
22+
KEY_LLM_FEED_FORWARD_LENGTH = "{llm}.feed_forward_length"
23+
KEY_LLM_USE_PARALLEL_RESIDUAL = "{llm}.use_parallel_residual"
24+
KEY_LLM_TENSOR_DATA_LAYOUT = "{llm}.tensor_data_layout"
25+
26+
# attention
27+
KEY_ATTENTION_HEAD_COUNT = "{llm}.attention.head_count"
28+
KEY_ATTENTION_HEAD_COUNT_KV = "{llm}.attention.head_count_kv"
29+
KEY_ATTENTION_MAX_ALIBI_BIAS = "{llm}.attention.max_alibi_bias"
30+
KEY_ATTENTION_CLAMP_KQV = "{llm}.attention.clamp_kqv"
31+
KEY_ATTENTION_LAYERNORM_EPS = "{llm}.attention.layer_norm_epsilon"
32+
KEY_ATTENTION_LAYERNORM_RMS_EPS = "{llm}.attention.layer_norm_rms_epsilon"
33+
34+
# RoPE
35+
KEY_ROPE_DIMENSION_COUNT = "{llm}.rope.dimension_count"
36+
KEY_ROPE_SCALE = "{llm}.rope.scale"
37+
38+
# tokenization
39+
KEY_TOKENIZER_MODEL = "tokenizer.ggml.model"
40+
KEY_TOKENIZER_LIST = "tokenizer.ggml.tokens"
41+
KEY_TOKENIZER_SCORES = "tokenizer.ggml.scores"
42+
KEY_TOKENIZER_MERGES = "tokenizer.ggml.merges"
43+
KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id"
44+
KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id"
45+
KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id"
46+
KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.seperator_token_id"
47+
KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id"
48+
KEY_TOKENIZER_HF_JSON = "tokenizer.huggingface.json"
49+
KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world"
50+
KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id"
51+
KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id"
52+
KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id"
53+
KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.separator_token_id"
54+
KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id"

0 commit comments

Comments
 (0)