Skip to content

Commit c5ede38

Browse files
committed
convert : add custom attention mapping
1 parent f162d45 commit c5ede38

File tree

1 file changed

+2
-0
lines changed

1 file changed

+2
-0
lines changed

gguf-py/gguf/tensor_mapping.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -146,6 +146,7 @@ class TensorNameMap:
146146
# Attention query
147147
MODEL_TENSOR.ATTN_Q: (
148148
"model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2
149+
"model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom
149150
"layers.{bid}.attention.wq", # llama-pth
150151
"encoder.layer.{bid}.attention.self.query", # bert
151152
"transformer.h.{bid}.attn.q_proj", # gpt-j
@@ -158,6 +159,7 @@ class TensorNameMap:
158159
# Attention key
159160
MODEL_TENSOR.ATTN_K: (
160161
"model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2
162+
"model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom
161163
"layers.{bid}.attention.wk", # llama-pth
162164
"encoder.layer.{bid}.attention.self.key", # bert
163165
"transformer.h.{bid}.attn.k_proj", # gpt-j

0 commit comments

Comments (0)