
Commit fe957cd

Fix formatting issues
1 parent 0d49878 commit fe957cd

3 files changed: +6, -5 lines changed


convert_hf_to_gguf.py

Lines changed: 1 addition & 0 deletions
@@ -3604,6 +3604,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         name = name.removeprefix("transformer.")
         return [(self.map_tensor_name(name), data_torch)]
 
+
 @Model.register("NemotronForCausalLM")
 class NemotronModel(Model):
     model_arch = gguf.MODEL_ARCH.NEMOTRON
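The only change in this file is the extra blank line before the @Model.register decorator (two blank lines before a top-level class definition). For context, a minimal sketch of how a registry decorator of this shape typically works; the registry dict and lookup below are illustrative assumptions, not the repository's actual implementation:

# Minimal sketch of a class-registry decorator like Model.register.
# Hypothetical names; the real convert_hf_to_gguf.py may differ.
_model_classes: dict[str, type] = {}

class Model:
    @classmethod
    def register(cls, *names: str):
        def wrapper(model_cls: type) -> type:
            for name in names:
                _model_classes[name] = model_cls  # HF architecture name -> converter class
            return model_cls
        return wrapper


@Model.register("NemotronForCausalLM")
class NemotronModel(Model):
    pass


# Lookup by the architecture string found in the HF config.json:
assert _model_classes["NemotronForCausalLM"] is NemotronModel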

gguf-py/gguf/constants.py

Lines changed: 2 additions & 2 deletions
@@ -1064,7 +1064,7 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.FFN_NORM,
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
-    ],
+    ],
     # TODO
 }
 
@@ -1108,7 +1108,7 @@ class MODEL_TENSOR(IntEnum):
     MODEL_ARCH.NEMOTRON: [
         MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_ROT_EMBD,
-    ],
+    ],
 }
 
 #
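Both hunks touch only the closing bracket lines of per-architecture tensor lists; the removed and added lines render identically, so the fix is presumably whitespace. For orientation, a minimal sketch of the shape of such a mapping; the MODEL_TENSOR_TABLE name below is a placeholder, not necessarily the identifier used in constants.py:

# Sketch of a per-architecture tensor table of the kind the hunks close off.
# Enum members mirror the visible diff context; the dict name is hypothetical.
from enum import IntEnum, auto

class MODEL_ARCH(IntEnum):
    NEMOTRON = auto()

class MODEL_TENSOR(IntEnum):
    ROPE_FREQS = auto()
    ATTN_ROT_EMBD = auto()

MODEL_TENSOR_TABLE: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
    MODEL_ARCH.NEMOTRON: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
}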

src/llama.cpp

Lines changed: 3 additions & 3 deletions
@@ -1306,7 +1306,7 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
         },
-    },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {
@@ -5237,7 +5237,7 @@ static void llm_load_hparams(
                     case 32: model.type = e_model::MODEL_4B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
-            } break;
+            } break;
         default: (void)0;
     }
 
@@ -7570,7 +7570,7 @@ static bool llm_load_tensors(
                     layer.ffn_down_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
                     layer.ffn_up_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
                 }
-            } break;
+            } break;
         default:
             throw std::runtime_error("unknown architecture");
     }
