We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 712c127 commit d94d0ae — Copy full SHA for d94d0ae
llama.cpp
@@ -597,15 +597,6 @@ struct llama_model_loader {
597
return false;
598
}
599
600
- uint32_t guess_n_parts() const {
601
- auto it = tensors_map.name_to_idx.find("tok_embeddings.weight");
602
- if (it == tensors_map.name_to_idx.end()) {
603
- throw std::runtime_error(std::string("missing tok_embeddings.weight"));
604
- }
605
- const llama_load_tensor & lt = tensors_map.tensors.at(it->second);
606
- return file_loader->hparams.n_embd / lt.ne.at(0);
607
608
-
609
void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const {
610
*ctx_size_p = *mmapped_size_p = 0;
611
for (const llama_load_tensor & lt : tensors_map.tensors) {
0 commit comments