We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a9e88c6 commit b19af36 — Copy full SHA for b19af36
llama.cpp
@@ -5135,9 +5135,11 @@ static bool llm_load_tensors(
5135
ml.done_getting_tensors();
5136
5137
ml.init_mappings(true, &model.mlock_mmaps);
5138
+ model.mappings.reserve(ml.mappings.size());
5139
5140
// create the backend buffers
5141
std::vector<std::pair<ggml_context *, std::unordered_map<uint32_t, ggml_backend_buffer_t>>> ctx_bufs;
5142
+ ctx_bufs.reserve(ctx_map.size());
5143
5144
// Ensure we have enough capacity for the maximum backend buffer we will potentially create
5145
size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
0 commit comments