Commit 4c04400

llama_model_loader: fix map -> unordered map
1 parent b19af36 commit 4c04400

File tree: 1 file changed (+1 −1 lines changed)

llama.cpp

Lines changed: 1 addition & 1 deletion
@@ -3237,7 +3237,7 @@ struct llama_model_loader {
     std::vector<std::pair<size_t, size_t>> mmaps_used;

     // Returns false if cancelled by progress_callback
-    bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, std::map<uint32_t, ggml_backend_buffer *> & bufs_mmap, std::vector<std::unique_ptr<llama_mlock>> * lmlocks) {
+    bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, std::unordered_map<uint32_t, ggml_backend_buffer *> & bufs_mmap, std::vector<std::unique_ptr<llama_mlock>> * lmlocks) {
         GGML_ASSERT(size_data != 0 && "call init_mappings() first");

         std::vector<no_init<uint8_t>> read_buf;
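The change is a one-token type fix: `bufs_mmap` is keyed by a `uint32_t` index (presumably the mapped-file index) and, as passed into `load_all_data`, only needs exact-key lookups, so a hash-based `std::unordered_map` is sufficient and avoids the ordered-tree overhead of `std::map`. The sketch below illustrates that lookup-by-index access pattern; `find_mmap_buffer` is a hypothetical helper for illustration, not part of llama.cpp.

// Minimal sketch, not llama.cpp's loader code: shows the lookup-by-index
// access pattern for which std::unordered_map (hashed, average O(1) find)
// is a better fit than std::map (ordered tree, O(log n) find).
#include <cstdint>
#include <unordered_map>

struct ggml_backend_buffer;  // opaque backend buffer type, as in ggml

// Hypothetical helper: return the buffer registered for a mapped-file
// index, or nullptr if that file has no mmap-backed buffer.
static ggml_backend_buffer * find_mmap_buffer(
        const std::unordered_map<uint32_t, ggml_backend_buffer *> & bufs_mmap,
        uint32_t file_idx) {
    auto it = bufs_mmap.find(file_idx);
    return it == bufs_mmap.end() ? nullptr : it->second;
}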
