Skip to content

Commit a2400dd

Browse files
committed
https://github.com/ggerganov/llama.cpp/pull/11448
1 parent bcf0194 commit a2400dd

File tree

2 files changed

+8
-2
lines changed

2 files changed

+8
-2
lines changed

src/llama-vocab.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,13 @@ struct llama_vocab {
3434
std::vector<id> cache_special_tokens;
3535
std::vector<token> cache_token_to_piece; // llama_token_to_piece(special = true);
3636

37-
std::map<std::pair<std::string, std::string>, int> bpe_ranks;
37+
// Hash functor so std::pair<std::string, std::string> can key an
// std::unordered_map (std::hash has no specialization for std::pair).
struct pair_hash {
    size_t operator()(const std::pair<std::string, std::string> & p) const {
        const size_t h1 = std::hash<std::string>{}(p.first);
        const size_t h2 = std::hash<std::string>{}(p.second);
        // boost-style hash_combine: the naive `h1 ^ (h2 << 1)` mix loses
        // entropy and collides systematically for related inputs; mixing
        // with the golden-ratio constant plus shifts spreads both hashes
        // across all bits of the result.
        return h1 ^ (h2 + 0x9e3779b9u + (h1 << 6) + (h1 >> 2));
    }
};
// BPE merge ranks keyed by (left piece, right piece); lower rank = merged earlier.
std::unordered_map<std::pair<std::string, std::string>, int, pair_hash> bpe_ranks;
3844

3945
// default LLaMA special tokens
4046
// TODO: should we set all of these to LLAMA_TOKEN_NULL?

src/llama.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4960,7 +4960,7 @@ struct llama_model_loader {
49604960
for (const auto & file : files) {
49614961
auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
49624962
auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
4963-
std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn()));
4963+
std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa_fn());
49644964
mmaps_used.emplace_back(mapping->size, 0);
49654965
if (mlock_mmaps) {
49664966
std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());

0 commit comments

Comments
 (0)