#include "ngram-cache.h"
#include "log.h"

#include <algorithm>
#include <cinttypes>
#include <cstdint>
#include <fstream>

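// Count the n-grams in the last nnew tokens of inp and add them to ngram_cache.
// For every n-gram size in [ngram_min, ngram_max] a window is slid over the new tokens and
// the token immediately following each window is counted as a continuation of that n-gram.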
void llama_ngram_cache_update(llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
                              std::vector<llama_token> & inp, int nnew, bool print_progress) {
    const int64_t t_start_ms = ggml_time_ms();
    const int inp_size = inp.size();

    for (int ngram_size = ngram_min; ngram_size <= ngram_max; ++ngram_size) {
        // Only the n-grams that end in one of the nnew new tokens need to be counted:
        const int i_start = std::max(inp_size - nnew, ngram_size);
        for (int i = i_start; i < inp_size; ++i) {
            const int ngram_start = i - ngram_size;
            llama_ngram ngram(&inp[ngram_start], ngram_size);
            const llama_token token = inp[i];

            llama_ngram_cache::iterator part_it = ngram_cache.find(ngram);
            if (part_it == ngram_cache.end()) {
                // First occurrence of this n-gram: start a new part with a count of 1.
                llama_ngram_cache_part part;
                part.emplace(token, 1);
                ngram_cache.emplace(ngram, part);
            } else {
                llama_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
                if (token_count_it == part_it->second.end()) {
                    part_it->second.emplace(token, 1);
                } else {
                    token_count_it->second++;
                }
            }
            if (print_progress && i % 10000000 == 0) {
                const int64_t t_now_ms = ggml_time_ms();
                const int64_t eta_ms   = (inp_size - i) * (t_now_ms - t_start_ms) / i;
                const int64_t eta_min  = eta_ms / (60*1000);
                const int64_t eta_s    = (eta_ms - 60*1000*eta_min) / 1000;

                fprintf(stderr, "%s: %d/%d done, ETA: %02" PRId64 ":%02" PRId64 "\n", __func__, i, inp_size, eta_min, eta_s);
            }
        }
    }
}

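// A minimal usage sketch (hypothetical values; LLAMA_NGRAM_MIN is assumed to be defined in
// ngram-cache.h alongside LLAMA_NGRAM_MAX):
//
//   llama_ngram_cache nc_context;
//   // Index an existing token sequence once:
//   llama_ngram_cache_update(nc_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, inp.size(), false);
//   // After appending a single new token only the new suffix needs to be counted:
//   inp.push_back(new_token);
//   llama_ngram_cache_update(nc_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
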
// Helper function to get a token from the combined, speculative sequence of inp and draft.
// The combined sequence is inp followed by draft starting at index 1.
static llama_token get_token(const std::vector<llama_token> & inp, const std::vector<llama_token> & draft, const size_t i) {
    return i < inp.size() ? inp[i] : draft[1 + i - inp.size()];
}

// If the sample size or the percentage of the most frequent token is below these
// thresholds the draft is aborted early:
constexpr int draft_min_sample_size_lax[LLAMA_NGRAM_MAX] = { 2,  2,  1,  1};
constexpr int draft_min_percent_lax[LLAMA_NGRAM_MAX]     = {66, 50, 50, 50};
constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4,  3,  2,  2};
constexpr int draft_min_percent_strict[LLAMA_NGRAM_MAX]     = {75, 66, 66, 66};

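// The lax thresholds are applied to the context cache and the strict ones to the dynamic
// cache (see llama_ngram_cache_draft below). The static-cache helper indexes the lax tables
// at LLAMA_NGRAM_STATIC-1, while the primary-cache helper indexes them by the position of
// the candidate n-gram size (ngram_min .. ngram_max, in that order).
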
// Helper function that tries to draft a token from only the static ngram cache:
static llama_token try_draft(llama_ngram_cache & nc_static, const llama_ngram ngram_static) {
    llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
    if (part_static_it == nc_static.end()) {
        return -1;
    }
    const llama_ngram_cache_part & part_static = part_static_it->second;

    int max_count_static  = 0;
    int sum_count_static  = 0;
    llama_token max_token = -1;

    // Find the most frequent continuation token and the total sample size:
    for (const auto & token_count_static : part_static) {
        const llama_token token    = token_count_static.first;
        const int32_t count_static = token_count_static.second;

        if (count_static > max_count_static) {
            max_token        = token;
            max_count_static = count_static;
        }
        sum_count_static += count_static;
    }

    if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
        return -1;
    }
    if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
        return -1;
    }
    return max_token;
}

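// A hypothetical worked example, assuming LLAMA_NGRAM_STATIC == 2 as defined in
// ngram-cache.h: if the static cache maps the current bigram to {A: 5, B: 2}, then
// sum_count_static == 7 and max_count_static == 5. The checks are 7 >= 2 (sample size)
// and 100*5 == 500 >= 50*7 == 350 (percentage), so A is returned as the draft candidate.
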
// Try to draft a token from the primary cache (context/dynamic), validate with the static cache:
static llama_token try_draft(
    llama_ngram_cache & nc_primary, const std::vector<llama_ngram> & ngrams_primary, llama_ngram_cache_part & part_static,
    const int * min_sample_size, const int * min_percent) {

    llama_token drafted_token = -1;

    // Prefer the largest n-gram that yields a confident draft:
    for (int i = (int) ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
        const llama_ngram ngram_primary = ngrams_primary[i];

        llama_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
        if (part_primary_it == nc_primary.end()) {
            continue;
        }
        const llama_ngram_cache_part & part_primary = part_primary_it->second;

        int max_count_primary = 0;
        int max_count_static  = 0;
        int sum_count_primary = 0;
        llama_token max_token = -1;

        for (const auto & token_count_primary : part_primary) {
            const llama_token token = token_count_primary.first;

            llama_ngram_cache_part::iterator token_count_static_it = part_static.find(token);

            // Tokens that the static cache has also seen get a 100x weight bonus;
            // tokens unknown to the static cache get a neutral weight of 1:
            const int32_t count_primary = token_count_primary.second;
            const int32_t count_static  = token_count_static_it != part_static.end() ? 100*token_count_static_it->second : 1;

            if (count_primary*count_static > max_count_primary*max_count_static) {
                max_token         = token;
                max_count_primary = count_primary;
                max_count_static  = count_static;
            }
            sum_count_primary += count_primary;
        }

        if (sum_count_primary < min_sample_size[i]) {
            continue;
        }
        if (100*max_count_primary < min_percent[i]*sum_count_primary) {
            continue;
        }
        drafted_token = max_token;
    }

    return drafted_token;
}

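// A hypothetical example of the weighting: with primary counts {A: 3, B: 4} and a static
// entry only for A with count 10, A scores 3 * (100*10) == 3000 while B scores 4 * 1 == 4,
// so A wins despite its lower primary count.
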
void llama_ngram_cache_draft(
    std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
    llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static
) {
    GGML_ASSERT(draft.size() == 1);
    const int inp_size = inp.size();

    if (inp_size < LLAMA_NGRAM_STATIC) {
        return;
    }

    while ((int) draft.size()-1 < n_draft) {
        llama_token drafted_token = -1;

        // The static n-gram is the last LLAMA_NGRAM_STATIC tokens of the combined sequence:
        const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + (int) draft.size()-1;
        llama_ngram ngram_static;
        for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) {
            ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j);
        }
        llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
        llama_ngram_cache_part part_static;
        if (part_static_it != nc_static.end()) {
            part_static = part_static_it->second;
        }

        // cd = context + dynamic
        std::vector<llama_ngram> ngrams_cd;
        for (int ngram_size_cd = ngram_min; ngram_size_cd <= ngram_max; ++ngram_size_cd) {
            const int ngram_start_cd = inp_size-ngram_size_cd + (int) draft.size()-1;
            llama_ngram ngram_cd;
            for (int j = ngram_start_cd; j < ngram_start_cd + ngram_size_cd; ++j) {
                ngram_cd.tokens[j-ngram_start_cd] = get_token(inp, draft, j);
            }
            ngrams_cd.push_back(ngram_cd);
        }
        // Try the caches in order: context (lax thresholds), then dynamic (strict
        // thresholds), then the static cache on its own as a last resort.
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_static, ngram_static);
        }

        if (drafted_token == -1) {
            break;
        }

        LOG(" - draft candidate: token=%d\n", drafted_token);
        draft.push_back(drafted_token);
    }
}

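// A minimal drafting sketch (hypothetical values; LLAMA_NGRAM_MIN as above):
//
//   std::vector<llama_token> draft = {last_sampled_token}; // must hold exactly one token
//   llama_ngram_cache_draft(inp, draft, 8, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
//                           nc_context, nc_dynamic, nc_static);
//   // draft[1:] now holds up to 8 speculative tokens; drafting stops early once no
//   // cache yields a confident candidate.
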
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename) {
    std::ofstream file_out(filename, std::ios::binary);
    for (const auto & item : ngram_cache) {
        const llama_ngram & ngram = item.first;
        const llama_ngram_cache_part & token_counts = item.second;
        GGML_ASSERT(!token_counts.empty());
        const int32_t ntokens = token_counts.size();

        file_out.write(reinterpret_cast<const char *>(&ngram),   sizeof(llama_ngram));
        file_out.write(reinterpret_cast<const char *>(&ntokens), sizeof(int32_t));
        for (const auto & item2 : token_counts) {
            const llama_token token = item2.first;
            const int32_t count     = item2.second;
            file_out.write(reinterpret_cast<const char *>(&token), sizeof(llama_token));
            file_out.write(reinterpret_cast<const char *>(&count), sizeof(int32_t));
        }
    }
}

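// On-disk format produced above (and consumed by llama_ngram_cache_load below): a sequence
// of records [llama_ngram][int32_t ntokens][ntokens x (llama_token, int32_t)], written as
// raw bytes. The file is therefore only portable between builds with the same endianness
// and struct layout.
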
llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
    std::ifstream hashmap_file(filename, std::ios::binary);
    if (!hashmap_file) {
        throw std::ifstream::failure("Unable to open file " + filename);
    }
    llama_ngram_cache ngram_cache;

    llama_ngram ngram;
    int32_t ntokens;
    llama_token token;
    int32_t count;

    char * ngramc   = reinterpret_cast<char *>(&ngram);
    char * ntokensc = reinterpret_cast<char *>(&ntokens);
    char * tokenc   = reinterpret_cast<char *>(&token);
    char * countc   = reinterpret_cast<char *>(&count);
    while (hashmap_file.read(ngramc, sizeof(llama_ngram))) {
        GGML_ASSERT(hashmap_file.read(ntokensc, sizeof(int32_t)));
        llama_ngram_cache_part token_counts;

        for (int i = 0; i < ntokens; ++i) {
            GGML_ASSERT(hashmap_file.read(tokenc, sizeof(llama_token)));
            GGML_ASSERT(hashmap_file.read(countc, sizeof(int32_t)));
            token_counts.emplace(token, count);
        }

        ngram_cache.emplace(ngram, token_counts);
    }
    GGML_ASSERT(hashmap_file.eof());

    return ngram_cache;
}

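// A loading sketch (hypothetical file name):
//
//   std::string fname = "ngram-cache-static.bin"; // hypothetical
//   try {
//       llama_ngram_cache nc_static = llama_ngram_cache_load(fname);
//   } catch (std::ifstream::failure & e) {
//       // file missing or unreadable; start with an empty cache instead
//   }
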
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add) {
    for (const auto & ngram_part : ngram_cache_add) {
        const llama_ngram & ngram = ngram_part.first;
        const llama_ngram_cache_part & part = ngram_part.second;

        llama_ngram_cache::iterator part_merged_it = ngram_cache_target.find(ngram);
        if (part_merged_it == ngram_cache_target.end()) {
            // The target has no entry for this n-gram yet: copy the whole part over.
            ngram_cache_target.emplace(ngram, part);
            continue;
        }

        for (const auto & token_count : part) {
            const llama_token token = token_count.first;
            const int32_t count     = token_count.second;

            llama_ngram_cache_part::iterator token_count_merged_it = part_merged_it->second.find(token);
            if (token_count_merged_it == part_merged_it->second.end()) {
                part_merged_it->second.emplace(token, count);
                continue;
            }

            token_count_merged_it->second += count;
        }
    }
}
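
// A hypothetical merge flow, e.g. folding the current session's context counts into a
// persistent dynamic cache before saving it:
//
//   llama_ngram_cache_merge(nc_dynamic, nc_context);
//   std::string dyn_fname = "ngram-cache-dynamic.bin"; // hypothetical
//   llama_ngram_cache_save(nc_dynamic, dyn_fname);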