@@ -1,5 +1,4 @@
 #include "llama-quant.h"
-
 #include "llama-impl.h"
 #include "llama-model.h"
 #include "llama-model-loader.h"
@@ -27,6 +26,56 @@ static void zeros(std::ofstream & file, size_t n) {
     }
 }
 
+static std::string remap_layer(const std::string & orig_name, const std::vector<int> & prune, std::map<int, std::string> & mapped, int & next_id) {
+    if (prune.empty()) {
+        return orig_name;
+    }
+
+    static const std::regex pattern(R"(blk\.(\d+)\.)");
+    if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
+        const int blk = std::stoi(match[1]);
+        std::string new_name = orig_name;
+
+        if (mapped.count(blk)) {
+            // Already mapped, do nothing
+        } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) {
+            mapped[blk] = "";
+        } else if (blk < prune.front()) {
+            mapped[blk] = std::to_string(blk);
+            next_id = blk + 1;
+        } else {
+            mapped[blk] = std::to_string(next_id);
+            ++next_id;
+        }
+
+        return mapped[blk].empty() ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]);
+    }
+
+    return orig_name;
+}
+
+static std::string remap_imatrix (const std::string & orig_name, const std::map<int, std::string> & mapped) {
+    if (mapped.empty()) {
+        return orig_name;
+    }
+
+    static const std::regex pattern(R"(blk\.(\d+)\.)");
+    if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
+        const std::string blk(match[1]);
+        std::string new_name = orig_name;
+
+        for (const auto & p : mapped) {
+            if (p.second == blk) {
+                LLAMA_LOG_DEBUG("(blk.%d imatrix) ", p.first);
+                return new_name.replace(match.position(1), match.length(1), std::to_string(p.first));
+            }
+        }
+        GGML_ABORT("\n%s: imatrix mapping error for %s\n", __func__, orig_name.c_str());
+    }
+
+    return orig_name;
+}
+
 struct quantize_state_impl {
     const llama_model & model;
     const llama_model_quantize_params * params;
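
The two helpers added above implement the renumbering: remap_layer returns an empty string for any tensor that lives in a pruned block and rewrites the blk.N index of surviving blocks so the numbering stays contiguous, while remap_imatrix inverts that mapping so importance-matrix entries recorded under the original block numbers can still be looked up. A minimal standalone sketch, not part of the patch (remap_layer_sketch is a hypothetical copy of the same logic), shows the effect of pruning block 2 from a five-block model:

    // Standalone illustration of the renumbering scheme used by remap_layer:
    // pruned blocks map to "", surviving blocks are renumbered to stay contiguous.
    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <regex>
    #include <string>
    #include <vector>

    static std::string remap_layer_sketch(const std::string & orig_name, const std::vector<int> & prune,
                                          std::map<int, std::string> & mapped, int & next_id) {
        if (prune.empty()) {
            return orig_name;
        }
        static const std::regex pattern(R"(blk\.(\d+)\.)");
        if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
            const int blk = std::stoi(match[1]);
            std::string new_name = orig_name;
            if (mapped.count(blk)) {
                // decision for this block already made
            } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) {
                mapped[blk] = "";                      // pruned: empty string means "drop this tensor"
            } else if (blk < prune.front()) {
                mapped[blk] = std::to_string(blk);     // before the first pruned block: index unchanged
                next_id = blk + 1;
            } else {
                mapped[blk] = std::to_string(next_id); // after a gap: take the next free index
                ++next_id;
            }
            return mapped[blk].empty() ? mapped[blk]
                                       : new_name.replace(match.position(1), match.length(1), mapped[blk]);
        }
        return orig_name;
    }

    int main() {
        const std::vector<int> prune = {2};            // hypothetical prune list: drop block 2
        std::map<int, std::string> mapped;
        int next_id = 0;
        for (int i = 0; i < 5; ++i) {
            const std::string name = "blk." + std::to_string(i) + ".ffn_down.weight";
            const std::string out  = remap_layer_sketch(name, prune, mapped, next_id);
            std::printf("%-26s -> %s\n", name.c_str(), out.empty() ? "(pruned)" : out.c_str());
        }
        // expected: blk.0 -> blk.0, blk.1 -> blk.1, blk.2 pruned, blk.3 -> blk.2, blk.4 -> blk.3
    }
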
@@ -568,6 +617,11 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     const size_t align = GGUF_DEFAULT_ALIGNMENT;
     gguf_context_ptr ctx_out { gguf_init_empty() };
 
+    std::vector<int> prune_list = {};
+    if (params->prune_layers) {
+        prune_list = *static_cast<const std::vector<int> *>(params->prune_layers);
+    }
+
     // copy the KV pairs from the input file
     gguf_set_kv     (ctx_out.get(), ml.meta.get());
     gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
@@ -597,12 +651,32 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         }
     }
 
+    std::map<int, std::string> mapped;
+    int blk_id = 0;
+    int pruned_attention_w = 0;
+
     // make a list of weights
     std::vector<const llama_model_loader::llama_tensor_weight *> tensors;
     tensors.reserve(ml.weights_map.size());
     for (const auto & it : ml.weights_map) {
+        const std::string remapped_name(remap_layer(it.first, prune_list, mapped, blk_id));
+        if (remapped_name.empty()) {
+            if (it.first.find("attn_v.weight") != std::string::npos ||
+                it.first.find("attn_qkv.weight") != std::string::npos ||
+                it.first.find("attn_kv_b.weight") != std::string::npos) {
+                pruned_attention_w++;
+            }
+            LLAMA_LOG_DEBUG("%s: pruning tensor %s\n", __func__, it.first.c_str());
+            continue;
+        } else if (remapped_name != it.first) {
+            ggml_set_name(it.second.tensor, remapped_name.c_str());
+            LLAMA_LOG_DEBUG("%s: tensor %s remapped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor));
+        }
         tensors.push_back(&it.second);
     }
+    if (!prune_list.empty()) {
+        gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), blk_id);
+    }
 
     // keep_split requires that the weights are sorted by split index
     if (params->keep_split) {
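
Because whole blocks can now disappear, the loop above tracks the surviving block count in blk_id and, when a prune list was supplied, overwrites the block-count key in the output header so the pruned file loads with the reduced depth. A quick way to check the result is to read the header back with the gguf API; this is a sketch only, not part of the patch, and the key name "llama.block_count" and file name are assumptions (LLM_KV_BLOCK_COUNT expands to "<arch>.block_count", here for a LLaMA-architecture model):

    // Sketch: print the block count recorded in a (hypothetical) pruned output file.
    #include "gguf.h"

    #include <cstdio>

    int main() {
        gguf_init_params ip = { /*.no_alloc =*/ true, /*.ctx =*/ nullptr };
        gguf_context * ctx = gguf_init_from_file("model-q4_k_m-pruned.gguf", ip);
        if (!ctx) {
            return 1;
        }
        const int64_t kid = gguf_find_key(ctx, "llama.block_count");
        if (kid >= 0) {
            std::printf("llama.block_count = %u\n", gguf_get_val_u32(ctx, kid));
        }
        gguf_free(ctx);
        return 0;
    }
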
@@ -640,7 +714,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         if (llama_model_has_encoder(&model)) {
             n_attn_layer *= 3;
         }
-        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+        GGML_ASSERT((qs.n_attention_wv == n_attn_layer - pruned_attention_w) && "n_attention_wv is unexpected");
     }
 
     size_t total_size_org = 0;
@@ -681,7 +755,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         for (size_t i = 0; i < ctx_outs.size(); ++i) {
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
-            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
+            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)tensors.size());
         }
     }
 
@@ -832,7 +906,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
 
         const float * imatrix = nullptr;
         if (imatrix_data) {
-            auto it = imatrix_data->find(tensor->name);
+            auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped));
             if (it == imatrix_data->end()) {
                 LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
             } else {
@@ -947,6 +1021,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.imatrix =*/ nullptr,
         /*.kv_overrides =*/ nullptr,
         /*.tensor_type =*/ nullptr,
+        /*.prune_layers =*/ nullptr
     };
 
     return result;
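
The new prune_layers field defaults to nullptr, so existing callers are unaffected and pruning stays opt-in. A hedged usage sketch (not part of the patch): the file names and the choice of blocks are illustrative, and it assumes the companion llama.h change declares prune_layers as a void pointer holding a std::vector<int>, which is what the static_cast earlier in the patch implies:

    #include "llama.h"

    #include <vector>

    int main() {
        // Hypothetical: drop blocks 20 and 21 while quantizing to Q4_K_M.
        std::vector<int> prune = {20, 21};

        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.ftype        = LLAMA_FTYPE_MOSTLY_Q4_K_M;
        params.prune_layers = &prune; // read back via static_cast in llama_model_quantize_impl

        // llama_model_quantize returns 0 on success
        return llama_model_quantize("model-f16.gguf", "model-q4_k_m-pruned.gguf", &params) == 0 ? 0 : 1;
    }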