Commit fa4a9f2

quantize : handle user-defined pruning of whole layers (blocks) (#13037)
1 parent 238005c commit fa4a9f2

File tree

3 files changed: 119 additions & 9 deletions

include/llama.h
src/llama-quant.cpp
tools/quantize/quantize.cpp

include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -390,6 +390,7 @@ extern "C" {
         void * imatrix;      // pointer to importance matrix data
         void * kv_overrides; // pointer to vector containing overrides
         void * tensor_types; // pointer to vector containing tensor types
+        void * prune_layers; // pointer to vector containing layer indices to prune
     } llama_model_quantize_params;
 
     typedef struct llama_logit_bias {
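For programs that call the library directly instead of going through the llama-quantize tool, the new field plugs into the existing quantization entry point. The following is a minimal caller-side sketch, not part of this commit; it assumes the llama_model_quantize_default_params() / llama_model_quantize() API already declared in llama.h, and the file names and layer indices are purely illustrative.

#include "llama.h"

#include <vector>

int main() {
    std::vector<int> prune = {20, 21}; // illustrative layer (block) indices to drop

    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype        = LLAMA_FTYPE_MOSTLY_Q4_K_M;
    params.prune_layers = &prune; // read back as a const std::vector<int> * in llama-quant.cpp

    // returns 0 on success
    return llama_model_quantize("model-f32.gguf", "model-q4_k_m-pruned.gguf", &params);
}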

src/llama-quant.cpp

Lines changed: 79 additions & 4 deletions
@@ -1,5 +1,4 @@
 #include "llama-quant.h"
-
 #include "llama-impl.h"
 #include "llama-model.h"
 #include "llama-model-loader.h"
@@ -27,6 +26,56 @@ static void zeros(std::ofstream & file, size_t n) {
     }
 }
 
+static std::string remap_layer(const std::string & orig_name, const std::vector<int> & prune, std::map<int, std::string> & mapped, int & next_id) {
+    if (prune.empty()) {
+        return orig_name;
+    }
+
+    static const std::regex pattern(R"(blk\.(\d+)\.)");
+    if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
+        const int blk = std::stoi(match[1]);
+        std::string new_name = orig_name;
+
+        if (mapped.count(blk)) {
+            // Already mapped, do nothing
+        } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) {
+            mapped[blk] = "";
+        } else if (blk < prune.front()) {
+            mapped[blk] = std::to_string(blk);
+            next_id = blk + 1;
+        } else {
+            mapped[blk] = std::to_string(next_id);
+            ++next_id;
+        }
+
+        return mapped[blk].empty() ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]);
+    }
+
+    return orig_name;
+}
+
+static std::string remap_imatrix (const std::string & orig_name, const std::map<int, std::string> & mapped) {
+    if (mapped.empty()) {
+        return orig_name;
+    }
+
+    static const std::regex pattern(R"(blk\.(\d+)\.)");
+    if (std::smatch match; std::regex_search(orig_name, match, pattern)) {
+        const std::string blk(match[1]);
+        std::string new_name = orig_name;
+
+        for (const auto & p : mapped) {
+            if (p.second == blk) {
+                LLAMA_LOG_DEBUG("(blk.%d imatrix) ", p.first);
+                return new_name.replace(match.position(1), match.length(1), std::to_string(p.first));
+            }
+        }
+        GGML_ABORT("\n%s: imatrix mapping error for %s\n", __func__, orig_name.c_str());
+    }
+
+    return orig_name;
+}
+
 struct quantize_state_impl {
     const llama_model & model;
     const llama_model_quantize_params * params;
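In effect, remap_layer() keeps the surviving blocks contiguously numbered: a pruned block maps to an empty string (its tensors are dropped), blocks below the first pruned index keep their number, and every later surviving block is shifted down to the next free index. Assuming tensors are visited in ascending block order, pruning layers 2 and 4 from an 8-block model renames blk.3 to blk.2, blk.5 to blk.3, blk.6 to blk.4 and blk.7 to blk.5, and next_id ends at 6, the new block count. remap_imatrix() applies the same mapping in reverse, so a renamed tensor still looks up its importance-matrix entry under its original block index.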
@@ -568,6 +617,11 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     const size_t align = GGUF_DEFAULT_ALIGNMENT;
     gguf_context_ptr ctx_out { gguf_init_empty() };
 
+    std::vector<int> prune_list = {};
+    if (params->prune_layers) {
+        prune_list = *static_cast<const std::vector<int> *>(params->prune_layers);
+    }
+
     // copy the KV pairs from the input file
     gguf_set_kv     (ctx_out.get(), ml.meta.get());
     gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
@@ -597,12 +651,32 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         }
     }
 
+    std::map<int, std::string> mapped;
+    int blk_id = 0;
+    int pruned_attention_w = 0;
+
     // make a list of weights
     std::vector<const llama_model_loader::llama_tensor_weight *> tensors;
     tensors.reserve(ml.weights_map.size());
     for (const auto & it : ml.weights_map) {
+        const std::string remapped_name(remap_layer(it.first, prune_list, mapped, blk_id));
+        if (remapped_name.empty()) {
+            if (it.first.find("attn_v.weight") != std::string::npos ||
+                it.first.find("attn_qkv.weight") != std::string::npos ||
+                it.first.find("attn_kv_b.weight") != std::string::npos) {
+                pruned_attention_w++;
+            }
+            LLAMA_LOG_DEBUG("%s: pruning tensor %s\n", __func__, it.first.c_str());
+            continue;
+        } else if (remapped_name != it.first) {
+            ggml_set_name(it.second.tensor, remapped_name.c_str());
+            LLAMA_LOG_DEBUG("%s: tensor %s remapped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor));
+        }
         tensors.push_back(&it.second);
     }
+    if (!prune_list.empty()) {
+        gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), blk_id);
+    }
 
     // keep_split requires that the weights are sorted by split index
     if (params->keep_split) {
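Two pieces of bookkeeping happen in this loop: pruned attention tensors (attn_v, attn_qkv, attn_kv_b) are counted in pruned_attention_w so that the n_attention_wv sanity check below can be relaxed by that amount, and if anything was pruned the block-count KV (LLM_KV_BLOCK_COUNT) is rewritten with the final blk_id so the output GGUF advertises the reduced number of layers.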
@@ -640,7 +714,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         if (llama_model_has_encoder(&model)) {
             n_attn_layer *= 3;
         }
-        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+        GGML_ASSERT((qs.n_attention_wv == n_attn_layer - pruned_attention_w) && "n_attention_wv is unexpected");
     }
 
     size_t total_size_org = 0;
@@ -681,7 +755,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         for (size_t i = 0; i < ctx_outs.size(); ++i) {
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
-            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
+            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)tensors.size());
         }
     }
 
@@ -832,7 +906,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
 
         const float * imatrix = nullptr;
         if (imatrix_data) {
-            auto it = imatrix_data->find(tensor->name);
+            auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped));
             if (it == imatrix_data->end()) {
                 LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
             } else {
@@ -947,6 +1021,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.imatrix      =*/ nullptr,
         /*.kv_overrides =*/ nullptr,
         /*.tensor_type  =*/ nullptr,
+        /*.prune_layers =*/ nullptr
     };
 
     return result;

tools/quantize/quantize.cpp

Lines changed: 39 additions & 5 deletions
@@ -107,13 +107,11 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
     return false;
 }
 
-// usage:
-//  ./llama-quantize [--allow-requantize] [--leave-output-tensor] [--pure] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
-//
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable);
-    printf("       [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable);
+    printf("       [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+    printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
@@ -124,6 +122,8 @@ static void usage(const char * executable) {
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
     printf("  --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
     printf("      Advanced option to selectively quantize tensors. May be specified multiple times.\n");
+    printf("  --prune-layers L0,L1,L2...comma-separated list of layer numbers to prune from the model\n");
+    printf("      Advanced option to remove all tensors from the given layers\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
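As a usage sketch (the model paths, quantization type and layer numbers are illustrative, not taken from this commit):

./llama-quantize --prune-layers 20,21,22 model-f32.gguf model-q4_k_m-pruned.gguf Q4_K_M 8

This quantizes to Q4_K_M with 8 threads while dropping blocks 20, 21 and 22 from the output model.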
@@ -286,6 +286,32 @@ static bool parse_tensor_type(const char * data, std::vector<tensor_quantization
     return true;
 }
 
+static bool parse_layer_prune(const char * data, std::vector<int> & prune_layers) {
+    if (!data) {
+        printf("\n%s: no layer pruning ids provided\n\n", __func__);
+        return false;
+    }
+
+    const auto block_ids = string_split<std::string>(data, ',');
+    for (const auto & block_id : block_ids) {
+        int id;
+        try {
+            id = std::stoi(block_id);
+        } catch (...) {
+            id = -1;
+        }
+        if (id < 0) {
+            printf("\n%s: invalid layer id '%s'\n\n", __func__, block_id.c_str());
+            return false;
+        }
+        prune_layers.emplace_back(id);
+    }
+
+    sort(prune_layers.begin(), prune_layers.end());
+    prune_layers.erase(std::unique(prune_layers.begin(), prune_layers.end()), prune_layers.end());
+    return true;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
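Note that parse_layer_prune() normalizes the list before it reaches the quantizer: the ids are sorted and de-duplicated (so --prune-layers 11,7,7,3 becomes 3,7,11), and any entry that does not parse as a non-negative integer makes the function return false, which sends the user back to usage().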
@@ -298,6 +324,7 @@ int main(int argc, char ** argv) {
     std::vector<std::string> included_weights, excluded_weights;
     std::vector<llama_model_kv_override> kv_overrides;
     std::vector<tensor_quantization> tensor_types;
+    std::vector<int> prune_layers;
 
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -324,6 +351,10 @@ int main(int argc, char ** argv) {
            if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
                usage(argv[0]);
            }
+        } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
+            if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
@@ -411,6 +442,9 @@ int main(int argc, char ** argv) {
     if (!tensor_types.empty()) {
         params.tensor_types = &tensor_types;
     }
+    if (!prune_layers.empty()) {
+        params.prune_layers = &prune_layers;
+    }
 
     llama_backend_init();
 