@@ -18071,11 +18071,26 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     auto difquant_three_eights_alt_tensors = [](int i_layer, int n_layers) -> bool {
         return i_layer <= n_layers/8 || (i_layer > 4*n_layers/8 && i_layer < 5*n_layers/8) || i_layer >= 7*n_layers/8;
     };
+
+    // original formula use_some_bits :
+    auto use_some_bits = [](int i_layer, int n_layers) -> bool {
+        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%4 == 2;
+    };
+
     // original formula use_more_bits :
-    // return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
+    auto use_more_bits = [](int i_layer, int n_layers) -> bool {
+        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
+    };
+
     // The intervals of 3 are replaced by a broad bump in the central layers.
     // In the case of a 32 layers model, layers 5-7 and layers 12-16 are always skipped.
     // In the case of a 40 layers model, layers 6-9 and layers 15-20 are always skipped.
+
+    // new formula use_most_bits :
+    auto use_most_bits = [](int i_layer, int n_layers) -> bool {
+        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%2 == 1;
+    };
+
     // difquant_half_tensors replaces it and keeps the broad 50% bump to the upper quant. Ex : 16/32
     auto difquant_half_tensors = [](int i_layer, int n_layers) -> bool {
         // return i_layer <= n_layers/8 || (i_layer >= 2*n_layers/8 && i_layer < 3*n_layers/8) ||
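To see how the three selection formulas differ, here is a minimal standalone sketch, not part of the patch: the three lambdas are copied verbatim from the hunk above, and only the driver around them is added. It prints which layers each formula selects for a 32-layer model.

```cpp
// Standalone sketch: prints which layers each of the three formulas from the
// hunk selects for a 32-layer model. All three always include the first and
// last n_layers/8 layers; they differ only in how densely they sample the
// middle layers (every 4th, 3rd, or 2nd layer respectively).
#include <cstdio>

int main() {
    auto use_some_bits = [](int i_layer, int n_layers) -> bool {
        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%4 == 2;
    };
    auto use_more_bits = [](int i_layer, int n_layers) -> bool {
        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
    };
    auto use_most_bits = [](int i_layer, int n_layers) -> bool {
        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%2 == 1;
    };

    const struct { const char * name; bool (*f)(int, int); } formulas[] = {
        { "use_some_bits", use_some_bits },  // every 4th middle layer
        { "use_more_bits", use_more_bits },  // every 3rd middle layer
        { "use_most_bits", use_most_bits },  // every 2nd middle layer
    };

    const int n_layers = 32;
    for (const auto & entry : formulas) {
        int count = 0;
        printf("%s:", entry.name);
        for (int i = 0; i < n_layers; ++i) {
            if (entry.f(i, n_layers)) { printf(" %d", i); ++count; }
        }
        printf("  -> %d/%d layers\n", count, n_layers);
    }
    return 0;
}
```

For 32 layers this yields 14, 16, and 20 selected layers respectively, matching the some/more/most progression of the names.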
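The difquant lambdas follow the same idea but target a fixed fraction of the layers. A quick standalone check (again just the lambda from the hunk plus a counting loop of mine) confirms that `difquant_three_eights_alt_tensors` tags exactly 3/8 of the layers for both model depths mentioned in the comments:

```cpp
// Standalone check: difquant_three_eights_alt_tensors should select ~3/8 of
// the layers. Counts selected layers for 32- and 40-layer models.
#include <cstdio>

int main() {
    auto difquant_three_eights_alt_tensors = [](int i_layer, int n_layers) -> bool {
        return i_layer <= n_layers/8 || (i_layer > 4*n_layers/8 && i_layer < 5*n_layers/8) || i_layer >= 7*n_layers/8;
    };

    const int sizes[] = { 32, 40 };
    for (int n_layers : sizes) {
        int count = 0;
        for (int i = 0; i < n_layers; ++i) {
            count += difquant_three_eights_alt_tensors(i, n_layers);
        }
        // Prints 12/32 and 15/40, i.e. exactly 3/8 in both cases.
        printf("n_layers=%d -> %d/%d selected\n", n_layers, count, n_layers);
    }
    return 0;
}
```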