
Commit 6c32d8c

llama : refactor internal quantization functions (#5830)
1 parent 802da00 commit 6c32d8c

File tree

1 file changed: +43 -38 lines changed

llama.cpp

Lines changed: 43 additions & 38 deletions
@@ -10836,7 +10836,7 @@ struct quantize_state_internal {
         {}
 };
 
-static void llama_convert_tensor_internal(
+static void llama_tensor_dequantize_internal(
     struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
     const size_t nelements, const int nthread
 ) {
@@ -11177,6 +11177,46 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
     return new_type;
 }
 
+static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, int64_t * hist_cur, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+    std::mutex mutex;
+    int counter = 0;
+    size_t new_size = 0;
+    if (nthread < 2) {
+        // single-thread
+        return ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur, imatrix);
+    }
+    auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
+            nrows, n_per_row, imatrix]() {
+        std::array<int64_t, 1 << 4> local_hist = {};
+        const int nrows_per_chunk = chunk_size / n_per_row;
+        size_t local_size = 0;
+        while (true) {
+            std::unique_lock<std::mutex> lock(mutex);
+            int first_row = counter; counter += nrows_per_chunk;
+            if (first_row >= nrows) {
+                if (local_size > 0) {
+                    for (int j=0; j<int(local_hist.size()); ++j) {
+                        hist_cur[j] += local_hist[j];
+                    }
+                    new_size += local_size;
+                }
+                break;
+            }
+            lock.unlock();
+            const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
+            local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
+                    first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
+        }
+    };
+    for (int it = 0; it < nthread - 1; ++it) {
+        workers.emplace_back(compute);
+    }
+    compute();
+    for (auto & w : workers) { w.join(); }
+    workers.clear();
+    return new_size;
+}
+
 static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
     ggml_type quantized_type;
     llama_ftype ftype = params->ftype;
static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
1118111221
ggml_type quantized_type;
1118211222
llama_ftype ftype = params->ftype;
@@ -11289,7 +11329,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
     std::vector<std::thread> workers;
     workers.reserve(nthread);
-    std::mutex mutex;
 
     int idx = 0;
 
@@ -11403,7 +11442,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
             throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
         } else {
-            llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+            llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
             f32_data = (float *) f32_conv_buf.data();
         }
 
@@ -11424,41 +11463,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         const int nchunk = (nelements + chunk_size - 1)/chunk_size;
         const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
-        if (nthread_use < 2) {
-            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur.data(), imatrix);
-        } else {
-            int counter = 0;
-            new_size = 0;
-            auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
-                    nrows, n_per_row, imatrix]() {
-                std::array<int64_t, 1 << 4> local_hist = {};
-                const int nrows_per_chunk = chunk_size / n_per_row;
-                size_t local_size = 0;
-                while (true) {
-                    std::unique_lock<std::mutex> lock(mutex);
-                    int first_row = counter; counter += nrows_per_chunk;
-                    if (first_row >= nrows) {
-                        if (local_size > 0) {
-                            for (int j=0; j<int(local_hist.size()); ++j) {
-                                hist_cur[j] += local_hist[j];
-                            }
-                            new_size += local_size;
-                        }
-                        break;
-                    }
-                    lock.unlock();
-                    const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
-                    local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
-                            first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
-                }
-            };
-            for (int it = 0; it < nthread_use - 1; ++it) {
-                workers.emplace_back(compute);
-            }
-            compute();
-            for (auto & w : workers) { w.join(); }
-            workers.clear();
-        }
+        new_size = llama_tensor_quantize_internal(new_type, f32_data, new_data, chunk_size, nrows, n_per_row, hist_cur.data(), imatrix, workers, nthread_use);
 
         LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
         int64_t tot_count = 0;
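
The new llama_tensor_quantize_internal helper above centralizes the chunked, mutex-guarded worker loop that was previously inlined in llama_model_quantize_internal: each thread grabs a range of rows under a lock, quantizes it into per-thread accumulators, and folds the results back at the end. Below is a minimal, self-contained sketch of that pattern, not the library's code; quantize_rows_stub is a hypothetical stand-in for ggml_quantize_chunk so the example compiles on its own, and the row/chunk/histogram sizes are illustrative only.

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Hypothetical stand-in for ggml_quantize_chunk: "quantizes" nrow rows and
    // reports a fake histogram bucket and output size so the sketch is runnable.
    static size_t quantize_rows_stub(int first_row, int nrow, int n_per_row, int64_t * hist) {
        (void) first_row;
        hist[0] += nrow;                        // pretend every value lands in bucket 0
        return (size_t) nrow * n_per_row / 2;   // pretend a 4-bit output format
    }

    // Illustrative version of the chunked worker pattern used by the new helper.
    static size_t quantize_rows_threaded(int nrows, int n_per_row, int chunk_size, int nthread, int64_t * hist_out) {
        std::mutex mutex;
        int counter = 0;        // next row to hand out, guarded by mutex
        size_t total_size = 0;  // summed under the mutex when each worker finishes

        auto compute = [&]() {
            std::array<int64_t, 16> local_hist = {};
            const int rows_per_chunk = chunk_size / n_per_row;
            size_t local_size = 0;
            while (true) {
                std::unique_lock<std::mutex> lock(mutex);
                const int first_row = counter;
                counter += rows_per_chunk;
                if (first_row >= nrows) {
                    // fold this worker's local results into the shared ones before exiting
                    for (size_t j = 0; j < local_hist.size(); ++j) { hist_out[j] += local_hist[j]; }
                    total_size += local_size;
                    break;
                }
                lock.unlock();  // the actual work runs without holding the lock
                const int this_nrow = std::min(nrows - first_row, rows_per_chunk);
                local_size += quantize_rows_stub(first_row, this_nrow, n_per_row, local_hist.data());
            }
        };

        std::vector<std::thread> workers;
        for (int it = 0; it < nthread - 1; ++it) { workers.emplace_back(compute); }
        compute();  // the calling thread participates as well
        for (auto & w : workers) { w.join(); }
        return total_size;
    }

    int main() {
        std::array<int64_t, 16> hist = {};
        const size_t n = quantize_rows_threaded(/*nrows=*/1000, /*n_per_row=*/4096,
                                                /*chunk_size=*/32 * 4096, /*nthread=*/4, hist.data());
        printf("quantized size: %zu bytes, rows counted: %lld\n", n, (long long) hist[0]);
        return 0;
    }

Because only the row counter and the final fold-in are serialized, the pattern scales with the number of threads while keeping the histogram and size accounting race-free, which is what the refactor preserves when moving the loop into its own function.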
