@@ -10836,7 +10836,7 @@ struct quantize_state_internal {
         {}
 };
 
-static void llama_convert_tensor_internal (
+static void llama_tensor_dequantize_internal (
     struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
     const size_t nelements, const int nthread
 ) {
@@ -11177,6 +11177,46 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
     return new_type;
 }
 
+static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, int64_t * hist_cur, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+    std::mutex mutex;
+    int counter = 0;
+    size_t new_size = 0;
+    if (nthread < 2) {
+        // single-thread
+        return ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur, imatrix);
+    }
+    auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
+            nrows, n_per_row, imatrix]() {
+        std::array<int64_t, 1 << 4> local_hist = {};
+        const int nrows_per_chunk = chunk_size / n_per_row;
+        size_t local_size = 0;
+        while (true) {
+            std::unique_lock<std::mutex> lock(mutex);
+            int first_row = counter; counter += nrows_per_chunk;
+            if (first_row >= nrows) {
+                if (local_size > 0) {
+                    for (int j=0; j<int(local_hist.size()); ++j) {
+                        hist_cur[j] += local_hist[j];
+                    }
+                    new_size += local_size;
+                }
+                break;
+            }
+            lock.unlock();
+            const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
+            local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
+                    first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
+        }
+    };
+    for (int it = 0; it < nthread - 1; ++it) {
+        workers.emplace_back(compute);
+    }
+    compute();
+    for (auto & w : workers) { w.join(); }
+    workers.clear();
+    return new_size;
+}
+
 static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
     ggml_type quantized_type;
     llama_ftype ftype = params->ftype;
@@ -11289,7 +11329,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
     std::vector<std::thread> workers;
     workers.reserve(nthread);
-    std::mutex mutex;
 
     int idx = 0;
 
@@ -11403,7 +11442,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
             throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
         } else {
-            llama_convert_tensor_internal (tensor, f32_conv_buf, workers, nelements, nthread);
+            llama_tensor_dequantize_internal (tensor, f32_conv_buf, workers, nelements, nthread);
             f32_data = (float *) f32_conv_buf.data();
         }
 
@@ -11424,41 +11463,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
             const int nchunk = (nelements + chunk_size - 1)/chunk_size;
             const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
-            if (nthread_use < 2) {
-                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur.data(), imatrix);
-            } else {
-                int counter = 0;
-                new_size = 0;
-                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
-                        nrows, n_per_row, imatrix]() {
-                    std::array<int64_t, 1 << 4> local_hist = {};
-                    const int nrows_per_chunk = chunk_size / n_per_row;
-                    size_t local_size = 0;
-                    while (true) {
-                        std::unique_lock<std::mutex> lock(mutex);
-                        int first_row = counter; counter += nrows_per_chunk;
-                        if (first_row >= nrows) {
-                            if (local_size > 0) {
-                                for (int j=0; j<int(local_hist.size()); ++j) {
-                                    hist_cur[j] += local_hist[j];
-                                }
-                                new_size += local_size;
-                            }
-                            break;
-                        }
-                        lock.unlock();
-                        const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
-                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
-                                first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
-                    }
-                };
-                for (int it = 0; it < nthread_use - 1; ++it) {
-                    workers.emplace_back(compute);
-                }
-                compute();
-                for (auto & w : workers) { w.join(); }
-                workers.clear();
-            }
+            new_size = llama_tensor_quantize_internal(new_type, f32_data, new_data, chunk_size, nrows, n_per_row, hist_cur.data(), imatrix, workers, nthread_use);
 
             LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
             int64_t tot_count = 0;
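The extracted llama_tensor_quantize_internal keeps the same work-distribution scheme as the inlined code it replaces: a shared row counter guarded by a mutex hands out chunks of chunk_size / n_per_row rows, each worker quantizes its chunk with ggml_quantize_chunk outside the lock, and per-thread histogram and size totals are merged back under the lock once no rows remain. A minimal standalone sketch of that pattern follows; it is illustrative only, not llama.cpp code, and the row count, chunk size, and the placeholder per-row work are assumptions.

#include <algorithm>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Workers pull row ranges from a shared counter under a mutex, process them
// with the lock released, and fold their local totals back in at the end.
static size_t process_rows_chunked(int nrows, int nrows_per_chunk, int nthread) {
    std::mutex mutex;
    int counter = 0;      // next row to hand out
    size_t total = 0;     // aggregated result (plays the role of new_size)

    auto compute = [&]() {
        size_t local = 0;
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
            const int first_row = counter;
            counter += nrows_per_chunk;
            if (first_row >= nrows) {
                total += local;   // merge while the lock is still held
                break;
            }
            lock.unlock();        // heavy work happens outside the critical section
            const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
            local += (size_t) this_nrow;   // stand-in for ggml_quantize_chunk(...)
        }
    };

    std::vector<std::thread> workers;
    for (int it = 0; it < nthread - 1; ++it) {
        workers.emplace_back(compute);
    }
    compute();                    // the calling thread does a share of the work too
    for (auto & w : workers) { w.join(); }
    return total;
}

int main() {
    printf("rows processed: %zu\n", process_rows_chunked(1000, 32, 4));
    return 0;
}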