1 file changed: +2 additions, -12 deletions

@@ -5433,7 +5433,7 @@ static int llama_decode_internal(
     GGML_ASSERT(n_tokens <= n_batch);

-    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

     const int64_t t_start_us = ggml_time_us();
@@ -5550,18 +5550,8 @@ static int llama_decode_internal(
         n_threads = std::min(4, n_threads);
     }

-    // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
-    const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA     ||
-        model.arch == LLM_ARCH_BAICHUAN  ||
-        model.arch == LLM_ARCH_FALCON    ||
-        model.arch == LLM_ARCH_REFACT    ||
-        model.arch == LLM_ARCH_MPT       ||
-        model.arch == LLM_ARCH_STARCODER ||
-        model.arch == LLM_ARCH_STABLELM;
-
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
-    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
         n_threads = 1;
     }
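The net effect of the two hunks: batches of fewer than 32 tokens now use the interactive thread count (previously only single-token batches did), and the single-thread rule for fully offloaded models no longer depends on a per-architecture allowlist. Below is a minimal sketch of the resulting selection logic folded into one helper; pick_n_threads and its parameter list are illustrative, not part of llama.cpp's API, though the field names mirror those in the diff (cparams.n_threads, cparams.n_threads_batch, model.n_gpu_layers, hparams.n_layer, ggml_cpu_has_cublas()).

// Sketch of the thread-count heuristic after this commit (assumed helper,
// not actual llama.cpp code). The caller is assumed to pass the values
// visible in the diff.
static int pick_n_threads(int n_tokens, int n_threads, int n_threads_batch,
                          int n_gpu_layers, int n_layer, bool has_cublas) {
    // Small batches are latency-bound: use the interactive thread count.
    int n = n_tokens < 32 ? n_threads : n_threads_batch;

    // Mirrors the diff's fully-offloaded test; the "+ 3" covers the extra
    // non-repeating tensors offloaded beyond the n_layer repeating blocks.
    const bool fully_offloaded = n_gpu_layers >= n_layer + 3;

    // Per the removed comment, when everything runs on the GPU, more than
    // one CPU thread is detrimental: extra threads only add contention.
    if (has_cublas && fully_offloaded) {
        n = 1;
    }
    return n;
}

Note that with the allowlist removed, any fully offloaded architecture now takes the single-thread path, not just the seven listed above.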