We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent d018c7b commit c471871 — Copy full SHA for c471871
llama_cpp/llama.py
@@ -268,7 +268,7 @@ def __init__(
268
269
self.params = llama_cpp.llama_context_default_params()
270
self.params.n_ctx = n_ctx
271
- self.params.n_gpu_layers = n_gpu_layers
+ self.params.n_gpu_layers = 0x7FFFFFFF if n_gpu_layers == -1 else n_gpu_layers # 0x7FFFFFFF is INT32 max, will be auto set to all layers
272
self.params.seed = seed
273
self.params.f16_kv = f16_kv
274
self.params.logits_all = logits_all
0 commit comments