1 parent 7162b64 commit cfb5f03
llama.cpp
@@ -4515,6 +4515,7 @@ static bool llm_load_tensors(
     return true;
 }
 
+#ifdef GGML_USE_KOMPUTE
 static const llm_arch LLM_KOMPUTE_SUPPORTED_ARCHES[] {
     LLM_ARCH_LLAMA,
     LLM_ARCH_FALCON,
@@ -4538,6 +4539,7 @@ static const llm_arch LLM_KOMPUTE_SUPPORTED_ARCHES[] {
     LLM_ARCH_MINICPM,
     LLM_ARCH_GEMMA,
 };
+#endif
 
 // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
 static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
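The added #ifdef/#endif pair ensures LLM_KOMPUTE_SUPPORTED_ARCHES is only compiled when GGML_USE_KOMPUTE is defined, since the table is only consulted by Kompute-specific code and would otherwise be dead (and warned about as unused) in non-Kompute builds. As a minimal sketch of how such a table is typically consulted inside a matching guard in llama_model_load (assuming a std::find-based lookup and the n_gpu_layers fallback; the exact check in this commit may differ), with <algorithm> and <iterator> included:

#ifdef GGML_USE_KOMPUTE
    // Sketch (not the verbatim commit code): if the Kompute backend does not
    // support this architecture, fall back to CPU-only by disabling offload.
    const bool kompute_supported = std::find(
        std::begin(LLM_KOMPUTE_SUPPORTED_ARCHES),
        std::end(LLM_KOMPUTE_SUPPORTED_ARCHES),
        model.arch) != std::end(LLM_KOMPUTE_SUPPORTED_ARCHES);
    if (params.n_gpu_layers > 0 && !kompute_supported) {
        params.n_gpu_layers = 0; // assumed fallback: keep the model on the CPU
    }
#endif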