Skip to content

Commit c1c7fe3

Browse files
ngxson authored and mglambda committed
llama : remove check flash_attn with lora (ggml-org#11104)
1 parent 1b93869 commit c1c7fe3

File tree

1 file changed

+0
-6
lines changed

1 file changed

+0
-6
lines changed

src/llama.cpp

Lines changed: 0 additions & 6 deletions
```diff
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
```

0 commit comments

Comments
 (0)