@@ -300,14 +300,10 @@ int main(int argc, char ** argv) {
300
300
return 1 ;
301
301
}
302
302
303
- for ( auto & image : params.image ) {
303
+ if ( prompt_contains_image ( params.prompt ) ) {
304
304
auto ctx_llava = llava_init_context (&params, model);
305
305
306
- auto image_embed = load_image (ctx_llava, &params, image);
307
- if (!image_embed) {
308
- std::cerr << " error: failed to load image " << image << " . Terminating\n\n " ;
309
- return 1 ;
310
- }
306
+ auto image_embed = load_image (ctx_llava, &params, " " );
311
307
312
308
// process the prompt
313
309
process_prompt (ctx_llava, image_embed, &params, params.prompt );
@@ -316,7 +312,26 @@ int main(int argc, char ** argv) {
316
312
llava_image_embed_free (image_embed);
317
313
ctx_llava->model = NULL ;
318
314
llava_free (ctx_llava);
315
+ } else {
316
+ for (auto & image : params.image ) {
317
+ auto ctx_llava = llava_init_context (&params, model);
318
+
319
+ auto image_embed = load_image (ctx_llava, &params, image);
320
+ if (!image_embed) {
321
+ std::cerr << " error: failed to load image " << image << " . Terminating\n\n " ;
322
+ return 1 ;
323
+ }
324
+
325
+ // process the prompt
326
+ process_prompt (ctx_llava, image_embed, &params, params.prompt );
327
+
328
+ llama_print_timings (ctx_llava->ctx_llama );
329
+ llava_image_embed_free (image_embed);
330
+ ctx_llava->model = NULL ;
331
+ llava_free (ctx_llava);
332
+ }
319
333
}
334
+
320
335
llama_free_model (model);
321
336
322
337
return 0 ;
0 commit comments