 using json = nlohmann::ordered_json;
 
+std::initializer_list<enum llama_example> mmproj_examples = {
+    LLAMA_EXAMPLE_LLAVA,
+    // TODO: add LLAMA_EXAMPLE_SERVER when it's ready
+};
+
 common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) {
     this->examples = std::move(examples);
     return *this;
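For orientation, here is a minimal, self-contained sketch of the pattern introduced above, using hypothetical stand-in types rather than the real llama.cpp headers: a file-scope std::initializer_list of examples is shared by several options, and set_examples() copies it into the option and returns *this so calls can be chained.

#include <initializer_list>
#include <set>

// hypothetical stand-ins for the real llama.cpp declarations
enum llama_example { LLAMA_EXAMPLE_LLAVA, LLAMA_EXAMPLE_SERVER };

struct common_arg {
    std::set<llama_example> examples;
    common_arg & set_examples(std::initializer_list<llama_example> ex) {
        examples = ex;  // copy the shared list into this option
        return *this;   // enables chaining, e.g. set_examples(...).set_env(...)
    }
};

// one shared list, reused below by --mmproj, --mmproj-url and --no-mmproj
std::initializer_list<llama_example> mmproj_examples = { LLAMA_EXAMPLE_LLAVA };

int main() {
    common_arg mmproj_opt;
    mmproj_opt.set_examples(mmproj_examples);
    return 0;
}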
@@ -641,11 +646,16 @@ static struct common_hf_file_res common_get_hf_file(const std::string &, const s
 // utils
 //
 
-static void common_params_handle_model(
+struct handle_model_result {
+    bool found_mmproj = false;
+    common_params_model mmproj;
+};
+
+static handle_model_result common_params_handle_model(
         struct common_params_model & model,
         const std::string & bearer_token,
-        const std::string & model_path_default,
-        bool is_mmproj = false) { // TODO: move is_mmproj to an enum when we have more files?
+        const std::string & model_path_default) {
+    handle_model_result result;
     // handle pre-fill default model path and url based on hf_repo and hf_file
     {
         if (!model.hf_repo.empty()) {
@@ -657,7 +667,12 @@ static void common_params_handle_model(
                     exit(1); // built without CURL, error message already printed
                 }
                 model.hf_repo = auto_detected.repo;
-                model.hf_file = is_mmproj ? auto_detected.mmprojFile : auto_detected.ggufFile;
+                model.hf_file = auto_detected.ggufFile;
+                if (!auto_detected.mmprojFile.empty()) {
+                    result.found_mmproj = true;
+                    result.mmproj.hf_repo = model.hf_repo;
+                    result.mmproj.hf_file = auto_detected.mmprojFile;
+                }
             } else {
                 model.hf_file = model.path;
             }
@@ -694,6 +709,8 @@ static void common_params_handle_model(
             exit(1);
         }
     }
+
+    return result;
 }
 
 const std::vector<ggml_type> kv_cache_types = {
@@ -827,16 +844,25 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
         throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
     }
 
-    common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH);
-    common_params_handle_model(params.speculative.model, params.hf_token, "");
-    common_params_handle_model(params.vocoder.model, params.hf_token, "");
-
-    // allow --mmproj to be set from -hf
-    // assuming that mmproj is always in the same repo as text model
-    if (!params.model.hf_repo.empty() && ctx_arg.ex == LLAMA_EXAMPLE_LLAVA) {
-        params.mmproj.hf_repo = params.model.hf_repo;
+    // handle model and download
+    {
+        auto res = common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH);
+        if (params.no_mmproj) {
+            params.mmproj = {};
+        } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {
+            // optionally, handle mmproj model when -hf is specified
+            params.mmproj = res.mmproj;
+        }
+        // only download mmproj if the current example is using it
+        for (auto & ex : mmproj_examples) {
+            if (ctx_arg.ex == ex) {
+                common_params_handle_model(params.mmproj, params.hf_token, "");
+                break;
+            }
+        }
+        common_params_handle_model(params.speculative.model, params.hf_token, "");
+        common_params_handle_model(params.vocoder.model, params.hf_token, "");
     }
-    common_params_handle_model(params.mmproj, params.hf_token, "", true);
 
     if (params.escape) {
         string_process_escapes(params.prompt);
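To make the new flow above easier to follow, here is a hedged, standalone sketch with simplified stand-in types (model_ref, model_result, opts and resolve_model are hypothetical, not the real common_params / common_params_model definitions, and the file names are placeholders): the text-model resolution returns a small result struct, and the caller adopts the auto-detected mmproj only when --no-mmproj was not given and no explicit --mmproj / --mmproj-url was set.

#include <string>

// simplified, hypothetical stand-ins for the real structs
struct model_ref    { std::string hf_repo, hf_file, path, url; };
struct model_result { bool found_mmproj = false; model_ref mmproj; };

struct opts {
    model_ref model, mmproj;
    bool no_mmproj = false;
};

// toy resolver: the real code queries the Hugging Face repo for file names
static model_result resolve_model(model_ref & m) {
    model_result res;
    if (!m.hf_repo.empty()) {
        m.hf_file          = "model-Q4_K_M.gguf"; // placeholder name
        res.found_mmproj   = true;
        res.mmproj.hf_repo = m.hf_repo;
        res.mmproj.hf_file = "mmproj-model.gguf"; // placeholder name
    }
    return res;
}

int main() {
    opts params;
    params.model.hf_repo = "user/model-GGUF";     // as if passed via -hf

    auto res = resolve_model(params.model);
    if (params.no_mmproj) {
        params.mmproj = {};                       // --no-mmproj clears any projector
    } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {
        params.mmproj = res.mmproj;               // adopt the auto-detected projector
    }
    return 0;
}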
@@ -2095,18 +2121,25 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
     add_opt(common_arg(
         {"--mmproj"}, "FILE",
-        "path to a multimodal projector file for LLaVA. see examples/llava/README.md",
+        "path to a multimodal projector file. see examples/llava/README.md",
         [](common_params & params, const std::string & value) {
             params.mmproj.path = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
+    ).set_examples(mmproj_examples));
     add_opt(common_arg(
         {"--mmproj-url"}, "URL",
-        "URL to a multimodal projector file for LLaVA. see examples/llava/README.md",
+        "URL to a multimodal projector file. see examples/llava/README.md",
         [](common_params & params, const std::string & value) {
             params.mmproj.url = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
+    ).set_examples(mmproj_examples));
+    add_opt(common_arg(
+        {"--no-mmproj"},
+        "explicitly disable multimodal projector, useful when using -hf",
+        [](common_params & params) {
+            params.no_mmproj = true;
+        }
+    ).set_examples(mmproj_examples));
     add_opt(common_arg(
         {"--image"}, "FILE",
         "path to an image file. use with multimodal models. Specify multiple times for batching",
@@ -2381,6 +2414,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     add_opt(common_arg(
         {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]",
         "Hugging Face model repository; quant is optional, case-insensitive, default to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n"
+        "mmproj is also downloaded automatically if available. to disable, add --no-mmproj\n"
         "example: unsloth/phi-4-GGUF:q4_k_m\n"
         "(default: unused)",
         [](common_params & params, const std::string & value) {
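Taken together with the parsing changes above, the updated -hf help text describes the intended behavior: when a model is fetched via -hf/--hf-repo, an mmproj file found in the same repository is selected automatically and downloaded for the examples listed in mmproj_examples, an explicit --mmproj or --mmproj-url still takes precedence, and --no-mmproj disables the projector entirely.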