@@ -899,6 +899,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.interactive = true;
         return true;
     }
+    if (arg == "--interactive-specials") {
+        params.interactive_specials = true;
+        return true;
+    }
     if (arg == "--embedding") {
         params.embedding = true;
         return true;
@@ -1416,6 +1420,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  -h, --help            show this help message and exit\n");
     printf("  --version             show version and build info\n");
     printf("  -i, --interactive     run in interactive mode\n");
+    printf("  --interactive-specials allow special tokens in user text, in interactive mode\n");
     printf("  --interactive-first   run in interactive mode and wait for input right away\n");
     printf("  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     printf("  -cml, --chatml        run in chatml mode (use with ChatML-compatible models)\n");
@@ -2645,6 +2650,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
     fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
     fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
+    fprintf(stream, "interactive_specials: %s # default: false\n", params.interactive_specials ? "true" : "false");
     fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
     fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
     fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());