Skip to content

Commit e0f5140

Browse files
Log model name in YAML dump: pass the model type string (via llama_model_type) into dump_non_result_info_yaml and emit it as a `model:` field (adapted from the llama_bench PR)
1 parent 88999fb commit e0f5140

File tree

4 files changed

+12
-10
lines changed

4 files changed

+12
-10
lines changed

examples/common.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -735,7 +735,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
735735
}
736736

737737
void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const std::string & timestamp,
738-
const std::vector<int> & prompt_tokens) {
738+
const std::vector<int> & prompt_tokens, const char * model) {
739739
fprintf(stream, "build_commit: %s\n", BUILD_COMMIT);
740740
fprintf(stream, "build_number: %d\n", BUILD_NUMBER);
741741
fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false");
@@ -763,6 +763,8 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const s
763763
fprintf(stream, "debug: true\n");
764764
#endif // NDEBUG
765765

766+
fprintf(stream, "model: %s\n", model);
767+
766768
#ifdef __OPTIMIZE__
767769
fprintf(stream, "optimize: true\n");
768770
#else

examples/common.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,4 +115,6 @@ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::s
115115
std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params);
116116
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
117117

118-
void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const std::string & timestamp, const std::vector<int> & prompt_tokens);
118+
void dump_non_result_info_yaml(
119+
FILE * stream, const gpt_params & params, const std::string & timestamp,
120+
const std::vector<int> & prompt_tokens, const char * model);

examples/main/main.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -835,17 +835,16 @@ int main(int argc, char ** argv) {
835835

836836
FILE * logfile = fopen((params.logdir + timestamp + ".yml").c_str(), "w");
837837
fprintf(logfile, "binary: main\n");
838-
dump_non_result_info_yaml(logfile, params, timestamp, input_tokens);
838+
char model_type[128];
839+
llama_model_type(model, model_type, sizeof(model_type));
840+
dump_non_result_info_yaml(logfile, params, timestamp, input_tokens, model_type);
839841

840842
fprintf(logfile, "\n");
841843
fprintf(logfile, "######################\n");
842844
fprintf(logfile, "# Generation Results #\n");
843845
fprintf(logfile, "######################\n");
844846
fprintf(logfile, "\n");
845847

846-
// fprintf(logfile, "ftype: %u\n", ctx->model.hparams.ftype);
847-
// fprintf(logfile, "ftype_str: %s\n", llama_ftype_name(ctx->model.hparams.ftype));
848-
// fprintf(logfile, "model_type: %s\n", llama_model_type_name(ctx->model.type));
849848
dump_string_yaml_multiline(logfile, "output", output_ss.str().c_str(), false);
850849
dump_vector_int_yaml(logfile, "output_tokens", output_tokens);
851850

examples/perplexity/perplexity.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -368,17 +368,16 @@ int main(int argc, char ** argv) {
368368

369369
FILE * logfile = fopen((params.logdir + timestamp + ".yml").c_str(), "w");
370370
fprintf(logfile, "binary: perplexity\n");
371-
dump_non_result_info_yaml(logfile, params, timestamp, tokens);
371+
char model_type[128];
372+
llama_model_type(model, model_type, sizeof(model_type));
373+
dump_non_result_info_yaml(logfile, params, timestamp, tokens, model_type);
372374

373375
fprintf(logfile, "\n");
374376
fprintf(logfile, "######################\n");
375377
fprintf(logfile, "# Perplexity Results #\n");
376378
fprintf(logfile, "######################\n");
377379
fprintf(logfile, "\n");
378380

379-
// fprintf(logfile, "ftype: %u\n", ctx->model.hparams.ftype);
380-
// fprintf(logfile, "ftype_str: %s\n", llama_ftype_name(ctx->model.hparams.ftype));
381-
// fprintf(logfile, "model_type: %s\n", llama_model_type_name(ctx->model.type));
382381
dump_vector_float_yaml(logfile, "probs", probs);
383382

384383
llama_dump_timing_info_yaml(logfile, ctx);

0 commit comments

Comments
 (0)