Skip to content

Commit a0a2b61

Browse files
committed
Don't ignore llama.cpp params
1 parent 439b3fc commit a0a2b61

File tree

1 file changed

+0
-15
lines changed

1 file changed

+0
-15
lines changed

examples/server/utils.hpp

Lines changed: 0 additions & 15 deletions
Original file line number · Diff line number · Diff line change
@@ -355,23 +355,8 @@ static json oaicompat_completion_params_parse(
355355

356356
llama_params["__oaicompat"] = true;
357357

358-
// Map OpenAI parameters to llama.cpp parameters
359-
//
360-
// For parameters that are defined by the OpenAI documentation (e.g.
361-
// temperature), we explicitly specify OpenAI's intended default; we
362-
// need to do that because sometimes OpenAI disagrees with llama.cpp
363-
//
364-
// https://platform.openai.com/docs/api-reference/chat/create
365358
llama_sampling_params default_sparams;
366359
llama_params["model"] = json_value(body, "model", std::string("unknown"));
367-
llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
368-
llama_params["logit_bias"] = json_value(body, "logit_bias", json::object());
369-
llama_params["n_predict"] = json_value(body, "max_tokens", -1);
370-
llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
371-
llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED);
372-
llama_params["stream"] = json_value(body, "stream", false);
373-
llama_params["temperature"] = json_value(body, "temperature", 1.0);
374-
llama_params["top_p"] = json_value(body, "top_p", 1.0);
375360

376361
// Apply chat template to the list of messages
377362
llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

0 commit comments

Comments (0)