Commit 96d2f54

Apply style/style suggestions for server example
1 parent 5fd23ce commit 96d2f54


examples/server/server.cpp

Lines changed: 6 additions & 6 deletions
@@ -61,7 +61,7 @@ struct llama_server_context
     std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
     // compare the evaluated prompt with the new prompt
     int new_prompt_len = 0;
-    for (unsigned int i = 0;i < prompt_tokens.size(); i++) {
+    for (size_t i = 0; i < prompt_tokens.size(); i++) {
         if (i < processed_tokens.size() &&
             processed_tokens[i] == prompt_tokens[i])
         {
@@ -71,7 +71,7 @@ struct llama_server_context
         {
             embd_inp.push_back(prompt_tokens[i]);
             if(new_prompt_len == 0) {
-                if(((int)i) - 1 < (int)n_past) {
+                if(((int32_t)i) - 1 < n_past) {
                     processed_tokens.erase(processed_tokens.begin() + i, processed_tokens.end());
                 }
                 // Evaluate the new fragment prompt from the last token processed.
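
Switching the loop index from unsigned int to size_t matches the return type of std::vector::size(), so the loop condition involves no implicit conversion, and the one place that genuinely needs a signed comparison against n_past gets a single explicit int32_t cast. Below is a minimal standalone sketch of that pattern, assuming n_past is an int32_t count of already-evaluated tokens; the vectors are placeholders, not the server's real state.

#include <cstdint>
#include <vector>

int main() {
    std::vector<int> prompt_tokens    = {10, 11, 12, 13};  // placeholder new prompt
    std::vector<int> processed_tokens = {10, 11};           // placeholder evaluated prefix
    int32_t n_past = (int32_t)processed_tokens.size();      // tokens already evaluated

    // size_t index matches std::vector::size(), avoiding signed/unsigned warnings.
    for (size_t i = 0; i < prompt_tokens.size(); i++) {
        // One explicit cast where a signed comparison is actually needed.
        if ((int32_t)i - 1 < n_past) {
            // ... keep or re-evaluate tokens from this position ...
        }
    }
    return 0;
}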
@@ -306,12 +306,12 @@ struct llama_server_context
         // Avoid add the no show words to the response
         for (std::vector<llama_token> word_tokens : no_show_words)
         {
-            unsigned int match_token = 1;
+            size_t match_token = 1;
             if (tokens_predicted.front() == word_tokens.front())
             {
                 bool execute_matching = true;
                 if (tokens_predicted.size() > 1) { // if previus tokens had been tested
-                    for (unsigned int i = 1; i < word_tokens.size(); i++)
+                    for (size_t i = 1; i < word_tokens.size(); i++)
                     {
                         if (i >= tokens_predicted.size()) {
                             match_token = i;
@@ -649,7 +649,7 @@ int main(int argc, char **argv)
                     {"tokens_predicted", llama.num_tokens_predicted}};
                 return res.set_content(data.dump(), "application/json");
             }
-            catch (json::exception const &e)
+            catch (const json::exception &e)
             {
                 // Some tokens have bad UTF-8 strings, the json parser is very sensitive
                 json data = {
@@ -701,7 +701,7 @@ int main(int argc, char **argv)
                     {"content", result },
                     {"stop", !llama.has_next_token }};
                 return res.set_content(data.dump(), "application/json");
-            } catch (json::exception const &e) {
+            } catch (const json::exception &e) {
                 // Some tokens have bad UTF-8 strings, the json parser is very sensitive
                 json data = {
                     {"content", "" },
