Skip to content

Commit 940efa9

Browse files
ggerganov and monatis authored
llava : fix tokenization to not add bos between image embeddings and user prompt (#3645)
* llava : fix tokenization to not add bos after system prompt * set seed --------- Co-authored-by: M. Yusuf Sarıgöz <[email protected]>
1 parent 11bff29 commit 940efa9

File tree

2 files changed

+9
-7
lines changed

2 files changed

+9
-7
lines changed

examples/llava/llava-utils.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -49,9 +49,9 @@ inline bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
4949
return eval_tokens(ctx_llama, tokens, 1, n_past);
5050
}
5151

52-
inline bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past){
52+
inline bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
5353
std::string str2 = str;
54-
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, true);
54+
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos);
5555
eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
5656
return true;
5757
}

examples/llava/llava.cpp

Lines changed: 7 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -97,6 +97,7 @@ int main(int argc, char ** argv) {
9797
ctx_params.n_ctx = params.n_ctx < 2048 ? 2048 : params.n_ctx; // we need a longer context size to process image embeddings
9898
ctx_params.n_threads = params.n_threads;
9999
ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
100+
ctx_params.seed = params.seed;
100101

101102
llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
102103

@@ -106,7 +107,8 @@ int main(int argc, char ** argv) {
106107
}
107108

108109
// make sure that the correct mmproj was used, i.e., compare apples to apples
109-
int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama));
110+
const int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama));
111+
110112
if (n_img_embd != n_llama_embd) {
111113
printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd);
112114

@@ -125,14 +127,14 @@ int main(int argc, char ** argv) {
125127

126128
const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
127129

128-
// GG: are we sure that the should be a trailing whitespace at the end of this string?
129-
eval_string(ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params.n_batch, &n_past);
130+
eval_string(ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:", params.n_batch, &n_past, true);
130131
eval_image_embd(ctx_llama, image_embd, n_img_pos, params.n_batch, &n_past);
131-
eval_string(ctx_llama, params.prompt.c_str(), params.n_batch, &n_past);
132-
eval_string(ctx_llama, "\nASSISTANT:", params.n_batch, &n_past);
132+
eval_string(ctx_llama, (params.prompt + "\nASSISTANT:").c_str(), params.n_batch, &n_past, false);
133133

134134
// generate the response
135135

136+
printf("\n");
137+
printf("prompt: '%s'\n", params.prompt.c_str());
136138
printf("\n");
137139

138140
for (int i = 0; i < max_tgt_len; i++) {

0 commit comments

Comments (0)