We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f1dcbb4 · commit ac86ac0 — Copy full SHA for ac86ac0
examples/low_level_api_chat_cpp.py
@@ -409,12 +409,15 @@ def generate(self):
409
# replace end of text token with newline token when in interactive mode
410
if (id == llama_cpp.llama_token_eos() and self.params.interactive and not self.params.instruct):
411
id = self.llama_token_newline[0]
412
+ self.embd.append(id)
413
if (self.use_antiprompt()):
414
# tokenize and inject first reverse prompt
415
self.embd_inp += self.first_antiprompt[0]
-
416
- # add it to the context
417
- self.embd.append(id)
+ for id in self.first_antiprompt[0]:
418
+ else:
419
+ # add it to the context
420
421
422
# echo this to console
423
self.output_echo = True
0 commit comments