Commit a9b9f03

Format
1 parent abc538f commit a9b9f03

1 file changed: +5 -5 lines changed

llama_cpp/llama.py

Lines changed: 5 additions & 5 deletions
@@ -224,7 +224,7 @@ def __init__(
         rope_freq_base: float = 10000.0,
         rope_freq_scale: float = 1.0,
         n_gqa: Optional[int] = None,  # (TEMPORARY) must be 8 for llama2 70b
-        rms_norm_eps: Optional[float] = None, # (TEMPORARY)
+        rms_norm_eps: Optional[float] = None,  # (TEMPORARY)
         verbose: bool = True,
     ):
         """Load a llama.cpp model from `model_path`.
@@ -277,7 +277,9 @@ def __init__(
 
         if self.tensor_split is not None:
             FloatArray = (ctypes.c_float * len(self.tensor_split))(*self.tensor_split)
-            self._p_tensor_split = ctypes.POINTER(ctypes.c_float)(FloatArray) # keep a reference to the array so it is not gc'd
+            self._p_tensor_split = ctypes.POINTER(ctypes.c_float)(
+                FloatArray
+            )  # keep a reference to the array so it is not gc'd
             self.params.tensor_split = self._p_tensor_split
 
         self.params.rope_freq_base = rope_freq_base
@@ -959,9 +961,7 @@ def _create_completion(
             for token in remaining_tokens:
                 token_end_position += len(self.detokenize([token]))
                 # Check if stop sequence is in the token
-                if token_end_position >= (
-                    remaining_length - first_stop_position
-                ):
+                if token_end_position >= (remaining_length - first_stop_position):
                     break
                 logprobs_or_none: Optional[CompletionLogprobs] = None
                 if logprobs is not None:
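
For readers unfamiliar with the ctypes idiom the second hunk re-wraps: the intermediate `self._p_tensor_split` exists so the float array stays referenced from Python while the native library reads through the pointer. The following is a minimal standalone sketch of that pattern, assuming a hypothetical `FakeOwner` object and `set_tensor_split` helper in place of the real `Llama` class; it is not llama-cpp-python code, and only the array/pointer construction and the intent stated in the diff's comment are taken from the commit.

# Minimal, standalone sketch of the ctypes pattern the second hunk re-wraps.
# FakeOwner and set_tensor_split are illustrative names, not llama-cpp-python API.
import ctypes


class FakeOwner:
    """Stand-in for the object that owns the params struct and _p_tensor_split."""


def set_tensor_split(owner, tensor_split):
    # Build a C float array from the Python list (e.g. per-device split fractions).
    FloatArray = (ctypes.c_float * len(tensor_split))(*tensor_split)
    # Wrap it in a float* and stash it on the owner, as the diff's comment says,
    # so the array is not gc'd while native code may still read through the pointer.
    owner._p_tensor_split = ctypes.POINTER(ctypes.c_float)(FloatArray)
    return owner._p_tensor_split


if __name__ == "__main__":
    owner = FakeOwner()
    p = set_tensor_split(owner, [0.6, 0.4])
    print(p[0], p[1])  # reads the values back through the pointer (as C floats)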
