     c_size_t,
 )
 import pathlib
+from typing import List
 
 
 # Load the library
 def _load_shared_library(lib_base_name: str):
+    # Construct the paths to the possible shared library names
+    _base_path = pathlib.Path(__file__).parent.resolve()
+    # Searching for the library in the current directory under the name "libllama" (default name
+    # for llamacpp) and "llama" (default name for this repo)
+    _lib_paths: List[pathlib.Path] = []
     # Determine the file extension based on the platform
     if sys.platform.startswith("linux"):
-        lib_ext = ".so"
+        _lib_paths += [
+            _base_path / f"lib{lib_base_name}.so",
+        ]
     elif sys.platform == "darwin":
-        lib_ext = ".so"
+        _lib_paths += [
+            _base_path / f"lib{lib_base_name}.so",
+            _base_path / f"lib{lib_base_name}.dylib",
+        ]
     elif sys.platform == "win32":
-        lib_ext = ".dll"
+        _lib_paths += [
+            _base_path / f"{lib_base_name}.dll",
+        ]
     else:
         raise RuntimeError("Unsupported platform")
 
-    # Construct the paths to the possible shared library names
-    _base_path = pathlib.Path(__file__).parent.resolve()
-    # Searching for the library in the current directory under the name "libllama" (default name
-    # for llamacpp) and "llama" (default name for this repo)
-    _lib_paths = [
-        _base_path / f"lib{lib_base_name}{lib_ext}",
-        _base_path / f"{lib_base_name}{lib_ext}",
-    ]
-
     if "LLAMA_CPP_LIB" in os.environ:
         lib_base_name = os.environ["LLAMA_CPP_LIB"]
         _lib = pathlib.Path(lib_base_name)
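The change above replaces the single `lib_ext` suffix with a per-platform list of candidate paths, which is what lets macOS try both `.so` and `.dylib` before giving up. For context, a minimal sketch of how such a candidate list is typically consumed; the actual resolution loop sits further down in `_load_shared_library` (outside this hunk), and the helper name `_try_load` is invented for illustration:

import ctypes

def _try_load(lib_paths):
    # Try each candidate in order and return the first library that loads.
    for path in lib_paths:
        if path.exists():
            try:
                return ctypes.CDLL(str(path))
            except Exception as e:
                raise RuntimeError(f"Failed to load shared library '{path}': {e}")
    raise FileNotFoundError(f"Shared library not found in any of: {lib_paths}")

# e.g. _lib = _try_load(_lib_paths)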
@@ -160,6 +164,7 @@ class llama_token_data_array(Structure):
 # bool use_mlock; // force system to keep model in RAM
 # bool embedding; // embedding mode only
 
+
 # // called with a progress value between 0 and 1, pass NULL to disable
 # llama_progress_callback progress_callback;
 # // context pointer passed to the progress callback
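The `progress_callback` field commented above crosses the C/Python boundary, so on the Python side it needs a ctypes function type. A sketch, assuming the usual llama.h typedef `void (*llama_progress_callback)(float progress, void *ctx);` (the signature is an assumption; only the field name appears in this hunk):

from ctypes import CFUNCTYPE, c_float, c_void_p

# Assumed C typedef: void (*llama_progress_callback)(float progress, void *ctx);
llama_progress_callback = CFUNCTYPE(None, c_float, c_void_p)

# Example: wrap a Python callable so the C side can invoke it during model load.
@llama_progress_callback
def print_progress(progress, ctx):  # progress is in [0, 1]; ctx is the user pointer
    print(f"load: {progress:.0%}")

Keep a reference to the wrapped callable alive for as long as the C side may invoke it; ctypes does not hold one for you.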