Skip to content

Commit 11b59f5

Browse files
boxabirds and mishig25 authored
Add ollama (#687)
Very popular local LLM inference server. --------- Co-authored-by: Mishig Davaadorj <[email protected]>
1 parent 01ad4c1 commit 11b59f5

File tree

3 files changed

+26
-0
lines changed

3 files changed

+26
-0
lines changed

packages/tasks/package.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,5 +51,8 @@
5151
"@types/node": "^20.11.5",
5252
"quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.17/packages/quicktype-core/quicktype-core-18.0.17.tgz",
5353
"type-fest": "^3.13.1"
54+
},
55+
"dependencies": {
56+
"@huggingface/gguf": "workspace:^"
5457
}
5558
}

packages/tasks/pnpm-lock.yaml

Lines changed: 5 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

packages/tasks/src/local-apps.ts

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import type { ModelData } from "./model-data";
22
import type { PipelineType } from "./pipelines";
3+
import { parseGGUFQuantLabel } from "@huggingface/gguf";
34

45
export interface LocalAppSnippet {
56
/**
@@ -53,6 +54,7 @@ export type LocalApp = {
5354
/**
5455
* And if not (mostly llama.cpp), snippet to copy/paste in your terminal
5556
* Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
57+
* Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags, or removed if the repo does not contain multiple quant files.
5658
*/
5759
snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
5860
}
@@ -143,6 +145,15 @@ const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSn
143145
];
144146
};
145147

148+
const snippetOllama = (model: ModelData, filepath?: string): string => {
149+
if (filepath) {
150+
const quantLabel = parseGGUFQuantLabel(filepath);
151+
const ollamatag = quantLabel ? `:${quantLabel}` : "";
152+
return `ollama run hf.co/${model.id}${ollamatag}`;
153+
}
154+
return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
155+
};
156+
146157
const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
147158
const command = (binary: string) =>
148159
["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
@@ -389,6 +400,13 @@ export const LOCAL_APPS = {
389400
displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
390401
deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`),
391402
},
403+
ollama: {
404+
prettyLabel: "Ollama",
405+
docsUrl: "https://ollama.com",
406+
mainTask: "text-generation",
407+
displayOnModelPage: isLlamaCppGgufModel,
408+
snippet: snippetOllama,
409+
},
392410
} satisfies Record<string, LocalApp>;
393411

394412
export type LocalAppKey = keyof typeof LOCAL_APPS;

0 commit comments

Comments (0)