
Commit 6f4effe

Merge branch 'main' into patch-1

2 parents: 6c9b5da + 683cbd0

File tree: 1 file changed (+24 −0 lines)

packages/tasks/src/local-apps.ts (24 additions, 0 deletions)
@@ -127,6 +127,23 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
   ];
 };
 
+const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+  return [
+    {
+      title: "Chat with the model",
+      content: [
+        `npx -y node-llama-cpp chat \\`,
+        ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+        ` --prompt 'Hi there!'`,
+      ].join("\n"),
+    },
+    {
+      title: "Estimate the model compatibility with your hardware",
+      content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
+    },
+  ];
+};
+
 const snippetOllama = (model: ModelData, filepath?: string): string => {
   if (filepath) {
     const quantLabel = parseGGUFQuantLabel(filepath);
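
The new snippetNodeLlamaCppCli helper above builds two CLI snippets (a chat command and a hardware-compatibility estimate) from the model id and an optional GGUF filename. As a rough illustration of its output, the sketch below assumes it runs alongside the definitions in local-apps.ts, where ModelData and the helper itself are in scope; the model id and .gguf filename are placeholders, not real defaults.

// Illustrative sketch only: "someone/some-model-GGUF" and the .gguf filename are placeholders.
const exampleModel = { id: "someone/some-model-GGUF" } as ModelData;

const snippets = snippetNodeLlamaCppCli(exampleModel, "some-model.Q4_K_M.gguf");

// Each entry is a { title, content } pair; the content of the first entry is:
//   npx -y node-llama-cpp chat \
//    --model "hf:someone/some-model-GGUF/some-model.Q4_K_M.gguf" \
//    --prompt 'Hi there!'
for (const { title, content } of snippets) {
  console.log(`${title}:\n${content}\n`);
}
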
@@ -245,6 +262,13 @@ export const LOCAL_APPS = {
     displayOnModelPage: isLlamaCppGgufModel,
     snippet: snippetLlamacpp,
   },
+  "node-llama-cpp": {
+    prettyLabel: "node-llama-cpp",
+    docsUrl: "https://node-llama-cpp.withcat.ai",
+    mainTask: "text-generation",
+    displayOnModelPage: isLlamaCppGgufModel,
+    snippet: snippetNodeLlamaCppCli,
+  },
   vllm: {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
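
The second hunk registers the helper in LOCAL_APPS alongside llama.cpp and vLLM, gated by the same isLlamaCppGgufModel check. As a hedged, consumer-side sketch that relies only on the fields visible in this diff (with LOCAL_APPS and ModelData assumed to be in scope and a placeholder model object), the entry could be used roughly like this:

// Hypothetical consumer-side sketch; everything not shown in the diff is a placeholder.
const model = { id: "someone/some-model-GGUF" } as ModelData;
const app = LOCAL_APPS["node-llama-cpp"];

// displayOnModelPage gates the entry to llama.cpp-compatible GGUF models;
// snippet() then returns { title, content } entries ready to render on the model page.
if (app.displayOnModelPage(model)) {
  for (const { title, content } of app.snippet(model, "some-model.Q4_K_M.gguf")) {
    console.log(`${title}\n${content}\n`);
  }
}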
