@@ -127,6 +127,23 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 	];
 };
 
+const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+	return [
+		{
+			title: "Chat with the model",
+			content: [
+				`npx -y node-llama-cpp chat \\`,
+				`  --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+				`  --prompt 'Hi there!'`,
+			].join("\n"),
+		},
+		{
+			title: "Estimate the model compatibility with your hardware",
+			content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
+		},
+	];
+};
+
 const snippetOllama = (model: ModelData, filepath?: string): string => {
 	if (filepath) {
 		const quantLabel = parseGGUFQuantLabel(filepath);
@@ -245,6 +262,13 @@ export const LOCAL_APPS = {
 		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetLlamacpp,
 	},
+	"node-llama-cpp": {
+		prettyLabel: "node-llama-cpp",
+		docsUrl: "https://node-llama-cpp.withcat.ai",
+		mainTask: "text-generation",
+		displayOnModelPage: isLlamaCppGgufModel,
+		snippet: snippetNodeLlamaCppCli,
+	},
 	vllm: {
 		prettyLabel: "vLLM",
 		docsUrl: "https://docs.vllm.ai",
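For context, a minimal usage sketch of how the new snippet function would render. The sample model id, GGUF file name, and the `./model-data` import path below are assumptions for illustration only and are not part of this diff.

```ts
import type { ModelData } from "./model-data"; // assumed import path for illustration

// Hypothetical call (not part of the diff): render the commands for a sample GGUF model.
const model = { id: "bartowski/Llama-3.2-1B-Instruct-GGUF" } as ModelData;
const snippets = snippetNodeLlamaCppCli(model, "Llama-3.2-1B-Instruct-Q4_K_M.gguf");

// snippets[0].content:
//   npx -y node-llama-cpp chat \
//     --model "hf:bartowski/Llama-3.2-1B-Instruct-GGUF/Llama-3.2-1B-Instruct-Q4_K_M.gguf" \
//     --prompt 'Hi there!'
//
// snippets[1].content:
//   npx -y node-llama-cpp inspect estimate "hf:bartowski/Llama-3.2-1B-Instruct-GGUF/Llama-3.2-1B-Instruct-Q4_K_M.gguf"
```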