1
1
import type { PipelineType } from "../pipelines.js" ;
2
+ import type { ChatCompletionInputMessage , GenerationParameters } from "../tasks/index.js" ;
3
+ import { stringifyGenerationConfig , stringifyMessages } from "./common.js" ;
2
4
import { getModelInputSnippet } from "./inputs.js" ;
3
- import type { ModelDataMinimal } from "./types.js" ;
5
+ import type { InferenceSnippet , ModelDataMinimal } from "./types.js" ;
4
6
5
/**
 * Generic curl snippet: POSTs the task's example inputs (from
 * `getModelInputSnippet`) as a JSON body to the Inference API endpoint
 * for `model.id`.
 *
 * When `accessToken` is empty, the literal `{API_TOKEN}` placeholder is
 * emitted instead so users can paste their own token.
 */
export const snippetBasic = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
	content: `curl https://api-inference.huggingface.co/models/${model.id} \\
	-X POST \\
	-d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
	-H 'Content-Type: application/json' \\
	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
});
11
14
12
/**
 * curl snippet for text generation.
 *
 * For models tagged "conversational", emits an OpenAI-compatible
 * `/v1/chat/completions` request; messages and generation parameters can be
 * overridden through `opts`. For all other models it defers to
 * `snippetBasic`.
 */
export const snippetTextGeneration = (
	model: ModelDataMinimal,
	accessToken: string,
	opts?: {
		// Defaults to true when omitted (see `streaming` below).
		streaming?: boolean;
		// Defaults to a single sample user message when omitted.
		messages?: ChatCompletionInputMessage[];
		temperature?: GenerationParameters["temperature"];
		// Defaults to 500 when omitted.
		max_tokens?: GenerationParameters["max_tokens"];
		top_p?: GenerationParameters["top_p"];
	}
): InferenceSnippet => {
	if (model.tags.includes("conversational")) {
		// Conversational model detected, so we display a code snippet that features the Messages API
		const streaming = opts?.streaming ?? true;
		const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
			{ role: "user", content: "What is the capital of France?" },
		];

		// temperature/top_p are only included when explicitly provided (truthy);
		// max_tokens is always present, defaulting to 500.
		const config = {
			...(opts?.temperature ? { temperature: opts.temperature } : undefined),
			max_tokens: opts?.max_tokens ?? 500,
			...(opts?.top_p ? { top_p: opts.top_p } : undefined),
		};
		return {
			content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
-H 'Content-Type: application/json' \\
--data '{
    "model": "${model.id}",
    "messages": ${stringifyMessages(messages, {
			sep: ",\n\t\t",
			start: `[\n\t\t`,
			end: `\n\t]`,
			attributeKeyQuotes: true,
			// Escape single quotes so message content survives the surrounding
			// single-quoted shell argument ('...' -> '\''...).
			customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
		})},
    ${stringifyGenerationConfig(config, {
			sep: ",\n    ",
			start: "",
			end: "",
			attributeKeyQuotes: true,
			attributeValueConnector: ": ",
		})},
    "stream": ${!!streaming}
}'`,
		};
	} else {
		return snippetBasic(model, accessToken);
	}
};
29
65
30
- export const snippetImageTextToTextGeneration = ( model : ModelDataMinimal , accessToken : string ) : string => {
66
+ export const snippetImageTextToTextGeneration = ( model : ModelDataMinimal , accessToken : string ) : InferenceSnippet => {
31
67
if ( model . tags . includes ( "conversational" ) ) {
32
68
// Conversational model detected, so we display a code snippet that features the Messages API
33
- return `curl 'https://api-inference.huggingface.co/models/${ model . id } /v1/chat/completions' \\
69
+ return {
70
+ content : `curl 'https://api-inference.huggingface.co/models/${ model . id } /v1/chat/completions' \\
34
71
-H "Authorization: Bearer ${ accessToken || `{API_TOKEN}` } " \\
35
72
-H 'Content-Type: application/json' \\
36
73
-d '{
@@ -47,26 +84,34 @@ export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, access
47
84
"max_tokens": 500,
48
85
"stream": false
49
86
}'
50
- ` ;
87
+ ` ,
88
+ } ;
51
89
} else {
52
90
return snippetBasic ( model , accessToken ) ;
53
91
}
54
92
} ;
55
93
56
/**
 * curl snippet for zero-shot classification: posts the example inputs
 * together with a hard-coded set of candidate labels
 * ("refund", "legal", "faq") as the `parameters` field.
 */
export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
	content: `curl https://api-inference.huggingface.co/models/${model.id} \\
	-X POST \\
	-d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
	-H 'Content-Type: application/json' \\
	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
});
62
101
63
/**
 * curl snippet for tasks whose input is a file (e.g. audio/image pipelines):
 * uploads the example file with `--data-binary '@file'` instead of a JSON
 * body, so no Content-Type header is set.
 */
export const snippetFile = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
	content: `curl https://api-inference.huggingface.co/models/${model.id} \\
	-X POST \\
	--data-binary '@${getModelInputSnippet(model, true, true)}' \\
	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
});
68
108
69
- export const curlSnippets : Partial < Record < PipelineType , ( model : ModelDataMinimal , accessToken : string ) => string > > = {
109
+ export const curlSnippets : Partial <
110
+ Record <
111
+ PipelineType ,
112
+ ( model : ModelDataMinimal , accessToken : string , opts ?: Record < string , unknown > ) => InferenceSnippet
113
+ >
114
+ > = {
70
115
// Same order as in js/src/lib/interfaces/Types.ts
71
116
"text-classification" : snippetBasic ,
72
117
"token-classification" : snippetBasic ,
@@ -93,10 +138,10 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal
93
138
"image-segmentation" : snippetFile ,
94
139
} ;
95
140
96
- export function getCurlInferenceSnippet ( model : ModelDataMinimal , accessToken : string ) : string {
141
+ export function getCurlInferenceSnippet ( model : ModelDataMinimal , accessToken : string ) : InferenceSnippet {
97
142
return model . pipeline_tag && model . pipeline_tag in curlSnippets
98
- ? curlSnippets [ model . pipeline_tag ] ?.( model , accessToken ) ?? ""
99
- : "" ;
143
+ ? curlSnippets [ model . pipeline_tag ] ?.( model , accessToken ) ?? { content : "" }
144
+ : { content : "" } ;
100
145
}
101
146
102
147
export function hasCurlInferenceSnippet ( model : Pick < ModelDataMinimal , "pipeline_tag" > ) : boolean {
0 commit comments