
[Inference snippets] VLM hf_hub, oai snippets #985


Merged · 1 commit · Oct 28, 2024
51 changes: 19 additions & 32 deletions packages/tasks/src/snippets/curl.ts
@@ -26,9 +26,24 @@ export const snippetTextGeneration = (
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
const streaming = opts?.streaming ?? true;
const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
{ role: "user", content: "What is the capital of France?" },
];
const exampleMessages: ChatCompletionInputMessage[] =
model.pipeline_tag === "text-generation"
? [{ role: "user", content: "What is the capital of France?" }]
: [
{
role: "user",
content: [
{
type: "image_url",
image_url: {
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
},
{ type: "text", text: "Describe this image in one sentence." },
],
},
];
const messages = opts?.messages ?? exampleMessages;

const config = {
...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -63,34 +78,6 @@
}
};

export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
return {
content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
-H 'Content-Type: application/json' \\
-d '{
"model": "${model.id}",
"messages": [
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
{"type": "text", "text": "Describe this image in one sentence."}
]
}
],
"max_tokens": 500,
"stream": false
}'
`,
};
} else {
return snippetBasic(model, accessToken);
}
};

export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
-X POST \\
@@ -122,7 +109,7 @@ export const curlSnippets: Partial<
summarization: snippetBasic,
"feature-extraction": snippetBasic,
"text-generation": snippetTextGeneration,
"image-text-to-text": snippetImageTextToTextGeneration,
"image-text-to-text": snippetTextGeneration,
"text2text-generation": snippetBasic,
"fill-mask": snippetBasic,
"sentence-similarity": snippetBasic,
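With `snippetImageTextToTextGeneration` removed, the `image-text-to-text` entry now reuses `snippetTextGeneration`, which falls back to the image_url + text example messages whenever `model.pipeline_tag` is not `"text-generation"`. A minimal sketch of the new routing — the import paths, the exact `ModelDataMinimal` field set, and the model id are illustrative assumptions, not part of this PR:

```ts
// Sketch only: import paths and the ModelDataMinimal field set are assumed.
import { curlSnippets } from "./curl";
import type { ModelDataMinimal } from "./types";

const model: ModelDataMinimal = {
	id: "meta-llama/Llama-3.2-11B-Vision-Instruct", // illustrative conversational VLM
	pipeline_tag: "image-text-to-text",
	tags: ["conversational"],
};

// Same generator as "text-generation"; the pipeline_tag only changes the
// example messages baked into the generated curl command.
const snippet = curlSnippets["image-text-to-text"]?.(model, "{API_TOKEN}");
console.log(snippet?.content);
```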
53 changes: 19 additions & 34 deletions packages/tasks/src/snippets/js.ts
@@ -40,9 +40,24 @@ export const snippetTextGeneration = (
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
const streaming = opts?.streaming ?? true;
const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
{ role: "user", content: "What is the capital of France?" },
];
const exampleMessages: ChatCompletionInputMessage[] =
model.pipeline_tag === "text-generation"
? [{ role: "user", content: "What is the capital of France?" }]
: [
{
role: "user",
content: [
{
type: "image_url",
image_url: {
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
},
{ type: "text", text: "Describe this image in one sentence." },
],
},
];
const messages = opts?.messages ?? exampleMessages;
const messagesStr = stringifyMessages(messages, { sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" });

const config = {
@@ -148,36 +163,6 @@ console.log(chatCompletion.choices[0].message);`,
}
};

export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
return {
content: `import { HfInference } from "@huggingface/inference";

const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";

for await (const chunk of inference.chatCompletionStream({
model: "${model.id}",
messages: [
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": imageUrl}},
{"type": "text", "text": "Describe this image in one sentence."},
],
}
],
max_tokens: 500,
})) {
process.stdout.write(chunk.choices[0]?.delta?.content || "");
}`,
};
} else {
return snippetBasic(model, accessToken);
}
};

export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
content: `async function query(data) {
const response = await fetch(
@@ -307,7 +292,7 @@ export const jsSnippets: Partial<
summarization: snippetBasic,
"feature-extraction": snippetBasic,
"text-generation": snippetTextGeneration,
"image-text-to-text": snippetImageTextToTextGeneration,
"image-text-to-text": snippetTextGeneration,
"text2text-generation": snippetBasic,
"fill-mask": snippetBasic,
"sentence-similarity": snippetBasic,
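For reference, this is roughly what the unified generator emits for a conversational image-text-to-text model with streaming enabled — reconstructed from the removed `snippetImageTextToTextGeneration` template and the new example messages, so exact formatting may differ, and the OpenAI-client variant this PR also covers is not reconstructed here:

```ts
import { HfInference } from "@huggingface/inference";

const inference = new HfInference("{API_TOKEN}");

// "<model-id>" stands in for the interpolated model.id
for await (const chunk of inference.chatCompletionStream({
	model: "<model-id>",
	messages: [
		{
			role: "user",
			content: [
				{
					type: "image_url",
					image_url: {
						url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
					},
				},
				{ type: "text", text: "Describe this image in one sentence." },
			],
		},
	],
	max_tokens: 500,
})) {
	process.stdout.write(chunk.choices[0]?.delta?.content || "");
}
```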
52 changes: 20 additions & 32 deletions packages/tasks/src/snippets/python.ts
@@ -16,9 +16,24 @@ export const snippetConversational = (
}
): InferenceSnippet[] => {
const streaming = opts?.streaming ?? true;
const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
{ role: "user", content: "What is the capital of France?" },
];
const exampleMessages: ChatCompletionInputMessage[] =
model.pipeline_tag === "text-generation"
? [{ role: "user", content: "What is the capital of France?" }]
: [
{
role: "user",
content: [
{
type: "image_url",
image_url: {
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
},
{ type: "text", text: "Describe this image in one sentence." },
],
},
];
Comment on lines +20 to +35

@Wauplin (Contributor) · Oct 25, 2024

Feels like these example messages should be in inputs.ts. I can do it in a later PR if you want.

const messages = opts?.messages ?? exampleMessages;
const messagesStr = stringifyMessages(messages, {
sep: ",\n\t",
start: `[\n\t`,
@@ -121,30 +136,6 @@ print(completion.choices[0].message)`,
}
};

export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
content: `from huggingface_hub import InferenceClient

client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")

image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"

for message in client.chat_completion(
model="${model.id}",
messages=[
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": image_url}},
{"type": "text", "text": "Describe this image in one sentence."},
],
}
],
max_tokens=500,
stream=True,
):
print(message.choices[0].delta.content, end="")`,
});

export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet => ({
content: `def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
@@ -282,7 +273,7 @@ export const pythonSnippets: Partial<
"feature-extraction": snippetBasic,
"text-generation": snippetBasic,
"text2text-generation": snippetBasic,
"image-text-to-text": snippetConversationalWithImage,
"image-text-to-text": snippetConversational,
"fill-mask": snippetBasic,
"sentence-similarity": snippetBasic,
"automatic-speech-recognition": snippetFile,
@@ -306,12 +297,9 @@ export function getPythonInferenceSnippet(
accessToken: string,
opts?: Record<string, unknown>
): InferenceSnippet | InferenceSnippet[] {
if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
return snippetConversational(model, accessToken, opts);
} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
// Example sending an image to the Message API
return snippetConversationalWithImage(model, accessToken);
} else {
let snippets =
model.pipeline_tag && model.pipeline_tag in pythonSnippets
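With the entry point above keyed only on the `conversational` tag, text-only and image-text-to-text chat models now share the same code path, and `pipeline_tag` merely selects which example messages appear in the generated Python snippet. A minimal caller sketch — the import paths, the `ModelDataMinimal` field set, and the model id are illustrative assumptions:

```ts
// Sketch only: import paths and the ModelDataMinimal field set are assumed.
import { getPythonInferenceSnippet } from "./python";
import type { ModelDataMinimal } from "./types";

const vlm: ModelDataMinimal = {
	id: "meta-llama/Llama-3.2-11B-Vision-Instruct", // illustrative conversational VLM
	pipeline_tag: "image-text-to-text",
	tags: ["conversational"],
};

// Routed to snippetConversational; the image_url + text example messages are
// used because pipeline_tag !== "text-generation".
const snippets = getPythonInferenceSnippet(vlm, "{API_TOKEN}");
for (const s of Array.isArray(snippets) ? snippets : [snippets]) {
	console.log(s.content);
}
```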