
Commit 1925c61

text-to-video
1 parent 0a1ab31 commit 1925c61

6 files changed (+56, -12 lines)
Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 video = client.text_to_video(
-    {{ inputs }},
+    {{ inputs.asObj.inputs }},
     model="{{ model.id }}",
 )
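
The hunk above changes the Python snippet template so that it no longer interpolates the whole inputs value but only the prompt string. As a rough sketch of the intent (the exact shape of the template context is an assumption inferred from the rendered fixtures below, not shown in this diff), the lookup behaves like plain property access:

// Hypothetical template context for the text-to-video snippet; the real
// object passed by the snippet generator may differ.
const inputs = {
	asObj: {
		inputs: "A young man walking on the street",
		parameters: { num_inference_steps: 5 },
	},
};

// "{{ inputs }}" would render the whole object, while
// "{{ inputs.asObj.inputs }}" renders only the prompt string that the
// generated Python snippet passes as the first positional argument.
console.log(inputs.asObj.inputs); // "A young man walking on the street"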

packages/tasks-gen/scripts/generate-snippets-fixtures.ts

Lines changed: 11 additions & 11 deletions
@@ -145,17 +145,17 @@ const TEST_CASES: {
 		providers: ["hf-inference", "fal-ai"],
 		languages: ["sh", "js", "py"],
 	},
-	// {
-	// 	testName: "text-to-video",
-	// 	model: {
-	// 		id: "tencent/HunyuanVideo",
-	// 		pipeline_tag: "text-to-video",
-	// 		tags: [],
-	// 		inference: "",
-	// 	},
-	// 	providers: ["replicate", "fal-ai"],
-	// 	languages: ["js", "py"],
-	// },
+	{
+		testName: "text-to-video",
+		model: {
+			id: "tencent/HunyuanVideo",
+			pipeline_tag: "text-to-video",
+			tags: [],
+			inference: "",
+		},
+		providers: ["replicate", "fal-ai"],
+		languages: ["js", "py"],
+	},
 	// {
 	// 	testName: "text-classification",
 	// 	model: {

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+import { InferenceClient } from "@huggingface/inference";
+
+const client = new InferenceClient("api_token");
+
+const video = await client.textToVideo({
+	model: "tencent/HunyuanVideo",
+	provider: "fal-ai",
+	inputs: "A young man walking on the street",
+	parameters: { num_inference_steps: 5 },
+});
+// Use the generated video (it's a Blob)

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+import { InferenceClient } from "@huggingface/inference";
+
+const client = new InferenceClient("api_token");
+
+const video = await client.textToVideo({
+	model: "tencent/HunyuanVideo",
+	provider: "replicate",
+	inputs: "A young man walking on the street",
+	parameters: { num_inference_steps: 5 },
+});
+// Use the generated video (it's a Blob)
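
Both JS fixtures above end with a note that textToVideo resolves to a Blob. A minimal sketch of how a caller might persist that Blob from Node.js (the output filename is illustrative, not part of the fixtures):

import { InferenceClient } from "@huggingface/inference";
import { writeFile } from "node:fs/promises";

const client = new InferenceClient("api_token");

const video = await client.textToVideo({
	model: "tencent/HunyuanVideo",
	provider: "fal-ai",
	inputs: "A young man walking on the street",
});

// The returned Blob can be turned into a Buffer and written to disk.
// "video.mp4" is an illustrative filename, not something the fixtures prescribe.
await writeFile("video.mp4", Buffer.from(await video.arrayBuffer()));
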
Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="fal-ai",
+    api_key="api_token",
+)
+
+video = client.text_to_video(
+    "A young man walking on the street",
+    model="tencent/HunyuanVideo",
+)

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="replicate",
+    api_key="api_token",
+)
+
+video = client.text_to_video(
+    "A young man walking on the street",
+    model="tencent/HunyuanVideo",
+)
