
Commit e683717

Merge branch 'main' of https://github.com/huggingface/huggingface.js into kai/hyperbolic-integration

2 parents 3e5eb4e + a32260f

12 files changed: +7071 additions, −7666 deletions

README.md
Lines changed: 1 addition & 1 deletion

````diff
@@ -96,7 +96,7 @@ You can run our packages with vanilla JS, without any bundler, by using a CDN or
 
 ```html
 <script type="module">
-  import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@3.3.0/+esm';
+  import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@3.3.3/+esm';
   import { createRepo, commit, deleteRepo, listFiles } from "https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm";
 </script>
 ```
````
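
For context, the pinned CDN build is used like any other ESM import. A minimal sketch, assuming the 3.3.3 CDN URL from the diff above runs in an ESM-aware environment such as a browser module script; the access token and model name are placeholders, not part of this commit:

```ts
// Sketch only: URL imports work in browsers and Deno, not in plain tsc.
import { HfInference } from "https://cdn.jsdelivr.net/npm/@huggingface/inference@3.3.3/+esm";

const hf = new HfInference("hf_..."); // placeholder token

// textToImage resolves to a Blob (see packages/inference/src/tasks/cv/textToImage.ts below).
const image = await hf.textToImage({
	model: "stabilityai/stable-diffusion-2",
	inputs: "An astronaut riding a horse",
});
```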

packages/agents/src/tools/imageToText.ts
Lines changed: 3 additions & 6 deletions

```diff
@@ -15,12 +15,9 @@ export const imageToTextTool: Tool = {
 		if (typeof data === "string") throw "Input must be a blob.";
 
 		return (
-			await inference.imageToText(
-				{
-					data,
-				},
-				{ wait_for_model: true }
-			)
+			await inference.imageToText({
+				data,
+			})
 		).generated_text;
 	},
 };
```

packages/agents/src/tools/speechToText.ts
Lines changed: 3 additions & 6 deletions

```diff
@@ -15,12 +15,9 @@ export const speechToTextTool: Tool = {
 		if (typeof data === "string") throw "Input must be a blob.";
 
 		return (
-			await inference.automaticSpeechRecognition(
-				{
-					data,
-				},
-				{ wait_for_model: true }
-			)
+			await inference.automaticSpeechRecognition({
+				data,
+			})
 		).text;
 	},
 };
```

packages/agents/src/tools/textToImage.ts
Lines changed: 3 additions & 6 deletions

```diff
@@ -19,11 +19,8 @@ export const textToImageTool: Tool = {
 		const data = await input;
 		if (typeof data !== "string") throw "Input must be a string.";
 
-		return await inference.textToImage(
-			{
-				inputs: data,
-			},
-			{ wait_for_model: true }
-		);
+		return await inference.textToImage({
+			inputs: data,
+		});
 	},
 };
```

packages/agents/src/tools/textToSpeech.ts
Lines changed: 4 additions & 7 deletions

```diff
@@ -19,12 +19,9 @@ export const textToSpeechTool: Tool = {
 		const data = await input;
 		if (typeof data !== "string") throw "Input must be a string.";
 
-		return inference.textToSpeech(
-			{
-				inputs: data,
-				model: "espnet/kan-bayashi_ljspeech_vits",
-			},
-			{ wait_for_model: true }
-		);
+		return inference.textToSpeech({
+			inputs: data,
+			model: "espnet/kan-bayashi_ljspeech_vits",
+		});
 	},
 };
```
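
All four agents tools make the same change: the second `{ wait_for_model: true }` options argument is dropped, since the client now retries 503s on its own (see `request.ts` below) and `makeRequestOptions` no longer forwards that flag. A minimal sketch of the resulting single-argument call shape, assuming an `HfInference` instance; the token is a placeholder:

```ts
import { HfInference } from "@huggingface/inference";

const inference = new HfInference("hf_..."); // placeholder token

// The payload is now the only argument; there is no per-call
// wait_for_model option anymore.
async function transcribe(recording: Blob): Promise<string> {
	const { text } = await inference.automaticSpeechRecognition({ data: recording });
	return text;
}
```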

packages/inference/package.json
Lines changed: 1 addition & 1 deletion

```diff
@@ -1,6 +1,6 @@
 {
 	"name": "@huggingface/inference",
-	"version": "3.3.0",
+	"version": "3.3.3",
 	"packageManager": "[email protected]",
 	"license": "MIT",
 	"author": "Tim Mikeladze <[email protected]>",
```

packages/inference/src/lib/makeRequestOptions.ts
Lines changed: 1 addition & 14 deletions

```diff
@@ -40,8 +40,7 @@ export async function makeRequestOptions(
 	let otherArgs = remainingArgs;
 	const provider = maybeProvider ?? "hf-inference";
 
-	const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion } =
-		options ?? {};
+	const { forceTask, includeCredentials, taskHint, chatCompletion } = options ?? {};
 
 	if (endpointUrl && provider !== "hf-inference") {
 		throw new Error(`Cannot use endpointUrl with a third-party provider.`);
@@ -103,18 +102,6 @@
 		headers["Content-Type"] = "application/json";
 	}
 
-	if (provider === "hf-inference") {
-		if (wait_for_model) {
-			headers["X-Wait-For-Model"] = "true";
-		}
-		if (use_cache === false) {
-			headers["X-Use-Cache"] = "false";
-		}
-		if (dont_load_model) {
-			headers["X-Load-Model"] = "0";
-		}
-	}
-
 	if (provider === "replicate") {
 		headers["Prefer"] = "wait";
 	}
```
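
With this change the hf-inference-specific headers (`X-Wait-For-Model`, `X-Use-Cache`, `X-Load-Model`) are no longer emitted at all. A caller who still wants one of them can edit headers through the `fetch` option that survives on `Options` (see `types.ts` below). A hedged sketch; the header name comes from the removed code above, and whether the serverless backend still honors it is an assumption:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Wrap the default fetch to re-add X-Use-Cache. Casting init?.headers assumes
// the client builds its headers as a plain object, as the removed code did.
const result = await hf.textClassification(
	{ model: "distilbert-base-uncased-finetuned-sst-2-english", inputs: "Great library!" },
	{
		fetch: (url, init) =>
			fetch(url, {
				...init,
				headers: { ...(init?.headers as Record<string, string>), "X-Use-Cache": "false" },
			}),
	}
);
```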

packages/inference/src/tasks/custom/request.ts
Lines changed: 2 additions & 5 deletions

```diff
@@ -18,11 +18,8 @@ export async function request<T>(
 	const { url, info } = await makeRequestOptions(args, options);
 	const response = await (options?.fetch ?? fetch)(url, info);
 
-	if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-		return request(args, {
-			...options,
-			wait_for_model: true,
-		});
+	if (options?.retry_on_error !== false && response.status === 503) {
+		return request(args, options);
 	}
 
 	if (!response.ok) {
```
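
With `wait_for_model` gone, a 503 now simply re-issues the identical request whenever `retry_on_error` is not `false`. A small sketch of opting out to fail fast instead, assuming the low-level `request` helper is exported as in this package; the model and output shape are illustrative:

```ts
import { request } from "@huggingface/inference";

// Fail fast on 503 instead of retrying; taskHint is passed the same way the
// built-in tasks pass it to request() (see textToImage below).
const out = await request<Array<{ generated_text: string }>>(
	{ model: "gpt2", inputs: "Hello" },
	{ retry_on_error: false, taskHint: "text-generation" }
);
```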

packages/inference/src/tasks/custom/streamingRequest.ts
Lines changed: 2 additions & 5 deletions

```diff
@@ -20,11 +20,8 @@ export async function* streamingRequest<T>(
 	const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
 	const response = await (options?.fetch ?? fetch)(url, info);
 
-	if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-		return yield* streamingRequest(args, {
-			...options,
-			wait_for_model: true,
-		});
+	if (options?.retry_on_error !== false && response.status === 503) {
+		return yield* streamingRequest(args, options);
 	}
 	if (!response.ok) {
 		if (response.headers.get("Content-Type")?.startsWith("application/json")) {
```
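
The streaming variant gets the same simplification and remains an async generator, consumed with `for await`. A sketch under the same export assumption; the chunk type below is an illustrative text-generation shape, not defined by this commit, and `streamingRequest` adds `stream: true` to the payload itself:

```ts
import { streamingRequest } from "@huggingface/inference";

// Each yielded chunk is one parsed server-sent event.
for await (const chunk of streamingRequest<{ token: { text: string } }>(
	{ model: "gpt2", inputs: "Once upon a time" },
	{ taskHint: "text-generation" }
)) {
	process.stdout.write(chunk.token.text);
}
```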

packages/inference/src/tasks/cv/textToImage.ts
Lines changed: 22 additions & 11 deletions

```diff
@@ -1,6 +1,6 @@
 import type { TextToImageInput, TextToImageOutput } from "@huggingface/tasks";
 import { InferenceOutputError } from "../../lib/InferenceOutputError";
-import type { BaseArgs, Options } from "../../types";
+import type { BaseArgs, InferenceProvider, Options } from "../../types";
 import { omit } from "../../utils/omit";
 import { request } from "../custom/request";
 
@@ -15,29 +15,40 @@ interface OutputUrlImageGeneration {
 	output: string[];
 }
 
+function getResponseFormatArg(provider: InferenceProvider) {
+	switch (provider) {
+		case "fal-ai":
+			return { sync_mode: true };
+		case "nebius":
+			return { response_format: "b64_json" };
+		case "replicate":
+			return undefined;
+		case "together":
+			return { response_format: "base64" };
+		default:
+			return undefined;
+	}
+}
+
 /**
  * This task reads some text input and outputs an image.
  * Recommended model: stabilityai/stable-diffusion-2
  */
 export async function textToImage(args: TextToImageArgs, options?: Options): Promise<Blob> {
 	const payload =
-		args.provider === "together" ||
-		args.provider === "fal-ai" ||
-		args.provider === "replicate" ||
-		args.provider === "nebius" ||
-		args.provider === "hyperbolic"
-			? {
+		!args.provider || args.provider === "hf-inference" || args.provider === "sambanova"
+			? args
+			: {
 					...omit(args, ["inputs", "parameters"]),
 					...args.parameters,
-					...(args.provider !== "replicate" ? { response_format: "base64" } : undefined),
-					...(args.provider === "nebius" ? { response_format: "b64_json" } : undefined),
+					...getResponseFormatArg(args.provider),
 					prompt: args.inputs,
-			  }
-			: args;
+			  };
 	const res = await request<TextToImageOutput | Base64ImageGeneration | OutputUrlImageGeneration>(payload, {
 		...options,
 		taskHint: "text-to-image",
 	});
+
 	if (res && typeof res === "object") {
 		if (args.provider === "fal-ai" && "images" in res && Array.isArray(res.images) && res.images[0].url) {
 			const image = await fetch(res.images[0].url);
```
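
The net effect of the inverted branch: any provider other than the default hf-inference route (or sambanova) now gets the prompt-style payload plus its provider-specific response-format argument from `getResponseFormatArg`. A hedged sketch of a provider-routed call; the model id and token are placeholders, not taken from this commit:

```ts
import { textToImage } from "@huggingface/inference";

// For provider "together", the helper maps `inputs` to `prompt` and merges
// { response_format: "base64" } into the payload before sending the request.
const blob = await textToImage({
	accessToken: "hf_...", // placeholder token
	provider: "together",
	model: "black-forest-labs/FLUX.1-schnell", // illustrative model id
	inputs: "A watercolor fox in the snow",
});
```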

packages/inference/src/types.ts
Lines changed: 1 addition & 17 deletions

```diff
@@ -7,26 +7,10 @@ export type ModelId = string;
 
 export interface Options {
 	/**
-	 * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+	 * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
 	 */
 	retry_on_error?: boolean;
-	/**
-	 * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-	 */
-	use_cache?: boolean;
-	/**
-	 * (Default: false). Boolean. Do not load the model if it's not already available.
-	 */
-	dont_load_model?: boolean;
-	/**
-	 * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-	 */
-	use_gpu?: boolean;
 
-	/**
-	 * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-	 */
-	wait_for_model?: boolean;
 	/**
 	 * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
 	 */
```
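
A short before/after sketch of this `Options` cleanup, assuming an `HfInference` instance; the token is a placeholder and the model is the one the file's own doc comment recommends:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

// Before (3.3.0): hf-inference-only flags were accepted per call, e.g.
//   await hf.textToImage({ inputs: "a cat" }, { wait_for_model: true, use_cache: false });

// After (3.3.3): only transport-level options remain, and 503s are retried
// automatically unless you opt out:
const image = await hf.textToImage(
	{ inputs: "a cat", model: "stabilityai/stable-diffusion-2" },
	{ retry_on_error: false }
);
```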
