Skip to content

Commit 7753620

Browse files
committed
Merge branch 'main' of github.com:huggingface/huggingface.js into refactor-providers
2 parents 7782f2b + b088227 commit 7753620

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

41 files changed

+329
-231
lines changed

packages/hub/src/lib/file-download-info.spec.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ describe("fileDownloadInfo", () => {
1313
});
1414

1515
assert.strictEqual(info?.size, 536063208);
16-
assert.strictEqual(info?.etag, '"41a0e56472bad33498744818c8b1ef2c-64"');
16+
assert.strictEqual(info?.etag, '"879c5715c18a0b7f051dd33f70f0a5c8dd1522e0a43f6f75520f16167f29279b"');
1717
assert(info?.downloadLink);
1818
});
1919

packages/hub/src/lib/list-files.spec.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ describe("listFiles", () => {
3737
size: 440473133,
3838
pointerSize: 134,
3939
},
40+
xetHash: "2d8408d3a894d02517d04956e2f7546ff08362594072f3527ce144b5212a3296",
4041
oid: "ba5d19791be1dd7992e33bd61f20207b0f7f50a5",
4142
path: "pytorch_model.bin",
4243
size: 440473133,
@@ -48,6 +49,7 @@ describe("listFiles", () => {
4849
size: 536063208,
4950
pointerSize: 134,
5051
},
52+
xetHash: "879c5715c18a0b7f051dd33f70f0a5c8dd1522e0a43f6f75520f16167f29279b",
5153
oid: "9eb98c817f04b051b3bcca591bcd4e03cec88018",
5254
path: "tf_model.h5",
5355
size: 536063208,
@@ -113,6 +115,7 @@ describe("listFiles", () => {
113115
size: 440473133,
114116
pointerSize: 134,
115117
},
118+
xetHash: "2d8408d3a894d02517d04956e2f7546ff08362594072f3527ce144b5212a3296",
116119
oid: "ba5d19791be1dd7992e33bd61f20207b0f7f50a5",
117120
path: "pytorch_model.bin",
118121
size: 440473133,
@@ -129,6 +132,7 @@ describe("listFiles", () => {
129132
size: 536063208,
130133
pointerSize: 134,
131134
},
135+
xetHash: "879c5715c18a0b7f051dd33f70f0a5c8dd1522e0a43f6f75520f16167f29279b",
132136
oid: "9eb98c817f04b051b3bcca591bcd4e03cec88018",
133137
path: "tf_model.h5",
134138
size: 536063208,

packages/hub/src/lib/list-files.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,10 @@ export interface ListFileEntry {
1717
/** Size of the raw pointer file, 100~200 bytes */
1818
pointerSize: number;
1919
};
20+
/**
21+
* Xet-backed hash, a new protocol replacing LFS for big files.
22+
*/
23+
xetHash?: string;
2024
/**
2125
* Only fetched if `expand` is set to `true` in the `listFiles` call.
2226
*/

packages/inference/README.md

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -572,31 +572,6 @@ await hf.tabularClassification({
572572
})
573573
```
574574

575-
## Custom Calls
576-
577-
For models with custom parameters / outputs.
578-
579-
```typescript
580-
await hf.request({
581-
model: 'my-custom-model',
582-
inputs: 'hello world',
583-
parameters: {
584-
custom_param: 'some magic',
585-
}
586-
})
587-
588-
// Custom streaming call, for models with custom parameters / outputs
589-
for await (const output of hf.streamingRequest({
590-
model: 'my-custom-model',
591-
inputs: 'hello world',
592-
parameters: {
593-
custom_param: 'some magic',
594-
}
595-
})) {
596-
...
597-
}
598-
```
599-
600575
You can use any Chat Completion API-compatible provider with the `chatCompletion` method.
601576

602577
```typescript

packages/inference/src/providers/hf-inference.ts

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -360,8 +360,9 @@ export class HFInferenceZeroShotImageClassificationTask
360360

361361
export class HFInferenceTextClassificationTask extends HFInferenceTask implements TextClassificationTaskHelper {
362362
override async getResponse(response: TextClassificationOutput): Promise<TextClassificationOutput> {
363-
if (Array.isArray(response) && response.every((x) => typeof x?.label === "string" && typeof x.score === "number")) {
364-
return response;
363+
const output = response?.[0];
364+
if (Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number")) {
365+
return output;
365366
}
366367
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
367368
}

packages/inference/src/tasks/audio/audioClassification.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import type { AudioClassificationInput, AudioClassificationOutput } from "@huggingface/tasks";
22
import { getProviderHelper } from "../../lib/getProviderHelper";
33
import type { BaseArgs, Options } from "../../types";
4-
import { request } from "../custom/request";
4+
import { innerRequest } from "../../utils/request";
55
import type { LegacyAudioInput } from "./utils";
66
import { preparePayload } from "./utils";
77

@@ -17,7 +17,7 @@ export async function audioClassification(
1717
): Promise<AudioClassificationOutput> {
1818
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "audio-classification");
1919
const payload = preparePayload(args);
20-
const res = await request(payload, {
20+
const { data: res } = await innerRequest<AudioClassificationOutput>(payload, {
2121
...options,
2222
task: "audio-classification",
2323
});

packages/inference/src/tasks/audio/audioToAudio.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { getProviderHelper } from "../../lib/getProviderHelper";
22
import type { BaseArgs, Options } from "../../types";
3-
import { request } from "../custom/request";
3+
import { innerRequest } from "../../utils/request";
44
import type { LegacyAudioInput } from "./utils";
55
import { preparePayload } from "./utils";
66

@@ -38,7 +38,7 @@ export interface AudioToAudioOutput {
3838
export async function audioToAudio(args: AudioToAudioArgs, options?: Options): Promise<AudioToAudioOutput[]> {
3939
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "audio-to-audio");
4040
const payload = preparePayload(args);
41-
const res = await request<AudioToAudioOutput>(payload, {
41+
const { data: res } = await innerRequest<AudioToAudioOutput>(payload, {
4242
...options,
4343
task: "audio-to-audio",
4444
});

packages/inference/src/tasks/audio/automaticSpeechRecognition.ts

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,10 @@ import { FAL_AI_SUPPORTED_BLOB_TYPES } from "../../providers/fal-ai";
55
import type { BaseArgs, Options, RequestArgs } from "../../types";
66
import { base64FromBytes } from "../../utils/base64FromBytes";
77
import { omit } from "../../utils/omit";
8-
import { request } from "../custom/request";
8+
import { innerRequest } from "../../utils/request";
99
import type { LegacyAudioInput } from "./utils";
1010
import { preparePayload } from "./utils";
11+
1112
export type AutomaticSpeechRecognitionArgs = BaseArgs & (AutomaticSpeechRecognitionInput | LegacyAudioInput);
1213
/**
1314
* This task reads some audio input and outputs the said words within the audio files.
@@ -19,7 +20,7 @@ export async function automaticSpeechRecognition(
1920
): Promise<AutomaticSpeechRecognitionOutput> {
2021
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "automatic-speech-recognition");
2122
const payload = await buildPayload(args);
22-
const res = await request<AutomaticSpeechRecognitionOutput>(payload, {
23+
const { data: res } = await innerRequest<AutomaticSpeechRecognitionOutput>(payload, {
2324
...options,
2425
task: "automatic-speech-recognition",
2526
});

packages/inference/src/tasks/audio/textToSpeech.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import type { TextToSpeechInput } from "@huggingface/tasks";
22
import { getProviderHelper } from "../../lib/getProviderHelper";
33
import type { BaseArgs, Options } from "../../types";
4-
import { request } from "../custom/request";
4+
import { innerRequest } from "../../utils/request";
55
type TextToSpeechArgs = BaseArgs & TextToSpeechInput;
66

77
interface OutputUrlTextToSpeechGeneration {
@@ -14,7 +14,7 @@ interface OutputUrlTextToSpeechGeneration {
1414
export async function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<Blob> {
1515
const provider = args.provider ?? "hf-inference";
1616
const providerHelper = getProviderHelper(provider, "text-to-speech");
17-
const res = await request<Blob | OutputUrlTextToSpeechGeneration>(args, {
17+
const { data: res } = await innerRequest<Blob | OutputUrlTextToSpeechGeneration>(args, {
1818
...options,
1919
task: "text-to-speech",
2020
});
Lines changed: 7 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
1-
import { makeRequestOptions } from "../../lib/makeRequestOptions";
21
import type { InferenceTask, Options, RequestArgs } from "../../types";
2+
import { innerRequest } from "../../utils/request";
33

44
/**
55
* Primitive to make custom calls to the inference provider
6+
* @deprecated Use specific task functions instead. This function will be removed in a future version.
67
*/
78
export async function request<T>(
89
args: RequestArgs,
@@ -11,35 +12,9 @@ export async function request<T>(
1112
task?: InferenceTask;
1213
}
1314
): Promise<T> {
14-
const { url, info } = await makeRequestOptions(args, options);
15-
const response = await (options?.fetch ?? fetch)(url, info);
16-
17-
if (options?.retry_on_error !== false && response.status === 503) {
18-
return request(args, options);
19-
}
20-
21-
if (!response.ok) {
22-
const contentType = response.headers.get("Content-Type");
23-
if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
24-
const output = await response.json();
25-
if ([400, 422, 404, 500].includes(response.status) && options?.task === "conversational") {
26-
throw new Error(
27-
`Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
28-
);
29-
}
30-
if (output.error || output.detail) {
31-
throw new Error(JSON.stringify(output.error ?? output.detail));
32-
} else {
33-
throw new Error(output);
34-
}
35-
}
36-
const message = contentType?.startsWith("text/plain;") ? await response.text() : undefined;
37-
throw new Error(message ?? "An error occurred while fetching the blob");
38-
}
39-
40-
if (response.headers.get("Content-Type")?.startsWith("application/json")) {
41-
return await response.json();
42-
}
43-
44-
return (await response.blob()) as T;
15+
console.warn(
16+
"The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
17+
);
18+
const result = await innerRequest<T>(args, options);
19+
return result.data;
4520
}
Lines changed: 5 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,8 @@
1-
import { makeRequestOptions } from "../../lib/makeRequestOptions";
21
import type { InferenceTask, Options, RequestArgs } from "../../types";
3-
import type { EventSourceMessage } from "../../vendor/fetch-event-source/parse";
4-
import { getLines, getMessages } from "../../vendor/fetch-event-source/parse";
5-
2+
import { innerStreamingRequest } from "../../utils/request";
63
/**
74
* Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator
5+
* @deprecated Use specific task functions instead. This function will be removed in a future version.
86
*/
97
export async function* streamingRequest<T>(
108
args: RequestArgs,
@@ -13,86 +11,8 @@ export async function* streamingRequest<T>(
1311
task?: InferenceTask;
1412
}
1513
): AsyncGenerator<T> {
16-
const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
17-
const response = await (options?.fetch ?? fetch)(url, info);
18-
19-
if (options?.retry_on_error !== false && response.status === 503) {
20-
return yield* streamingRequest(args, options);
21-
}
22-
if (!response.ok) {
23-
if (response.headers.get("Content-Type")?.startsWith("application/json")) {
24-
const output = await response.json();
25-
if ([400, 422, 404, 500].includes(response.status) && options?.task === "conversational") {
26-
throw new Error(`Server ${args.model} does not seem to support chat completion. Error: ${output.error}`);
27-
}
28-
if (typeof output.error === "string") {
29-
throw new Error(output.error);
30-
}
31-
if (output.error && "message" in output.error && typeof output.error.message === "string") {
32-
/// OpenAI errors
33-
throw new Error(output.error.message);
34-
}
35-
}
36-
37-
throw new Error(`Server response contains error: ${response.status}`);
38-
}
39-
if (!response.headers.get("content-type")?.startsWith("text/event-stream")) {
40-
throw new Error(
41-
`Server does not support event stream content type, it returned ` + response.headers.get("content-type")
42-
);
43-
}
44-
45-
if (!response.body) {
46-
return;
47-
}
48-
49-
const reader = response.body.getReader();
50-
let events: EventSourceMessage[] = [];
51-
52-
const onEvent = (event: EventSourceMessage) => {
53-
// accumulate events in array
54-
events.push(event);
55-
};
56-
57-
const onChunk = getLines(
58-
getMessages(
59-
() => {},
60-
() => {},
61-
onEvent
62-
)
14+
console.warn(
15+
"The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
6316
);
64-
65-
try {
66-
while (true) {
67-
const { done, value } = await reader.read();
68-
if (done) {
69-
return;
70-
}
71-
onChunk(value);
72-
for (const event of events) {
73-
if (event.data.length > 0) {
74-
if (event.data === "[DONE]") {
75-
return;
76-
}
77-
const data = JSON.parse(event.data);
78-
if (typeof data === "object" && data !== null && "error" in data) {
79-
const errorStr =
80-
typeof data.error === "string"
81-
? data.error
82-
: typeof data.error === "object" &&
83-
data.error &&
84-
"message" in data.error &&
85-
typeof data.error.message === "string"
86-
? data.error.message
87-
: JSON.stringify(data.error);
88-
throw new Error(`Error forwarded from backend: ` + errorStr);
89-
}
90-
yield data as T;
91-
}
92-
}
93-
events = [];
94-
}
95-
} finally {
96-
reader.releaseLock();
97-
}
17+
yield* innerStreamingRequest(args, options);
9818
}

packages/inference/src/tasks/cv/imageClassification.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import type { ImageClassificationInput, ImageClassificationOutput } from "@huggingface/tasks";
22
import { getProviderHelper } from "../../lib/getProviderHelper";
33
import type { BaseArgs, Options } from "../../types";
4-
import { request } from "../custom/request";
4+
import { innerRequest } from "../../utils/request";
55
import { preparePayload, type LegacyImageInput } from "./utils";
66

77
export type ImageClassificationArgs = BaseArgs & (ImageClassificationInput | LegacyImageInput);
@@ -16,7 +16,7 @@ export async function imageClassification(
1616
): Promise<ImageClassificationOutput> {
1717
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "image-classification");
1818
const payload = preparePayload(args);
19-
const res = await request<ImageClassificationOutput>(payload, {
19+
const { data: res } = await innerRequest<ImageClassificationOutput>(payload, {
2020
...options,
2121
task: "image-classification",
2222
});

packages/inference/src/tasks/cv/imageSegmentation.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import type { ImageSegmentationInput, ImageSegmentationOutput } from "@huggingface/tasks";
22
import { getProviderHelper } from "../../lib/getProviderHelper";
33
import type { BaseArgs, Options } from "../../types";
4-
import { request } from "../custom/request";
4+
import { innerRequest } from "../../utils/request";
55
import { preparePayload, type LegacyImageInput } from "./utils";
66

77
export type ImageSegmentationArgs = BaseArgs & (ImageSegmentationInput | LegacyImageInput);
@@ -16,7 +16,7 @@ export async function imageSegmentation(
1616
): Promise<ImageSegmentationOutput> {
1717
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "image-segmentation");
1818
const payload = preparePayload(args);
19-
const res = await request<ImageSegmentationOutput>(payload, {
19+
const { data: res } = await innerRequest<ImageSegmentationOutput>(payload, {
2020
...options,
2121
task: "image-segmentation",
2222
});

packages/inference/src/tasks/cv/imageToImage.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ import type { ImageToImageInput } from "@huggingface/tasks";
22
import { getProviderHelper } from "../../lib/getProviderHelper";
33
import type { BaseArgs, Options, RequestArgs } from "../../types";
44
import { base64FromBytes } from "../../utils/base64FromBytes";
5-
import { request } from "../custom/request";
5+
import { innerRequest } from "../../utils/request";
66

77
export type ImageToImageArgs = BaseArgs & ImageToImageInput;
88

@@ -27,7 +27,7 @@ export async function imageToImage(args: ImageToImageArgs, options?: Options): P
2727
),
2828
};
2929
}
30-
const res = await request<Blob>(reqArgs, {
30+
const { data: res } = await innerRequest<Blob>(reqArgs, {
3131
...options,
3232
task: "image-to-image",
3333
});

0 commit comments

Comments (0)