
[InferenceClient] Add dynamic inference providers mapping #2836

Merged 39 commits on Feb 11, 2025

Commits
dbda294
first draft of dynamic mapping
hanouticelina Feb 5, 2025
2c56667
fix imports and typing
hanouticelina Feb 5, 2025
2760a76
add back recommended models fetching for hf-inference
hanouticelina Feb 5, 2025
7783943
fix
hanouticelina Feb 5, 2025
cc73ead
avoid circular imports
hanouticelina Feb 5, 2025
a178f03
small clean up
hanouticelina Feb 5, 2025
3956df5
add default supported model list
hanouticelina Feb 5, 2025
f2208ae
remove unnecessary arg
hanouticelina Feb 5, 2025
d43462a
nit
hanouticelina Feb 5, 2025
4988db0
rename function
hanouticelina Feb 5, 2025
91f41c3
another nit
hanouticelina Feb 5, 2025
cb7eb9a
fix
hanouticelina Feb 5, 2025
8d78dca
fix conversational
hanouticelina Feb 5, 2025
181b6a7
fix hf-inference
hanouticelina Feb 5, 2025
0568aa5
add warning when status=staging
hanouticelina Feb 5, 2025
d71e92e
update warning and use model_info
hanouticelina Feb 5, 2025
cf1e956
update import
hanouticelina Feb 5, 2025
949e8c6
fix ExpandModelProperty_T
hanouticelina Feb 5, 2025
80acc5c
Merge branch 'main' into add-dynamic-inference-provider-mapping
Wauplin Feb 7, 2025
5639de0
refacto
Wauplin Feb 7, 2025
53cca3c
fix python 3.8
Wauplin Feb 7, 2025
0474833
fix test
Wauplin Feb 10, 2025
ee5443a
remove newlines
Wauplin Feb 10, 2025
5dfe4b9
Base class for inference providers
Wauplin Feb 10, 2025
faf19e0
revert
Wauplin Feb 10, 2025
632d43e
refacto hf-inference and fal-ai tests
Wauplin Feb 10, 2025
4053a15
replicate tests
Wauplin Feb 10, 2025
90521c4
samba and together tests
Wauplin Feb 10, 2025
72396a8
reorder
Wauplin Feb 10, 2025
c6ec0f8
unfinished business
Wauplin Feb 10, 2025
6969309
some docstrings
Wauplin Feb 10, 2025
0c39884
fix some tests
Wauplin Feb 10, 2025
449342d
fix HfInference does not require token
Wauplin Feb 10, 2025
bd8c8e4
fix inference client tests
hanouticelina Feb 11, 2025
9d6f2a3
Merge branch 'main' into add-dynamic-inference-provider-mapping
Wauplin Feb 11, 2025
d041ceb
fix hf-inference _prepare_api_key
Wauplin Feb 11, 2025
e8083c0
fal-ai get response tests
Wauplin Feb 11, 2025
eebadd4
test get_response together + replicate
Wauplin Feb 11, 2025
38066e0
fix prepare_url
Wauplin Feb 11, 2025
40 changes: 32 additions & 8 deletions src/huggingface_hub/hf_api.py
@@ -153,38 +153,38 @@
"model-index",
"pipeline_tag",
"private",
"resourceGroup",
"safetensors",
"sha",
"siblings",
"spaces",
"tags",
"transformersInfo",
"trendingScore",
"widgetData",
"usedStorage",
"resourceGroup",
"widgetData",
]

ExpandDatasetProperty_T = Literal[
"author",
"cardData",
"citation",
"createdAt",
"disabled",
"description",
"disabled",
"downloads",
"downloadsAllTime",
"gated",
"lastModified",
"likes",
"paperswithcode_id",
"private",
"siblings",
"resourceGroup",
"sha",
"trendingScore",
"siblings",
"tags",
"trendingScore",
"usedStorage",
"resourceGroup",
]

ExpandSpaceProperty_T = Literal[
@@ -197,15 +197,15 @@
"likes",
"models",
"private",
"resourceGroup",
"runtime",
"sdk",
"siblings",
"sha",
"siblings",
"subdomain",
"tags",
"trendingScore",
"usedStorage",
"resourceGroup",
]

USERNAME_PLACEHOLDER = "hf_user"
@@ -698,6 +698,19 @@ def __init__(self, **kwargs):
self.last_commit = last_commit


@dataclass
class InferenceProviderMapping:
status: Literal["live", "staging"]
provider_id: str
task: str

def __init__(self, **kwargs):
self.status = kwargs.pop("status")
self.provider_id = kwargs.pop("providerId")
self.task = kwargs.pop("task")
self.__dict__.update(**kwargs)


@dataclass
class ModelInfo:
"""
@@ -740,6 +753,8 @@ class ModelInfo:
Status of the model on the inference API.
Warm models are available for immediate use. Cold models will be loaded on first inference call.
Frozen models are not available in Inference API.
inference_provider_mapping (`Dict`, *optional*):
Model's inference provider mapping.
likes (`int`):
Number of likes of the model.
library_name (`str`, *optional*):
@@ -785,6 +800,7 @@ class ModelInfo:
gated: Optional[Literal["auto", "manual", False]]
gguf: Optional[Dict]
inference: Optional[Literal["warm", "cold", "frozen"]]
inference_provider_mapping: Optional[Dict[str, InferenceProviderMapping]]
likes: Optional[int]
library_name: Optional[str]
tags: Optional[List[str]]
@@ -817,7 +833,15 @@ def __init__(self, **kwargs):
self.likes = kwargs.pop("likes", None)
self.library_name = kwargs.pop("library_name", None)
self.gguf = kwargs.pop("gguf", None)

self.inference = kwargs.pop("inference", None)
self.inference_provider_mapping = kwargs.pop("inferenceProviderMapping", None)
if self.inference_provider_mapping:
self.inference_provider_mapping = {
provider: InferenceProviderMapping(**value)
for provider, value in self.inference_provider_mapping.items()
}

self.tags = kwargs.pop("tags", None)
self.pipeline_tag = kwargs.pop("pipeline_tag", None)
self.mask_token = kwargs.pop("mask_token", None)
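
Note (usage sketch, not part of this diff): once this change is in, the new mapping can be requested explicitly through the `expand` parameter of `model_info`. The model ID below is purely illustrative and assumes the model has at least one registered inference provider.

from huggingface_hub import model_info

# Ask the Hub only for the provider mapping (illustrative model ID).
info = model_info("deepseek-ai/DeepSeek-R1", expand=["inferenceProviderMapping"])

# `inference_provider_mapping` is None or a dict of provider name -> InferenceProviderMapping.
if info.inference_provider_mapping:
    for provider, mapping in info.inference_provider_mapping.items():
        print(provider, mapping.provider_id, mapping.task, mapping.status)
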
5 changes: 3 additions & 2 deletions src/huggingface_hub/inference/_client.py
@@ -263,8 +263,9 @@ def post(
"`InferenceClient.post` is deprecated and should not be used directly anymore."
)
provider_helper = HFInferenceTask(task or "unknown")
url = provider_helper.build_url(provider_helper.map_model(model))
headers = provider_helper.prepare_headers(headers=self.headers, api_key=self.token)
mapped_model = provider_helper._prepare_mapped_model(model or self.model)
url = provider_helper._prepare_url(self.token, mapped_model) # type: ignore[arg-type]
headers = provider_helper._prepare_headers(self.headers, self.token) # type: ignore[arg-type]
return self._inner_post(
request_parameters=RequestParameters(
url=url,
26 changes: 1 addition & 25 deletions src/huggingface_hub/inference/_common.py
@@ -18,7 +18,6 @@
import io
import json
import logging
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from pathlib import Path
@@ -50,12 +49,7 @@
ValidationError,
)

from ..utils import (
get_session,
is_aiohttp_available,
is_numpy_available,
is_pillow_available,
)
from ..utils import get_session, is_aiohttp_available, is_numpy_available, is_pillow_available
from ._generated.types import ChatCompletionStreamOutput, TextGenerationStreamOutput


@@ -85,24 +79,6 @@ class RequestParameters:
headers: Dict[str, Any]


class TaskProviderHelper(ABC):
"""Protocol defining the interface for task-specific provider helpers."""

@abstractmethod
def prepare_request(
self,
*,
inputs: Any,
parameters: Dict[str, Any],
headers: Dict,
model: Optional[str],
api_key: Optional[str],
extra_payload: Optional[Dict[str, Any]] = None,
) -> RequestParameters: ...
@abstractmethod
def get_response(self, response: Union[bytes, Dict]) -> Any: ...


# Add dataclass for ModelStatus. We use this dataclass in get_model_status function.
@dataclass
class ModelStatus:
5 changes: 3 additions & 2 deletions src/huggingface_hub/inference/_generated/_async_client.py
@@ -258,8 +258,9 @@ async def post(
"`InferenceClient.post` is deprecated and should not be used directly anymore."
)
provider_helper = HFInferenceTask(task or "unknown")
url = provider_helper.build_url(provider_helper.map_model(model))
headers = provider_helper.prepare_headers(headers=self.headers, api_key=self.token)
mapped_model = provider_helper._prepare_mapped_model(model or self.model)
url = provider_helper._prepare_url(self.token, mapped_model) # type: ignore[arg-type]
headers = provider_helper._prepare_headers(self.headers, self.token) # type: ignore[arg-type]
return await self._inner_post(
request_parameters=RequestParameters(
url=url,
2 changes: 1 addition & 1 deletion src/huggingface_hub/inference/_providers/__init__.py
@@ -1,6 +1,6 @@
from typing import Dict, Literal

from .._common import TaskProviderHelper
from ._common import TaskProviderHelper
from .fal_ai import (
FalAIAutomaticSpeechRecognitionTask,
FalAITextToImageTask,
204 changes: 204 additions & 0 deletions src/huggingface_hub/inference/_providers/_common.py
@@ -0,0 +1,204 @@
from functools import lru_cache
from typing import Any, Dict, Optional, Union

from huggingface_hub import constants
from huggingface_hub.inference._common import RequestParameters
from huggingface_hub.utils import build_hf_headers, get_token, logging


logger = logging.get_logger(__name__)


# Dev purposes only.
# If you want to try to run inference for a new model locally before it's registered on huggingface.co
# for a given Inference Provider, you can add it to the following dictionary.
HARDCODED_MODEL_ID_MAPPING: Dict[str, Dict[str, str]] = {
# "HF model ID" => "Model ID on Inference Provider's side"
#
# Example:
# "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
"fal-ai": {},
"hf-inference": {},
"replicate": {},
"sambanova": {},
"together": {},
}


def filter_none(d: Dict[str, Any]) -> Dict[str, Any]:
return {k: v for k, v in d.items() if v is not None}


class TaskProviderHelper:
"""Base class for task-specific provider helpers."""

def __init__(self, provider: str, base_url: str, task: str) -> None:
self.provider = provider
self.task = task
self.base_url = base_url

def prepare_request(
self,
*,
inputs: Any,
parameters: Dict[str, Any],
headers: Dict,
model: Optional[str],
api_key: Optional[str],
extra_payload: Optional[Dict[str, Any]] = None,
) -> RequestParameters:
"""
Prepare the request to be sent to the provider.

Each step (api_key, model, headers, url, payload) can be customized in subclasses.
"""
# api_key from user, or local token, or raise error
api_key = self._prepare_api_key(api_key)

# mapped model from HF model ID
mapped_model = self._prepare_mapped_model(model)

# default HF headers + user headers (to customize in subclasses)
headers = self._prepare_headers(headers, api_key)

# routed URL if HF token, or direct URL (to customize in '_prepare_route' in subclasses)
url = self._prepare_url(api_key, mapped_model)

# prepare payload (to customize in subclasses)
payload = self._prepare_payload(inputs, parameters, mapped_model=mapped_model)
if payload is not None:
payload = recursive_merge(payload, extra_payload or {})

# body data (to customize in subclasses)
data = self._prepare_body(inputs, parameters, mapped_model, extra_payload)

# check if both payload and data are set and return
if payload is not None and data is not None:
raise ValueError("Both payload and data cannot be set in the same request.")
if payload is None and data is None:
raise ValueError("Either payload or data must be set in the request.")
return RequestParameters(url=url, task=self.task, model=mapped_model, json=payload, data=data, headers=headers)

def get_response(self, response: Union[bytes, Dict]) -> Any:
"""
Return the response in the expected format.

Override this method in subclasses for customized response handling."""
return response

def _prepare_api_key(self, api_key: Optional[str]) -> str:
"""Return the API key to use for the request.

Usually not overwritten in subclasses."""
if api_key is None:
api_key = get_token()
if api_key is None:
raise ValueError(
f"You must provide an api_key to work with {self.provider} API or log in with `huggingface-cli login`."
)
return api_key

def _prepare_mapped_model(self, model: Optional[str]) -> str:
"""Return the mapped model ID to use for the request.

Usually not overwritten in subclasses."""
if model is None:
raise ValueError(f"Please provide an HF model ID supported by {self.provider}.")

# hardcoded mapping for local testing
if HARDCODED_MODEL_ID_MAPPING.get(self.provider, {}).get(model):
return HARDCODED_MODEL_ID_MAPPING[self.provider][model]

provider_mapping = _fetch_inference_provider_mapping(model).get(self.provider)
if provider_mapping is None:
raise ValueError(f"Model {model} is not supported by provider {self.provider}.")

if provider_mapping.task != self.task:
raise ValueError(
f"Model {model} is not supported for task {self.task} and provider {self.provider}. "
f"Supported task: {provider_mapping.task}."
)

if provider_mapping.status == "staging":
logger.warning(
f"Model {model} is in staging mode for provider {self.provider}. Meant for test purposes only."
)
return provider_mapping.provider_id

def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
"""Return the headers to use for the request.

Override this method in subclasses for customized headers.
"""
return {**build_hf_headers(token=api_key), **headers}

def _prepare_url(self, api_key: str, mapped_model: str) -> str:
"""Return the URL to use for the request.

Usually not overwritten in subclasses."""
base_url = self._prepare_base_url(api_key)
route = self._prepare_route(mapped_model)
return f"{base_url.rstrip('/')}/{route.lstrip('/')}"

def _prepare_base_url(self, api_key: str) -> str:
"""Return the base URL to use for the request.

Usually not overwritten in subclasses."""
# Route to the proxy if the api_key is a HF TOKEN
if api_key.startswith("hf_"):
logger.info(f"Calling '{self.provider}' provider through Hugging Face router.")
return constants.INFERENCE_PROXY_TEMPLATE.format(provider=self.provider)
else:
logger.info(f"Calling '{self.provider}' provider directly.")
return self.base_url

def _prepare_route(self, mapped_model: str) -> str:
"""Return the route to use for the request.

Override this method in subclasses for customized routes.
"""
return ""

def _prepare_payload(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
"""Return the payload to use for the request, as a dict.

Override this method in subclasses for customized payloads.
Only one of `_prepare_payload` and `_prepare_body` should return a value.
"""
return None

def _prepare_body(
self, inputs: Any, parameters: Dict, mapped_model: str, extra_payload: Optional[Dict]
) -> Optional[bytes]:
"""Return the body to use for the request, as bytes.

Override this method in subclasses for customized body data.
Only one of `_prepare_payload` and `_prepare_body` should return a value.
"""
return None


@lru_cache(maxsize=None)
def _fetch_inference_provider_mapping(model: str) -> Dict:
"""
Fetch provider mappings for a model from the Hub.
"""
from huggingface_hub.hf_api import model_info

info = model_info(model, expand=["inferenceProviderMapping"])
provider_mapping = info.inference_provider_mapping
if provider_mapping is None:
raise ValueError(f"No provider mapping found for model {model}")
return provider_mapping


def recursive_merge(dict1: Dict, dict2: Dict) -> Dict:
return {
**dict1,
**{
key: recursive_merge(dict1[key], value)
if (key in dict1 and isinstance(dict1[key], dict) and isinstance(value, dict))
else value
for key, value in dict2.items()
},
}
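
Note (usage sketch, not part of this diff): an illustration of how the new base class is meant to be extended. The provider name, base URL, model IDs and route below are all hypothetical, and the dev-only hardcoded mapping is used so that no call to the Hub is made.

from huggingface_hub.inference._providers._common import (
    HARDCODED_MODEL_ID_MAPPING,
    TaskProviderHelper,
    filter_none,
)


class MyProviderConversationalTask(TaskProviderHelper):
    """Hypothetical conversational helper for a provider called 'my-provider'."""

    def __init__(self):
        super().__init__(provider="my-provider", base_url="https://api.my-provider.example", task="conversational")

    def _prepare_route(self, mapped_model: str) -> str:
        # Provider-specific route, appended to the base URL (or to the HF proxy URL when routed).
        return "/v1/chat/completions"

    def _prepare_payload(self, inputs, parameters, mapped_model):
        # JSON payload; None-valued parameters are dropped.
        return {"messages": inputs, "model": mapped_model, **filter_none(parameters)}


# Register a local mapping so _prepare_mapped_model does not query the Hub (dev-only escape hatch).
HARDCODED_MODEL_ID_MAPPING["my-provider"] = {"my-org/my-model": "my-model-v1"}

helper = MyProviderConversationalTask()
request = helper.prepare_request(
    inputs=[{"role": "user", "content": "Hello!"}],
    parameters={"max_tokens": 64, "temperature": None},
    headers={},
    model="my-org/my-model",
    api_key="sk-not-a-real-key",  # non-'hf_' key, so the request targets the provider directly
)
print(request.url)   # https://api.my-provider.example/v1/chat/completions
print(request.json)  # {'messages': [...], 'model': 'my-model-v1', 'max_tokens': 64}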