Skip to content

Commit cf1fada

Browse files
VinciGit00 authored and f-aguzzi committed
fix: abstract graph
Co-Authored-By: Federico Aguzzi <[email protected]>
1 parent 20410c9 commit cf1fada

File tree

2 files changed

+9
-20
lines changed

2 files changed

+9
-20
lines changed

scrapegraphai/graphs/abstract_graph.py

Lines changed: 3 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -63,13 +63,10 @@ def __init__(self, prompt: str, config: dict,
6363
self.cache_path = self.config.get("cache_path", False)
6464
self.browser_base = self.config.get("browser_base")
6565

66-
# Create the graph
6766
self.graph = self._create_graph()
6867
self.final_state = None
6968
self.execution_info = None
7069

71-
# Set common configuration parameters
72-
7370
verbose = bool(config and config.get("verbose"))
7471

7572
if verbose:
@@ -87,12 +84,10 @@ def __init__(self, prompt: str, config: dict,
8784

8885
self.set_common_params(common_params, overwrite=True)
8986

90-
# set burr config
9187
self.burr_kwargs = config.get("burr_kwargs", None)
9288
if self.burr_kwargs is not None:
9389
self.graph.use_burr = True
9490
if "app_instance_id" not in self.burr_kwargs:
95-
# set a random uuid for the app_instance_id to avoid conflicts
9691
self.burr_kwargs["app_instance_id"] = str(uuid.uuid4())
9792

9893
self.graph.burr_config = self.burr_kwargs
@@ -125,7 +120,6 @@ def _create_llm(self, llm_config: dict) -> object:
125120
llm_defaults = {"temperature": 0, "streaming": False}
126121
llm_params = {**llm_defaults, **llm_config}
127122

128-
# If model instance is passed directly instead of the model details
129123
if "model_instance" in llm_params:
130124
try:
131125
self.model_token = llm_params["model_tokens"]
@@ -145,18 +139,14 @@ def handle_model(model_name, provider, token_key, default_token=8192):
145139
warnings.simplefilter("ignore")
146140
return init_chat_model(**llm_params)
147141

148-
known_models = ["chatgpt","gpt","openai", "azure_openai", "google_genai",
149-
"ollama", "oneapi", "nvidia", "groq", "google_vertexai",
150-
"bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"]
151-
142+
known_models = {"chatgpt","gpt","openai", "azure_openai", "google_genai",
143+
"ollama", "oneapi", "nvidia", "groq", "google_vertexai",
144+
"bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"}
152145

153146
if llm_params["model"].split("/")[0] not in known_models and llm_params["model"].split("-")[0] not in known_models:
154147
raise ValueError(f"Model '{llm_params['model']}' is not supported")
155148

156149
try:
157-
if "azure" in llm_params["model"]:
158-
model_name = llm_params["model"].split("/")[-1]
159-
return handle_model(model_name, "azure_openai", model_name)
160150
if "fireworks" in llm_params["model"]:
161151
model_name = "/".join(llm_params["model"].split("/")[1:])
162152
token_key = llm_params["model"].split("/")[-1]
@@ -207,7 +197,6 @@ def handle_model(model_name, provider, token_key, default_token=8192):
207197
return ErnieBotChat(llm_params)
208198

209199
elif "oneapi" in llm_params["model"]:
210-
# take the model after the last dash
211200
llm_params["model"] = llm_params["model"].split("/")[-1]
212201
try:
213202
self.model_token = models_tokens["oneapi"][llm_params["model"]]

scrapegraphai/helpers/models_tokens.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,11 @@
5050
"gemini-1.5-flash-latest": 128000,
5151
"gemini-1.5-pro-latest": 128000,
5252
"models/embedding-001": 2048
53+
},
54+
"google_vertexai": {
55+
"gemini-1.5-flash": 128000,
56+
"gemini-1.5-pro": 128000,
57+
"gemini-1.0-pro": 128000,
5358
},
5459
"ollama": {
5560
"command-r": 12800,
@@ -96,7 +101,7 @@
96101
"oneapi": {
97102
"qwen-turbo": 6000,
98103
},
99-
"nv dia": {
104+
"nvdia": {
100105
"meta/llama3-70b-instruct": 419,
101106
"meta/llama3-8b-instruct": 419,
102107
"nemotron-4-340b-instruct": 1024,
@@ -132,11 +137,6 @@
132137
"claude-3-haiku-20240307": 200000,
133138
"claude-3-5-sonnet-20240620": 200000,
134139
},
135-
"google_vertexai": {
136-
"gemini-1.5-flash": 128000,
137-
"gemini-1.5-pro": 128000,
138-
"gemini-1.0-pro": 128000,
139-
},
140140
"bedrock": {
141141
"anthropic.claude-3-haiku-20240307-v1:0": 200000,
142142
"anthropic.claude-3-sonnet-20240229-v1:0": 200000,

0 commit comments

Comments (0)