@@ -1,15 +1,28 @@
 """
 AbstractGraph Module
 """
+
 from abc import ABC, abstractmethod
 from typing import Optional
+
 from langchain_aws import BedrockEmbeddings
-from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings
 from langchain_community.embeddings import HuggingFaceHubEmbeddings, OllamaEmbeddings
 from langchain_google_genai import GoogleGenerativeAIEmbeddings
-from ..helpers import models_tokens
-from ..models import AzureOpenAI, Bedrock, Gemini, Groq, HuggingFace, Ollama, OpenAI, Anthropic, DeepSeek
 from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
+from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings
+
+from ..helpers import models_tokens
+from ..models import (
+    Anthropic,
+    AzureOpenAI,
+    Bedrock,
+    Gemini,
+    Groq,
+    HuggingFace,
+    Ollama,
+    OpenAI,
+)
+from ..utils.logging import set_verbosity_debug, set_verbosity_warning

 from ..helpers import models_tokens
 from ..models import AzureOpenAI, Bedrock, Gemini, Groq, HuggingFace, Ollama, OpenAI, Anthropic, DeepSeek
@@ -67,10 +80,15 @@ def __init__(self, prompt: str, config: dict, source: Optional[str] = None, sche
         self.execution_info = None

         # Set common configuration parameters
-        self.verbose = False if config is None else config.get(
-            "verbose", False)
-        self.headless = True if config is None else config.get(
-            "headless", True)
+
+        verbose = bool(config and config.get("verbose"))
+
+        if verbose:
+            set_verbosity_debug()
+        else:
+            set_verbosity_warning()
+
+        self.headless = True if config is None else config.get("headless", True)
         self.loader_kwargs = config.get("loader_kwargs", {})

         common_params = {
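
Note: the hunk above is the one behavioral change in this commit: "verbose" is no longer stored on the instance; a truthy flag now raises the library-wide log level through the newly imported logging helpers. A minimal sketch of the resulting behavior, assuming the package is importable as scrapegraphai (the diff itself only shows the relative import ..utils.logging):

    from scrapegraphai.utils.logging import set_verbosity_debug, set_verbosity_warning

    config = {"verbose": True, "headless": True}

    # Mirrors the new __init__ logic: a truthy "verbose" switches the library
    # logger to DEBUG; anything else (including config=None) leaves it at WARNING.
    if bool(config and config.get("verbose")):
        set_verbosity_debug()
    else:
        set_verbosity_warning()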
@@ -96,22 +114,22 @@ def set_common_params(self, params: dict, overwrite=False):

     def _set_model_token(self, llm):

-        if 'Azure' in str(type(llm)):
+        if "Azure" in str(type(llm)):
             try:
                 self.model_token = models_tokens["azure"][llm.model_name]
             except KeyError:
                 raise KeyError("Model not supported")

-        elif 'HuggingFaceEndpoint' in str(type(llm)):
-            if 'mistral' in llm.repo_id:
+        elif "HuggingFaceEndpoint" in str(type(llm)):
+            if "mistral" in llm.repo_id:
                 try:
-                    self.model_token = models_tokens['mistral'][llm.repo_id]
+                    self.model_token = models_tokens["mistral"][llm.repo_id]
                 except KeyError:
                     raise KeyError("Model not supported")
-        elif 'Google' in str(type(llm)):
+        elif "Google" in str(type(llm)):
             try:
-                if 'gemini' in llm.model:
-                    self.model_token = models_tokens['gemini'][llm.model]
+                if "gemini" in llm.model:
+                    self.model_token = models_tokens["gemini"][llm.model]
             except KeyError:
                 raise KeyError("Model not supported")

@@ -129,17 +147,14 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
             KeyError: If the model is not supported.
         """

-        llm_defaults = {
-            "temperature": 0,
-            "streaming": False
-        }
+        llm_defaults = {"temperature": 0, "streaming": False}
         llm_params = {**llm_defaults, **llm_config}

         # If model instance is passed directly instead of the model details
-        if 'model_instance' in llm_params:
+        if "model_instance" in llm_params:
             if chat:
-                self._set_model_token(llm_params['model_instance'])
-            return llm_params['model_instance']
+                self._set_model_token(llm_params["model_instance"])
+            return llm_params["model_instance"]

         # Instantiate the language model based on the model name
         if "gpt-" in llm_params["model"]:
@@ -205,18 +220,20 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
         elif "bedrock" in llm_params["model"]:
             llm_params["model"] = llm_params["model"].split("/")[-1]
             model_id = llm_params["model"]
-            client = llm_params.get('client', None)
+            client = llm_params.get("client", None)
             try:
                 self.model_token = models_tokens["bedrock"][llm_params["model"]]
             except KeyError as exc:
                 raise KeyError("Model not supported") from exc
-            return Bedrock({
-                "client": client,
-                "model_id": model_id,
-                "model_kwargs": {
-                    "temperature": llm_params["temperature"],
+            return Bedrock(
+                {
+                    "client": client,
+                    "model_id": model_id,
+                    "model_kwargs": {
+                        "temperature": llm_params["temperature"],
+                    },
                 }
-            })
+            )
         elif "claude-3-" in llm_params["model"]:
             self.model_token = models_tokens["claude"]["claude3"]
             return Anthropic(llm_params)
@@ -227,8 +244,7 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                 raise KeyError("Model not supported") from exc
             return DeepSeek(llm_params)
         else:
-            raise ValueError(
-                "Model provided by the configuration not supported")
+            raise ValueError("Model provided by the configuration not supported")

     def _create_default_embedder(self, llm_config=None) -> object:
         """
@@ -241,8 +257,9 @@ def _create_default_embedder(self, llm_config=None) -> object:
             ValueError: If the model is not supported.
         """
         if isinstance(self.llm_model, Gemini):
-            return GoogleGenerativeAIEmbeddings(google_api_key=llm_config['api_key'],
-                                                model="models/embedding-001")
+            return GoogleGenerativeAIEmbeddings(
+                google_api_key=llm_config["api_key"], model="models/embedding-001"
+            )
         if isinstance(self.llm_model, OpenAI):
             return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key)
         elif isinstance(self.llm_model, DeepSeek):
@@ -279,8 +296,8 @@ def _create_embedder(self, embedder_config: dict) -> object:
         Raises:
             KeyError: If the model is not supported.
         """
-        if 'model_instance' in embedder_config:
-            return embedder_config['model_instance']
+        if "model_instance" in embedder_config:
+            return embedder_config["model_instance"]
         # Instantiate the embedding model based on the model name
         if "openai" in embedder_config["model"]:
             return OpenAIEmbeddings(api_key=embedder_config["api_key"])
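
Note: _create_embedder dispatches on a substring of embedder_config["model"] (after the "model_instance" passthrough above). A hypothetical config for the "openai" branch shown here; the key names match what the code reads:

    import os

    # "model" selects the branch; "api_key" is forwarded to OpenAIEmbeddings.
    embedder_config = {
        "model": "openai",
        "api_key": os.environ["OPENAI_API_KEY"],
    }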
@@ -297,28 +314,27 @@ def _create_embedder(self, embedder_config: dict) -> object:
             try:
                 models_tokens["hugging_face"][embedder_config["model"]]
             except KeyError as exc:
-                raise KeyError("Model not supported")from exc
+                raise KeyError("Model not supported") from exc
             return HuggingFaceHubEmbeddings(model=embedder_config["model"])
         elif "gemini" in embedder_config["model"]:
             try:
                 models_tokens["gemini"][embedder_config["model"]]
             except KeyError as exc:
-                raise KeyError("Model not supported")from exc
+                raise KeyError("Model not supported") from exc
             return GoogleGenerativeAIEmbeddings(model=embedder_config["model"])
         elif "bedrock" in embedder_config["model"]:
             embedder_config["model"] = embedder_config["model"].split("/")[-1]
-            client = embedder_config.get('client', None)
+            client = embedder_config.get("client", None)
             try:
                 models_tokens["bedrock"][embedder_config["model"]]
             except KeyError as exc:
                 raise KeyError("Model not supported") from exc
             return BedrockEmbeddings(client=client, model_id=embedder_config["model"])
         else:
-            raise ValueError(
-                "Model provided by the configuration not supported")
+            raise ValueError("Model provided by the configuration not supported")

     def get_state(self, key=None) -> dict:
-        """""
+        """ ""
         Get the final state of the graph.

         Args: