import uuid
from pydantic import BaseModel

-from langchain_community.chat_models import ChatOllama
-from langchain_openai import ChatOpenAI
-
+from langchain_community.chat_models import ChatOllama, ErnieBotChat
from langchain_aws import BedrockEmbeddings, ChatBedrock
from langchain_huggingface import ChatHuggingFace, HuggingFaceEmbeddings
from langchain_community.embeddings import OllamaEmbeddings
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
+from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
from langchain_fireworks import FireworksEmbeddings, ChatFireworks
from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings, ChatOpenAI, AzureChatOpenAI
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings, ChatNVIDIA
-from langchain_community.chat_models import ErnieBotChat
+from langchain.chat_models import init_chat_model
+
from ..helpers import models_tokens
from ..models import (
    OneApi,
    DeepSeek
)
+from ..utils.logging import set_verbosity_warning, set_verbosity_info

-from langchain.chat_models import init_chat_model
-
-from ..utils.logging import set_verbosity_debug, set_verbosity_warning, set_verbosity_info
-
-from ..helpers import models_tokens


class AbstractGraph(ABC):
@@ -65,14 +58,14 @@ class AbstractGraph(ABC):
        >>> result = my_graph.run()
    """

-    def __init__(self, prompt: str, config: dict,
+    def __init__(self, prompt: str, config: dict,
                 source: Optional[str] = None, schema: Optional[BaseModel] = None):

        self.prompt = prompt
        self.source = source
        self.config = config
        self.schema = schema
-        self.llm_model = self._create_llm(config["llm"], chat=True)
+        self.llm_model = self._create_llm(config["llm"])
        self.embedder_model = self._create_default_embedder(llm_config=config["llm"]) if "embeddings" not in config else self._create_embedder(
            config["embeddings"])
        self.verbose = False if config is None else config.get(
@@ -128,7 +121,7 @@ def set_common_params(self, params: dict, overwrite=False):
        for node in self.graph.nodes:
            node.update_config(params, overwrite)

-    def _create_llm(self, llm_config: dict, chat=False) -> object:
+    def _create_llm(self, llm_config: dict) -> object:
        """
        Create a large language model instance based on the configuration provided.

@@ -148,9 +141,9 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
        # If model instance is passed directly instead of the model details
        if "model_instance" in llm_params:
            try:
-                self.model_token = llm_params["model_tokens"]
+                self.model_token = llm_params["model_tokens"]
            except KeyError as exc:
-                raise KeyError("model_tokens not specified") from exc
+                raise KeyError("model_tokens not specified") from exc
            return llm_params["model_instance"]

        # Instantiate the language model based on the model name
@@ -161,23 +154,26 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return init_chat_model(**llm_params)
-        elif "oneapi" in llm_params["model"]:
+
+        if "oneapi" in llm_params["model"]:
            # take the model after the last dash
            llm_params["model"] = llm_params["model"].split("/")[-1]
            try:
                self.model_token = models_tokens["oneapi"][llm_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return OneApi(llm_params)
-        elif "fireworks" in llm_params["model"]:
+
+        if "fireworks" in llm_params["model"]:
            try:
                self.model_token = models_tokens["fireworks"][llm_params["model"].split("/")[-1]]
                llm_params["model"] = "/".join(llm_params["model"].split("/")[1:])
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            llm_params["model_provider"] = "fireworks"
            return init_chat_model(**llm_params)
-        elif "azure" in llm_params["model"]:
+
+        if "azure" in llm_params["model"]:
            # take the model after the last dash
            llm_params["model"] = llm_params["model"].split("/")[-1]
            try:
@@ -186,38 +182,42 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                raise KeyError("Model not supported") from exc
            llm_params["model_provider"] = "azure_openai"
            return init_chat_model(**llm_params)
-        elif "nvidia" in llm_params["model"]:
+
+        if "nvidia" in llm_params["model"]:
            try:
                self.model_token = models_tokens["nvidia"][llm_params["model"].split("/")[-1]]
                llm_params["model"] = "/".join(llm_params["model"].split("/")[1:])
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return ChatNVIDIA(llm_params)
-        elif "gemini" in llm_params["model"]:
+
+        if "gemini" in llm_params["model"]:
            llm_params["model"] = llm_params["model"].split("/")[-1]
            try:
                self.model_token = models_tokens["gemini"][llm_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            llm_params["model_provider"] = "google_genai"
            return init_chat_model(**llm_params)
-        elif llm_params["model"].startswith("claude"):
+
+        if llm_params["model"].startswith("claude"):
            llm_params["model"] = llm_params["model"].split("/")[-1]
            try:
                self.model_token = models_tokens["claude"][llm_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            llm_params["model_provider"] = "anthropic"
            return init_chat_model(**llm_params)
-        elif llm_params["model"].startswith("vertexai"):
+
+        if llm_params["model"].startswith("vertexai"):
            try:
                self.model_token = models_tokens["vertexai"][llm_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            llm_params["model_provider"] = "google_vertexai"
            return init_chat_model(**llm_params)

-        elif "ollama" in llm_params["model"]:
+        if "ollama" in llm_params["model"]:
            llm_params["model"] = llm_params["model"].split("ollama/")[-1]
            llm_params["model_provider"] = "ollama"

@@ -238,7 +238,7 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:

            return init_chat_model(**llm_params)

-        elif "hugging_face" in llm_params["model"]:
+        if "hugging_face" in llm_params["model"]:
            llm_params["model"] = llm_params["model"].split("/")[-1]
            try:
                self.model_token = models_tokens["hugging_face"][llm_params["model"]]
@@ -247,7 +247,8 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                self.model_token = 8192
            llm_params["model_provider"] = "hugging_face"
            return init_chat_model(**llm_params)
-        elif "groq" in llm_params["model"]:
+
+        if "groq" in llm_params["model"]:
            llm_params["model"] = llm_params["model"].split("/")[-1]

            try:
@@ -257,41 +258,43 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                self.model_token = 8192
            llm_params["model_provider"] = "groq"
            return init_chat_model(**llm_params)
-        elif "bedrock" in llm_params["model"]:
+
+        if "bedrock" in llm_params["model"]:
            llm_params["model"] = llm_params["model"].split("/")[-1]
-            model_id = llm_params["model"]
-            client = llm_params.get("client", None)
            try:
                self.model_token = models_tokens["bedrock"][llm_params["model"]]
            except KeyError:
                print("model not found, using default token size (8192)")
                self.model_token = 8192
            llm_params["model_provider"] = "bedrock"
            return init_chat_model(**llm_params)
-        elif "claude-3-" in llm_params["model"]:
+
+        if "claude-3-" in llm_params["model"]:
            try:
                self.model_token = models_tokens["claude"]["claude3"]
            except KeyError:
                print("model not found, using default token size (8192)")
                self.model_token = 8192
            llm_params["model_provider"] = "anthropic"
            return init_chat_model(**llm_params)
-        elif "deepseek" in llm_params["model"]:
+
+        if "deepseek" in llm_params["model"]:
            try:
                self.model_token = models_tokens["deepseek"][llm_params["model"]]
            except KeyError:
                print("model not found, using default token size (8192)")
                self.model_token = 8192
            return DeepSeek(llm_params)
-        elif "ernie" in llm_params["model"]:
+
+        if "ernie" in llm_params["model"]:
            try:
                self.model_token = models_tokens["ernie"][llm_params["model"]]
            except KeyError:
                print("model not found, using default token size (8192)")
                self.model_token = 8192
            return ErnieBotChat(llm_params)
-        else:
-            raise ValueError("Model provided by the configuration not supported")
+
+        raise ValueError("Model provided by the configuration not supported")

    def _create_default_embedder(self, llm_config=None) -> object:
        """
@@ -308,7 +311,7 @@ def _create_default_embedder(self, llm_config=None) -> object:
                google_api_key=llm_config["api_key"], model="models/embedding-001"
            )
        if isinstance(self.llm_model, ChatOpenAI):
-            return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key,
+            return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key,
                                    base_url=self.llm_model.openai_api_base)
        elif isinstance(self.llm_model, DeepSeek):
            return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key)
@@ -356,53 +359,53 @@ def _create_embedder(self, embedder_config: dict) -> object:
        # Instantiate the embedding model based on the model name
        if "openai" in embedder_params["model"]:
            return OpenAIEmbeddings(api_key=embedder_params["api_key"])
-        elif "azure" in embedder_params["model"]:
+        if "azure" in embedder_params["model"]:
            return AzureOpenAIEmbeddings()
        if "nvidia" in embedder_params["model"]:
            embedder_params["model"] = "/".join(embedder_params["model"].split("/")[1:])
            try:
                models_tokens["nvidia"][embedder_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
-            return NVIDIAEmbeddings(model=embedder_params["model"],
+            return NVIDIAEmbeddings(model=embedder_params["model"],
                                    nvidia_api_key=embedder_params["api_key"])
-        elif "ollama" in embedder_params["model"]:
+        if "ollama" in embedder_params["model"]:
            embedder_params["model"] = "/".join(embedder_params["model"].split("/")[1:])
            try:
                models_tokens["ollama"][embedder_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return OllamaEmbeddings(**embedder_params)
-        elif "hugging_face" in embedder_params["model"]:
+        if "hugging_face" in embedder_params["model"]:
            embedder_params["model"] = "/".join(embedder_params["model"].split("/")[1:])
            try:
                models_tokens["hugging_face"][embedder_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return HuggingFaceEmbeddings(model=embedder_params["model"])
-        elif "fireworks" in embedder_params["model"]:
+        if "fireworks" in embedder_params["model"]:
            embedder_params["model"] = "/".join(embedder_params["model"].split("/")[1:])
            try:
                models_tokens["fireworks"][embedder_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return FireworksEmbeddings(model=embedder_params["model"])
-        elif "gemini" in embedder_params["model"]:
+        if "gemini" in embedder_params["model"]:
            try:
                models_tokens["gemini"][embedder_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return GoogleGenerativeAIEmbeddings(model=embedder_params["model"])
-        elif "bedrock" in embedder_params["model"]:
+        if "bedrock" in embedder_params["model"]:
            embedder_params["model"] = embedder_params["model"].split("/")[-1]
            client = embedder_params.get("client", None)
            try:
                models_tokens["bedrock"][embedder_params["model"]]
            except KeyError as exc:
                raise KeyError("Model not supported") from exc
            return BedrockEmbeddings(client=client, model_id=embedder_params["model"])
-        else:
-            raise ValueError("Model provided by the configuration not supported")
+
+        raise ValueError("Model provided by the configuration not supported")

    def get_state(self, key=None) -> dict:
        """ ""
@@ -444,11 +447,9 @@ def _create_graph(self):
        """
        Abstract method to create a graph representation.
        """
-        pass

    @abstractmethod
    def run(self) -> str:
        """
        Abstract method to execute the graph and return the result.
        """
-        pass
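
For context, the provider branches in `_create_llm` now funnel into LangChain's `init_chat_model` factory instead of instantiating provider classes directly, and the early `return`s make the trailing `ValueError` unconditional. Below is a minimal standalone sketch of what a resolved branch amounts to; the model name and API key are placeholders, not values from this commit:

```python
from langchain.chat_models import init_chat_model

# What the "gemini" branch reduces to after normalization: strip the
# provider prefix from the configured model string, set the provider
# string, and let init_chat_model build the concrete chat model.
llm = init_chat_model(
    model="gemini-pro",             # placeholder model name
    model_provider="google_genai",  # provider string set by that branch
    api_key="YOUR_API_KEY",         # placeholder credential
)
print(llm.invoke("Summarize this page in one sentence.").content)
```

Replacing the `elif` chain with independent `if` blocks that each return keeps every provider self-contained, so new providers can be appended without touching the final fall-through.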