@@ -41,7 +41,7 @@ def _create_llm(self, llm_config: dict):
41
41
try :
42
42
self .model_token = models_tokens ["openai" ][llm_params ["model" ]]
43
43
except KeyError :
44
- raise ValueError ("Model not supported" )
44
+ raise KeyError ("Model not supported" )
45
45
return OpenAI (llm_params )
46
46
47
47
elif "azure" in llm_params ["model" ]:
@@ -50,14 +50,14 @@ def _create_llm(self, llm_config: dict):
50
50
try :
51
51
self .model_token = models_tokens ["azure" ][llm_params ["model" ]]
52
52
except KeyError :
53
- raise ValueError ("Model not supported" )
53
+ raise KeyError ("Model not supported" )
54
54
return AzureOpenAI (llm_params )
55
55
56
56
elif "gemini" in llm_params ["model" ]:
57
57
try :
58
58
self .model_token = models_tokens ["gemini" ][llm_params ["model" ]]
59
59
except KeyError :
60
- raise ValueError ("Model not supported" )
60
+ raise KeyError ("Model not supported" )
61
61
return Gemini (llm_params )
62
62
63
63
elif "ollama" in llm_params ["model" ]:
@@ -70,19 +70,27 @@ def _create_llm(self, llm_config: dict):
70
70
try :
71
71
self .model_token = models_tokens ["ollama" ][llm_params ["model" ]]
72
72
except KeyError :
73
- raise ValueError ("Model not supported" )
73
+ raise KeyError ("Model not supported" )
74
74
75
75
return Ollama (llm_params )
76
76
elif "hugging_face" in llm_params ["model" ]:
77
77
try :
78
78
self .model_token = models_tokens ["hugging_face" ][llm_params ["model" ]]
79
79
except KeyError :
80
- raise ValueError ("Model not supported" )
80
+ raise KeyError ("Model not supported" )
81
81
return HuggingFace (llm_params )
82
82
else :
83
83
raise ValueError (
84
84
"Model provided by the configuration not supported" )
85
85
86
def get_state(self, key=None):
    """
    Obtain the current state of the graph execution.

    Args:
        key: Optional key into the final state. When given, only the
            value stored under that key is returned; a missing key
            raises ``KeyError`` (plain ``self.final_state[key]`` lookup).

    Returns:
        The full ``final_state`` dict when ``key`` is None, otherwise
        the single value stored under ``key`` (which need not be a dict).
    """
    # NOTE(review): the original annotated the return as ``-> dict``, but a
    # keyed lookup returns whatever value is stored — the annotation was
    # dropped rather than kept inaccurate (no runtime behavior change).
    if key is not None:
        return self.final_state[key]
    return self.final_state
86
94
def get_execution_info (self ):
87
95
"""
88
96
Returns the execution information of the graph.
0 commit comments