@@ -63,13 +63,10 @@ def __init__(self, prompt: str, config: dict,
         self.cache_path = self.config.get("cache_path", False)
         self.browser_base = self.config.get("browser_base")

-        # Create the graph
         self.graph = self._create_graph()
         self.final_state = None
         self.execution_info = None

-        # Set common configuration parameters
-
         verbose = bool(config and config.get("verbose"))

         if verbose:
@@ -87,12 +84,10 @@ def __init__(self, prompt: str, config: dict,

         self.set_common_params(common_params, overwrite=True)

-        # set burr config
         self.burr_kwargs = config.get("burr_kwargs", None)
         if self.burr_kwargs is not None:
             self.graph.use_burr = True
             if "app_instance_id" not in self.burr_kwargs:
-                # set a random uuid for the app_instance_id to avoid conflicts
                 self.burr_kwargs["app_instance_id"] = str(uuid.uuid4())

         self.graph.burr_config = self.burr_kwargs
@@ -125,7 +120,6 @@ def _create_llm(self, llm_config: dict) -> object:
         llm_defaults = {"temperature": 0, "streaming": False}
         llm_params = {**llm_defaults, **llm_config}

-        # If model instance is passed directly instead of the model details
         if "model_instance" in llm_params:
             try:
                 self.model_token = llm_params["model_tokens"]
@@ -145,18 +139,14 @@ def handle_model(model_name, provider, token_key, default_token=8192):
                 warnings.simplefilter("ignore")
                 return init_chat_model(**llm_params)

-        known_models = ["chatgpt", "gpt", "openai", "azure_openai", "google_genai",
-                        "ollama", "oneapi", "nvidia", "groq", "google_vertexai",
-                        "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"]
-
+        known_models = {"chatgpt", "gpt", "openai", "azure_openai", "google_genai",
+                        "ollama", "oneapi", "nvidia", "groq", "google_vertexai",
+                        "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"}

         if llm_params["model"].split("/")[0] not in known_models and llm_params["model"].split("-")[0] not in known_models:
             raise ValueError(f"Model '{llm_params['model']}' is not supported")

         try:
-            if "azure" in llm_params["model"]:
-                model_name = llm_params["model"].split("/")[-1]
-                return handle_model(model_name, "azure_openai", model_name)
             if "fireworks" in llm_params["model"]:
                 model_name = "/".join(llm_params["model"].split("/")[1:])
                 token_key = llm_params["model"].split("/")[-1]
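
Note on the hunk above: switching known_models from a list to a set makes each membership test an average O(1) hash lookup instead of a linear scan, without changing behavior. A minimal, self-contained sketch of the check this code performs; the helper name is_supported is hypothetical, for illustration only:

    # known_models is now a set, so "x in known_models" is a hash lookup
    # rather than a list scan.
    known_models = {"chatgpt", "gpt", "openai", "azure_openai", "google_genai",
                    "ollama", "oneapi", "nvidia", "groq", "google_vertexai",
                    "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"}

    def is_supported(model: str) -> bool:
        # A model may carry its provider as a "provider/model" path or as the
        # first dash-separated token; either spelling passes the check.
        return model.split("/")[0] in known_models or model.split("-")[0] in known_models

    assert is_supported("openai/gpt-4o-mini")        # provider prefix before "/"
    assert is_supported("gpt-4o-mini")               # provider inferred from "gpt-..."
    assert not is_supported("unknown_provider/some-model")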
@@ -207,7 +197,6 @@ def handle_model(model_name, provider, token_key, default_token=8192):
                 return ErnieBotChat(llm_params)

             elif "oneapi" in llm_params["model"]:
-                # take the model after the last dash
                 llm_params["model"] = llm_params["model"].split("/")[-1]
                 try:
                     self.model_token = models_tokens["oneapi"][llm_params["model"]]
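
For context on the last hunk: the oneapi branch keeps only the segment after the final "/", so a provider-qualified name like "oneapi/<model>" is reduced to the bare model key before the models_tokens lookup, with handle_model's default_token of 8192 as the fallback. A hedged sketch, with an assumed shape for the lookup table and an illustrative model name and token count, not the library's real values:

    # Assumed shape of the models_tokens table; entries are illustrative only.
    models_tokens = {"oneapi": {"example-model": 4096}}

    model = "oneapi/example-model".split("/")[-1]   # -> "example-model"
    try:
        model_token = models_tokens["oneapi"][model]
    except KeyError:
        model_token = 8192  # matches handle_model's default_token
    print(model, model_token)                        # example-model 4096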