
Commit e4421ff

Merge branch '731-fetchnode-object-has-no-attribute-update_state' into pre/beta
2 parents: 47645d9 + 026a70b

18 files changed: +64 additions, −51 deletions

examples/openai/md_scraper_openai.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -37,7 +37,7 @@
 # ************************************************
 
 md_scraper_graph = DocumentScraperGraph(
-    prompt="List me all the authors, title and genres of the books",
+    prompt="List me all the projects",
    source=text,  # Pass the content of the file, not the file object
    config=graph_config
 )
```
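
For context, the surrounding example builds the graph from a prompt, the file's content, and a config dict, then runs it. A minimal sketch of that flow under stated assumptions — the input path, the `graph_config` values, and the `run()` entry point mirror the repo's other examples rather than this diff:

```python
# Sketch only: "projects.md" and the config values are illustrative.
from scrapegraphai.graphs import DocumentScraperGraph

graph_config = {"llm": {"api_key": "sk-...", "model": "openai/gpt-4o-mini"}}  # illustrative

with open("projects.md", "r", encoding="utf-8") as f:
    text = f.read()  # pass the content of the file, not the file object

md_scraper_graph = DocumentScraperGraph(
    prompt="List me all the projects",
    source=text,
    config=graph_config,
)
result = md_scraper_graph.run()  # assumed entry point, as in the other examples
print(result)
```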

scrapegraphai/builders/graph_builder.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -119,7 +119,7 @@ def build_graph(self):
         Returns:
             dict: A JSON representation of the graph configuration.
         """
-        return self.chain.invoke(self.prompt)
+        return self.chain.ainvoke(self.prompt)
 
     @staticmethod
     def convert_json_to_graphviz(json_data, format: str = 'pdf'):
```
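
Worth flagging: `ainvoke` is LangChain's async counterpart of `invoke` and returns a coroutine, so a still-synchronous `build_graph` would now hand back a coroutine object rather than the JSON configuration until the call is awaited. A minimal sketch of the awaited pattern (hypothetical caller code, not part of this commit):

```python
import asyncio

async def build_graph_async(chain, prompt: str):
    # ainvoke is a coroutine function; the chain's output only
    # materializes once the call is awaited.
    return await chain.ainvoke(prompt)

# Hypothetical usage from synchronous code:
# graph_json = asyncio.run(build_graph_async(builder.chain, builder.prompt))
```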

scrapegraphai/nodes/generate_answer_csv_node.py

Lines changed: 3 additions & 3 deletions
```diff
@@ -60,7 +60,7 @@ def __init__(
 
         self.additional_info = node_config.get("additional_info")
 
-    def execute(self, state):
+    async def execute(self, state):
         """
         Generates an answer by constructing a prompt from the user's input and the scraped
         content, querying the language model, and parsing its response.
@@ -126,7 +126,7 @@ def execute(self, state):
         )
 
         chain = prompt | self.llm_model | output_parser
-        answer = chain.invoke({"question": user_prompt})
+        answer = chain.ainvoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
@@ -157,7 +157,7 @@ def execute(self, state):
         )
 
         merge_chain = merge_prompt | self.llm_model | output_parser
-        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
+        answer = await merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
```
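
With `execute` now a coroutine function, callers must await it to get the updated state back; note that the middle hunk stores `chain.ainvoke(...)` without `await`, unlike the merge branch below it, so `answer` would be a coroutine object on that path. A minimal sketch of the awaited call pattern (hypothetical wiring, not from this commit):

```python
import asyncio

async def run_node(node, state: dict) -> dict:
    # An async execute() must be awaited; calling it bare returns
    # a coroutine object instead of the updated state dict.
    return await node.execute(state)

# Hypothetical usage from synchronous code:
# state = asyncio.run(run_node(generate_answer_csv_node, state))
```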

scrapegraphai/nodes/generate_answer_node.py

Lines changed: 33 additions & 23 deletions
```diff
@@ -1,5 +1,5 @@
 """
-generate_answer_node module
+GenerateAnswerNode Module
 """
 from typing import List, Optional
 from langchain.prompts import PromptTemplate
@@ -19,24 +19,24 @@
 
 class GenerateAnswerNode(BaseNode):
     """
-    Initializes the GenerateAnswerNode class.
-
-    Args:
-        input (str): The input data type for the node.
-        output (List[str]): The output data type(s) for the node.
-        node_config (Optional[dict]): Configuration dictionary for the node,
-            which includes the LLM model, verbosity, schema, and other settings.
-            Defaults to None.
-        node_name (str): The name of the node. Defaults to "GenerateAnswer".
-
-    Attributes:
-        llm_model: The language model specified in the node configuration.
-        verbose (bool): Whether verbose mode is enabled.
-        force (bool): Whether to force certain behaviors, overriding defaults.
-        script_creator (bool): Whether the node is in script creation mode.
-        is_md_scraper (bool): Whether the node is scraping markdown data.
-        additional_info (Optional[str]): Any additional information to be
-            included in the prompt templates.
+    Initializes the GenerateAnswerNode class.
+
+    Args:
+        input (str): The input data type for the node.
+        output (List[str]): The output data type(s) for the node.
+        node_config (Optional[dict]): Configuration dictionary for the node,
+            which includes the LLM model, verbosity, schema, and other settings.
+            Defaults to None.
+        node_name (str): The name of the node. Defaults to "GenerateAnswer".
+
+    Attributes:
+        llm_model: The language model specified in the node configuration.
+        verbose (bool): Whether verbose mode is enabled.
+        force (bool): Whether to force certain behaviors, overriding defaults.
+        script_creator (bool): Whether the node is in script creation mode.
+        is_md_scraper (bool): Whether the node is scraping markdown data.
+        additional_info (Optional[str]): Any additional information to be
+            included in the prompt templates.
     """
     def __init__(
         self,
@@ -57,7 +57,17 @@ def __init__(
         self.is_md_scraper = node_config.get("is_md_scraper", False)
         self.additional_info = node_config.get("additional_info")
 
-    def execute(self, state: dict) -> dict:
+    async def execute(self, state: dict) -> dict:
+        """
+        Executes the GenerateAnswerNode.
+
+        Args:
+            state (dict): The current state of the graph. The input keys will be used
+                to fetch the correct data from the state.
+
+        Returns:
+            dict: The updated state with the output key containing the generated answer.
+        """
         self.logger.info(f"--- Executing {self.node_name} Node ---")
 
         input_keys = self.get_input_keys(state)
@@ -113,7 +123,7 @@ def execute(self, state: dict) -> dict:
         chain = prompt | self.llm_model
         if output_parser:
             chain = chain | output_parser
-        answer = chain.invoke({"question": user_prompt})
+        answer = await chain.ainvoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
@@ -133,7 +143,7 @@ def execute(self, state: dict) -> dict:
            chains_dict[chain_name] = chains_dict[chain_name] | output_parser
 
         async_runner = RunnableParallel(**chains_dict)
-        batch_results = async_runner.invoke({"question": user_prompt})
+        batch_results = await async_runner.ainvoke({"question": user_prompt})
 
         merge_prompt = PromptTemplate(
             template=template_merge_prompt,
@@ -144,7 +154,7 @@ def execute(self, state: dict) -> dict:
         merge_chain = merge_prompt | self.llm_model
         if output_parser:
             merge_chain = merge_chain | output_parser
-        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
+        answer = await merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
```

(In the `@@ -19,24 +19,24 @@` hunk the docstring text is identical on both sides; the change is whitespace-only re-indentation.)
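
The fan-out/merge flow this node implements — one chain per chunk run through `RunnableParallel`, then a merge chain over the batch results — can be exercised in isolation. A runnable sketch with a stand-in model (all names here are illustrative, not the node's own):

```python
import asyncio

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnableParallel

# Stand-in for self.llm_model so the sketch runs without credentials.
fake_llm = RunnableLambda(lambda pv: f"answer for [{pv.to_string()}]")

chains_dict = {
    f"chunk_{i}": PromptTemplate.from_template(f"Chunk {i}: {{question}}")
    | fake_llm
    | StrOutputParser()
    for i in (1, 2)
}

async def main():
    # RunnableParallel fans the same input out to every chain concurrently;
    # ainvoke must be awaited and yields a dict keyed by chain name.
    batch_results = await RunnableParallel(**chains_dict).ainvoke(
        {"question": "list the projects"}
    )
    print(batch_results)  # {'chunk_1': '...', 'chunk_2': '...'}

asyncio.run(main())
```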

scrapegraphai/nodes/generate_answer_node_k_level.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -143,7 +143,7 @@ def execute(self, state: dict) -> dict:
         merge_chain = merge_prompt | self.llm_model
         if output_parser:
             merge_chain = merge_chain | output_parser
-        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
+        answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
 
         state["answer"] = answer
 
```
scrapegraphai/nodes/generate_answer_omni_node.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -117,7 +117,7 @@ def execute(self, state: dict) -> dict:
         )
 
         chain = prompt | self.llm_model | output_parser
-        answer = chain.invoke({"question": user_prompt})
+        answer = chain.ainvoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
@@ -149,7 +149,7 @@ def execute(self, state: dict) -> dict:
         )
 
         merge_chain = merge_prompt | self.llm_model | output_parser
-        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
+        answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
```

scrapegraphai/nodes/generate_code_node.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -325,7 +325,7 @@ def generate_initial_code(self, state: dict) -> str:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        generated_code = chain.invoke({})
+        generated_code = chain.ainvoke({})
         return generated_code
 
     def semantic_comparison(self, generated_result: Any, reference_result: Any) -> Dict[str, Any]:
@@ -368,7 +368,7 @@ def semantic_comparison(self, generated_result: Any, reference_result: Any) -> Dict[str, Any]:
         )
 
         chain = prompt | self.llm_model | output_parser
-        return chain.invoke({
+        return chain.ainvoke({
             "generated_result": json.dumps(generated_result, indent=2),
             "reference_result": json.dumps(reference_result_dict, indent=2)
         })
```

scrapegraphai/nodes/generate_scraper_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -130,7 +130,7 @@ def execute(self, state: dict) -> dict:
         )
         map_chain = prompt | self.llm_model | StrOutputParser()
 
-        answer = map_chain.invoke({"question": user_prompt})
+        answer = map_chain.ainvoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
```

scrapegraphai/nodes/html_analyzer_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -93,7 +93,7 @@ def execute(self, state: dict) -> dict:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        html_analysis = chain.invoke({})
+        html_analysis = chain.ainvoke({})
 
         state.update({self.output[0]: html_analysis, self.output[1]: reduced_html})
         return state
```

scrapegraphai/nodes/merge_answers_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -95,7 +95,7 @@ def execute(self, state: dict) -> dict:
         )
 
         merge_chain = prompt_template | self.llm_model | output_parser
-        answer = merge_chain.invoke({"user_prompt": user_prompt})
+        answer = merge_chain.ainvoke({"user_prompt": user_prompt})
         answer["sources"] = state.get("urls", [])
 
         state.update({self.output[0]: answer})
```

scrapegraphai/nodes/merge_generated_scripts_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -74,7 +74,7 @@ def execute(self, state: dict) -> dict:
         )
 
         merge_chain = prompt_template | self.llm_model | StrOutputParser()
-        answer = merge_chain.invoke({"user_prompt": user_prompt})
+        answer = merge_chain.ainvoke({"user_prompt": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
```

scrapegraphai/nodes/parse_node.py

Lines changed: 5 additions & 2 deletions
```diff
@@ -85,10 +85,13 @@ def execute(self, state: dict) -> dict:
         else:
             docs_transformed = docs_transformed[0]
 
-        link_urls, img_urls = self._extract_urls(docs_transformed.page_content, source)
+        try:
+            link_urls, img_urls = self._extract_urls(docs_transformed.page_content, source)
+        except Exception as e:
+            link_urls, img_urls = "", ""
 
         chunk_size = self.chunk_size
-        chunk_size = min(chunk_size - 500, int(chunk_size * 0.75))
+        chunk_size = min(chunk_size - 500, int(chunk_size * 0.8))
 
         if isinstance(docs_transformed, Document):
             chunks = split_text_into_chunks(text=docs_transformed.page_content,
```
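
Two small behavior notes fall out of this hunk: URL extraction failures now degrade to empty strings instead of raising, and the chunk-size cap loosens from 75% to 80% of the configured size. A quick worked example of the cap with an illustrative value (8192 is not from the diff):

```python
# Illustrative value; in practice self.chunk_size comes from the node config.
chunk_size = 8192
old_cap = min(chunk_size - 500, int(chunk_size * 0.75))  # min(7692, 6144) -> 6144
new_cap = min(chunk_size - 500, int(chunk_size * 0.8))   # min(7692, 6553) -> 6553
print(old_cap, new_cap)  # 6144 6553
```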

scrapegraphai/nodes/prompt_refiner_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -96,7 +96,7 @@ def execute(self, state: dict) -> dict:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        refined_prompt = chain.invoke({})
+        refined_prompt = chain.ainvoke({})
 
         state.update({self.output[0]: refined_prompt})
         return state
```

scrapegraphai/nodes/reasoning_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -91,7 +91,7 @@ def execute(self, state: dict) -> dict:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        refined_prompt = chain.invoke({})
+        refined_prompt = chain.ainvoke({})
 
         state.update({self.output[0]: refined_prompt})
         return state
```

scrapegraphai/nodes/robots_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -108,7 +108,7 @@ def execute(self, state: dict) -> dict:
         )
 
         chain = prompt | self.llm_model | output_parser
-        is_scrapable = chain.invoke({"path": source})[0]
+        is_scrapable = chain.ainvoke({"path": source})[0]
 
         if "no" in is_scrapable:
             self.logger.warning(
```
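
One detail worth flagging: the result of `chain.ainvoke(...)` is subscripted immediately, but a coroutine object is not subscriptable, so the `[0]` only works once the call is awaited. A sketch of the awaited form (hypothetical helper, not part of this commit):

```python
import asyncio

async def check_scrapable(chain, source: str) -> str:
    # Await first, then index: the coroutine itself has no __getitem__.
    result = await chain.ainvoke({"path": source})
    return result[0]

# Hypothetical usage from synchronous code:
# is_scrapable = asyncio.run(check_scrapable(chain, source))
```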

scrapegraphai/nodes/search_link_node.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -142,7 +142,7 @@ def execute(self, state: dict) -> dict:
                 input_variables=["content", "user_prompt"],
             )
             merge_chain = merge_prompt | self.llm_model | output_parser
-            answer = merge_chain.invoke(
+            answer = merge_chain.ainvoke(
                 {"content": chunk.page_content}
             )
             relevant_links += answer
```

scrapegraphai/utils/code_error_analysis.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -33,7 +33,7 @@ def syntax_focused_analysis(state: dict, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_SYNTAX_ANALYSIS,
                             input_variables=["generated_code", "errors"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "generated_code": state["generated_code"],
         "errors": state["errors"]["syntax"]
     })
@@ -53,7 +53,7 @@ def execution_focused_analysis(state: dict, llm_model) -> str:
                             input_variables=["generated_code", "errors",
                                              "html_code", "html_analysis"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "generated_code": state["generated_code"],
         "errors": state["errors"]["execution"],
         "html_code": state["html_code"],
@@ -76,7 +76,7 @@ def validation_focused_analysis(state: dict, llm_model) -> str:
                             input_variables=["generated_code", "errors",
                                              "json_schema", "execution_result"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "generated_code": state["generated_code"],
         "errors": state["errors"]["validation"],
         "json_schema": state["json_schema"],
@@ -100,7 +100,7 @@ def semantic_focused_analysis(state: dict, comparison_result: Dict[str, Any], llm_model) -> str:
                             input_variables=["generated_code",
                                              "differences", "explanation"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "generated_code": state["generated_code"],
         "differences": json.dumps(comparison_result["differences"], indent=2),
         "explanation": comparison_result["explanation"]
```

scrapegraphai/utils/code_error_correction.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -32,7 +32,7 @@ def syntax_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_SYNTAX_CODE_GENERATION,
                             input_variables=["analysis", "generated_code"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "analysis": analysis,
         "generated_code": state["generated_code"]
     })
@@ -52,7 +52,7 @@ def execution_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_EXECUTION_CODE_GENERATION,
                             input_variables=["analysis", "generated_code"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "analysis": analysis,
         "generated_code": state["generated_code"]
     })
@@ -72,7 +72,7 @@ def validation_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_VALIDATION_CODE_GENERATION,
                             input_variables=["analysis", "generated_code", "json_schema"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "analysis": analysis,
         "generated_code": state["generated_code"],
         "json_schema": state["json_schema"]
@@ -93,7 +93,7 @@ def semantic_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_SEMANTIC_CODE_GENERATION,
                             input_variables=["analysis", "generated_code", "generated_result", "reference_result"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.invoke({
+    return chain.ainvoke({
         "analysis": analysis,
         "generated_code": state["generated_code"],
         "generated_result": json.dumps(state["execution_result"], indent=2),
```
