Commit e45f159

enhanced performance and readability
1 parent e6c7940 commit e45f159

File tree

3 files changed: +8 -12 lines changed


examples/local_models/pdf_scraper_ollama.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 
 graph_config = {
     "llm": {
-        "model": "ollama/llama3",
+        "model": "ollama/mistral",
         "temperature": 0,
         "format": "json",  # Ollama needs the format to be specified explicitly
         "model_tokens": 4000,

scrapegraphai/nodes/generate_answer_node.py

Lines changed: 4 additions & 7 deletions
@@ -99,7 +99,8 @@ def execute(self, state: dict) -> dict:
                     input_variables=["question"],
                     partial_variables={"context": chunk.page_content,
                                        "format_instructions": format_instructions})
-
+                chain = prompt | self.llm_model | output_parser
+                answer = chain.invoke({"question": user_prompt})
             else:
                 prompt = PromptTemplate(
                     template=template_chunks,
@@ -125,11 +126,7 @@ def execute(self, state: dict) -> dict:
             )
             merge_chain = merge_prompt | self.llm_model | output_parser
             answer = merge_chain.invoke({"context": answer, "question": user_prompt})
-        else:
-            # Chain
-            single_chain = list(chains_dict.values())[0]
-            answer = single_chain.invoke({"question": user_prompt})
-
         # Update the state with the generated answer
         state.update({self.output[0]: answer})
-        return state
+        return state
+
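
The pattern adopted here is LangChain's LCEL composition: the prompt, model, and output parser are piped into a single runnable and invoked directly in the single-chunk branch, instead of stashing the chain in chains_dict and fishing it back out after the loop. A self-contained sketch of the same pattern, assuming a local Ollama model; the template text, context string, and question are placeholders:

from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate

llm_model = ChatOllama(model="mistral", format="json")  # assumed local model
output_parser = JsonOutputParser()

prompt = PromptTemplate(
    template="Context: {context}\nQuestion: {question}\n{format_instructions}",
    input_variables=["question"],
    partial_variables={
        "context": "example page content",  # stands in for chunk.page_content
        "format_instructions": output_parser.get_format_instructions(),
    },
)

# LCEL: each `|` composes runnables, so prompt -> model -> parser runs as one chain.
chain = prompt | llm_model | output_parser
answer = chain.invoke({"question": "What is this document about?"})
print(answer)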

scrapegraphai/nodes/generate_answer_pdf_node.py

Lines changed: 3 additions & 4 deletions
@@ -117,6 +117,9 @@ def execute(self, state):
                         "format_instructions": format_instructions,
                     },
                 )
+
+                chain = prompt | self.llm_model | output_parser
+                answer = chain.invoke({"question": user_prompt})
             else:
                 prompt = PromptTemplate(
                     template=template_chunks_pdf,
@@ -145,10 +148,6 @@ def execute(self, state):
             )
             merge_chain = merge_prompt | self.llm_model | output_parser
             answer = merge_chain.invoke({"context": answer, "question": user_prompt})
-        else:
-            # Chain
-            single_chain = list(chains_dict.values())[0]
-            answer = single_chain.invoke({"question": user_prompt})
 
         # Update the state with the generated answer
         state.update({self.output[0]: answer})
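
In both nodes the deleted else branch handled the single-chunk case after the loop by pulling the lone chain out of chains_dict; with the new in-branch invocation, answer is already populated before the merge step, so that lookup was dead code.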
