Commit f01b55e

fix: fetch node

1 parent caff63b

3 files changed: 5 additions, 75 deletions

requirements-dev.lock

Lines changed: 0 additions & 36 deletions
@@ -6,8 +6,6 @@
 # features: []
 # all-features: false
 # with-sources: false
-# generate-hashes: false
-# universal: false
 
 -e file:.
 aiofiles==24.1.0
@@ -112,7 +110,6 @@ filelock==3.15.4
 # via huggingface-hub
 # via torch
 # via transformers
-# via triton
 fireworks-ai==0.14.0
 # via langchain-fireworks
 fonttools==4.53.1
@@ -188,7 +185,6 @@ graphviz==0.20.3
 # via scrapegraphai
 greenlet==3.0.3
 # via playwright
-# via sqlalchemy
 groq==0.9.0
 # via langchain-groq
 grpc-google-iam-v1==0.13.1
@@ -362,34 +358,6 @@ numpy==1.26.4
 # via shapely
 # via streamlit
 # via transformers
-nvidia-cublas-cu12==12.1.3.1
-# via nvidia-cudnn-cu12
-# via nvidia-cusolver-cu12
-# via torch
-nvidia-cuda-cupti-cu12==12.1.105
-# via torch
-nvidia-cuda-nvrtc-cu12==12.1.105
-# via torch
-nvidia-cuda-runtime-cu12==12.1.105
-# via torch
-nvidia-cudnn-cu12==8.9.2.26
-# via torch
-nvidia-cufft-cu12==11.0.2.54
-# via torch
-nvidia-curand-cu12==10.3.2.106
-# via torch
-nvidia-cusolver-cu12==11.4.5.107
-# via torch
-nvidia-cusparse-cu12==12.1.0.106
-# via nvidia-cusolver-cu12
-# via torch
-nvidia-nccl-cu12==2.19.3
-# via torch
-nvidia-nvjitlink-cu12==12.6.20
-# via nvidia-cusolver-cu12
-# via nvidia-cusparse-cu12
-nvidia-nvtx-cu12==12.1.105
-# via torch
 openai==1.37.0
 # via burr
 # via langchain-fireworks
@@ -631,8 +599,6 @@ tqdm==4.66.4
 transformers==4.43.3
 # via langchain-huggingface
 # via sentence-transformers
-triton==2.2.0
-# via torch
 typer==0.12.3
 # via fastapi-cli
 typing-extensions==4.12.2
@@ -676,8 +642,6 @@ uvicorn==0.30.3
 # via fastapi
 uvloop==0.19.0
 # via uvicorn
-watchdog==4.0.1
-# via streamlit
 watchfiles==0.22.0
 # via uvicorn
 websockets==12.0

requirements.lock

Lines changed: 0 additions & 34 deletions
@@ -6,8 +6,6 @@
 # features: []
 # all-features: false
 # with-sources: false
-# generate-hashes: false
-# universal: false
 
 -e file:.
 aiohttp==3.9.5
@@ -69,7 +67,6 @@ filelock==3.15.4
 # via huggingface-hub
 # via torch
 # via transformers
-# via triton
 fireworks-ai==0.14.0
 # via langchain-fireworks
 free-proxy==1.1.1
@@ -136,7 +133,6 @@ graphviz==0.20.3
 # via scrapegraphai
 greenlet==3.0.3
 # via playwright
-# via sqlalchemy
 groq==0.9.0
 # via langchain-groq
 grpc-google-iam-v1==0.13.1
@@ -267,34 +263,6 @@ numpy==1.26.4
 # via sentence-transformers
 # via shapely
 # via transformers
-nvidia-cublas-cu12==12.1.3.1
-# via nvidia-cudnn-cu12
-# via nvidia-cusolver-cu12
-# via torch
-nvidia-cuda-cupti-cu12==12.1.105
-# via torch
-nvidia-cuda-nvrtc-cu12==12.1.105
-# via torch
-nvidia-cuda-runtime-cu12==12.1.105
-# via torch
-nvidia-cudnn-cu12==8.9.2.26
-# via torch
-nvidia-cufft-cu12==11.0.2.54
-# via torch
-nvidia-curand-cu12==10.3.2.106
-# via torch
-nvidia-cusolver-cu12==11.4.5.107
-# via torch
-nvidia-cusparse-cu12==12.1.0.106
-# via nvidia-cusolver-cu12
-# via torch
-nvidia-nccl-cu12==2.19.3
-# via torch
-nvidia-nvjitlink-cu12==12.6.20
-# via nvidia-cusolver-cu12
-# via nvidia-cusparse-cu12
-nvidia-nvtx-cu12==12.1.105
-# via torch
 openai==1.37.0
 # via langchain-fireworks
 # via langchain-openai
@@ -446,8 +414,6 @@ tqdm==4.66.4
 transformers==4.43.3
 # via langchain-huggingface
 # via sentence-transformers
-triton==2.2.0
-# via torch
 typing-extensions==4.12.2
 # via anthropic
 # via anyio

scrapegraphai/nodes/fetch_node.py

Lines changed: 5 additions & 5 deletions
@@ -260,7 +260,7 @@ def handle_web_source(self, state, source):
 
         if (isinstance(self.llm_model, ChatOpenAI)
                 and not self.script_creator) or (self.force and not self.script_creator):
-            parsed_content = convert_to_md(source, input_data[0])
+            parsed_content = convert_to_md(source, parsed_content)
 
             compressed_document = [Document(page_content=parsed_content)]
         else:
@@ -288,14 +288,14 @@ def handle_web_source(self, state, source):
         parsed_content = document[0].page_content
 
         if isinstance(self.llm_model, ChatOpenAI) and not self.script_creator or self.force and not self.script_creator and not self.openai_md_enabled:
-            parsed_content = convert_to_md(document[0].page_content, input_data[0])
+            parsed_content = convert_to_md(document[0].page_content, parsed_content)
 
         compressed_document = [
             Document(page_content=parsed_content, metadata={"source": "html file"})
         ]
 
         return self.update_state(state, compressed_document)
-
+
     def update_state(self, state, compressed_document):
         """
         Updates the state with the output data from the node.
@@ -308,6 +308,6 @@ def update_state(self, state, compressed_document):
         Returns:
             dict: The updated state with the output data.
         """
-
+
         state.update({self.output[0]: compressed_document,})
-        return state
+        return state
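
The substantive change in this commit is the pair of convert_to_md calls above: both previously passed input_data[0] as the second argument and now pass the locally defined parsed_content. A plausible reading (and the sketch below is only that) is that input_data is a local variable of FetchNode.execute() and is not in scope inside handle_web_source(), so the old calls would fail at runtime. The names below (FetchNodeSketch, the stand-in convert_to_md and its signature) are illustrative assumptions, not the project's actual implementation.

# Minimal sketch of the scoping issue the one-line change appears to fix.
# Assumptions: `input_data` is local to execute(), and convert_to_md()
# accepts the raw content plus a second reference argument; both are
# stand-ins for illustration, not scrapegraphai's real API.

def convert_to_md(content: str, reference: str) -> str:
    # Stand-in converter: the real helper turns HTML into markdown.
    return f"converted ({len(reference)} chars of reference)\n{content}"

class FetchNodeSketch:
    def execute(self, state: dict) -> dict:
        input_data = [state["url"]]  # local to execute() only
        return self.handle_web_source(state, state["html"])

    def handle_web_source(self, state: dict, source: str) -> dict:
        parsed_content = source
        # Before the fix, this call referenced input_data[0], which is not
        # defined in this method and would raise NameError; the fix reuses
        # the local parsed_content instead.
        parsed_content = convert_to_md(source, parsed_content)
        state["doc"] = parsed_content
        return state

state = FetchNodeSketch().execute({"url": "https://example.com", "html": "<p>hi</p>"})
print(state["doc"])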
