Faiss integration #351

Merged
merged 6 commits on Jun 11, 2024
Changes from all commits

4 changes: 4 additions & 0 deletions .gitignore
@@ -23,6 +23,7 @@ docs/source/_static/
 venv/
 .venv/
 .vscode/
+.conda/

 # exclude pdf, mp3
 *.pdf
@@ -38,3 +39,6 @@ lib/
 *.html
 .idea

+# extras
+cache/
+run_smart_scraper.py
1 change: 1 addition & 0 deletions docs/source/scrapers/graph_config.rst
@@ -13,6 +13,7 @@ Some interesting ones are:
 - `loader_kwargs`: A dictionary with additional parameters to be passed to the `Loader` class, such as `proxy`.
 - `burr_kwargs`: A dictionary with additional parameters to enable `Burr` graphical user interface.
 - `max_images`: The maximum number of images to be analyzed. Useful in `OmniScraperGraph` and `OmniSearchGraph`.
+- `cache_path`: The path where the cache files will be saved. If the path already exists, the cache will be loaded from it.

 .. _Burr:

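For context on the documented option, here is a minimal sketch of how `cache_path` is passed in; the `llm` settings, prompt, and URL are illustrative placeholders, not taken from this PR:

```python
from scrapegraphai.graphs import SmartScraperGraph

graph_config = {
    "llm": {"model": "gpt-3.5-turbo", "api_key": "YOUR_API_KEY"},  # placeholder LLM settings
    "cache_path": "cache",  # new option: FAISS indexes are saved here and reloaded on later runs
}

graph = SmartScraperGraph(
    prompt="List all article titles on the page",  # placeholder prompt
    source="https://example.com",                  # placeholder source
    config=graph_config,
)
result = graph.run()
```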
2 changes: 1 addition & 1 deletion requirements-dev.txt
@@ -1,4 +1,4 @@
 sphinx==7.1.2
 furo==2024.5.6
 pytest==8.0.0
-burr[start]==0.19.1
+burr[start]==0.22.1
7 changes: 3 additions & 4 deletions scrapegraphai/graphs/abstract_graph.py
@@ -76,6 +76,7 @@ def __init__(self, prompt: str, config: dict,
         self.headless = True if config is None else config.get(
             "headless", True)
         self.loader_kwargs = config.get("loader_kwargs", {})
+        self.cache_path = config.get("cache_path", False)

         # Create the graph
         self.graph = self._create_graph()
@@ -91,15 +92,13 @@ def __init__(self, prompt: str, config: dict,
         else:
             set_verbosity_warning()

-        self.headless = True if config is None else config.get("headless", True)
-        self.loader_kwargs = config.get("loader_kwargs", {})
-
         common_params = {
             "headless": self.headless,
             "verbose": self.verbose,
             "loader_kwargs": self.loader_kwargs,
             "llm_model": self.llm_model,
-            "embedder_model": self.embedder_model
+            "embedder_model": self.embedder_model,
+            "cache_path": self.cache_path,
         }

         self.set_common_params(common_params, overwrite=False)
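The merged `common_params` (now including `cache_path`) are pushed down to every node via `set_common_params`. A simplified sketch of what that merge does, assuming each node exposes a `node_config` dict; this is not the library's actual implementation:

```python
def set_common_params(self, params: dict, overwrite: bool = False):
    # Simplified illustration: fill shared settings (headless, verbose,
    # loader_kwargs, llm_model, embedder_model, cache_path) into each node.
    # With overwrite=False, a key a node has already configured is kept.
    for node in self.graph.nodes:
        for key, value in params.items():
            if overwrite or key not in node.node_config:
                node.node_config[key] = value
```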
21 changes: 20 additions & 1 deletion scrapegraphai/nodes/rag_node.py
@@ -3,6 +3,7 @@
 """

 from typing import List, Optional
+import os

 from langchain.docstore.document import Document
 from langchain.retrievers import ContextualCompressionRetriever
@@ -50,6 +51,7 @@ def __init__(
         self.verbose = (
             False if node_config is None else node_config.get("verbose", False)
         )
+        self.cache_path = node_config.get("cache_path", False)

     def execute(self, state: dict) -> dict:
         """
@@ -98,7 +100,24 @@ def execute(self, state: dict) -> dict:
         )
         embeddings = self.embedder_model

-        retriever = FAISS.from_documents(chunked_docs, embeddings).as_retriever()
+        folder_name = self.node_config.get("cache_path", "cache")
+
+        if self.node_config.get("cache_path", False) and not os.path.exists(folder_name):
+            index = FAISS.from_documents(chunked_docs, embeddings)
+            os.makedirs(folder_name)
+            index.save_local(folder_name)
+            self.logger.info("--- (indexes saved to cache) ---")
+
+        elif self.node_config.get("cache_path", False) and os.path.exists(folder_name):
+            index = FAISS.load_local(folder_path=folder_name,
+                                     embeddings=embeddings,
+                                     allow_dangerous_deserialization=True)
+            self.logger.info("--- (indexes loaded from cache) ---")
+
+        else:
+            index = FAISS.from_documents(chunked_docs, embeddings)
+
+        retriever = index.as_retriever()

         redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
         # similarity_threshold could be set, now k=20
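Stripped of the node plumbing, the change above is a standard FAISS save/load round trip. A self-contained sketch of the same pattern, using a fake embedder and a sample document as stand-ins for the node's `embedder_model` and `chunked_docs`:

```python
import os

from langchain.docstore.document import Document
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS

embeddings = FakeEmbeddings(size=32)             # stand-in for the configured embedder_model
docs = [Document(page_content="example chunk")]  # stand-in for chunked_docs
folder_name = "cache"                            # the default cache_path used by the node

if os.path.exists(folder_name):
    # load_local unpickles the saved index, hence the explicit opt-in flag
    index = FAISS.load_local(
        folder_path=folder_name,
        embeddings=embeddings,
        allow_dangerous_deserialization=True,
    )
else:
    index = FAISS.from_documents(docs, embeddings)
    os.makedirs(folder_name, exist_ok=True)
    index.save_local(folder_name)

retriever = index.as_retriever()
```

Note that the node itself only persists the index when `cache_path` is set in the config; with the default `False` it rebuilds the index from the chunked documents on every run.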