Skip to content

Commit faef318

Browse files
committed
fix: model count
1 parent 6730797 commit faef318

File tree

4 files changed

+4
-2
lines changed

4 files changed

+4
-2
lines changed

examples/local_models/smart_scraper_ollama.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
graph_config = {
1111
"llm": {
12-
"model": "ollama/llama3.1",
12+
"model": "ollama/mistral",
1313
"temperature": 0,
1414
"format": "json", # Ollama needs the format to be specified explicitly
1515
# "base_url": "http://localhost:11434", # set ollama URL arbitrarily

scrapegraphai/graphs/abstract_graph.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ def handle_model(model_name, provider, token_key, default_token=8192):
149149

150150
known_models = ["openai", "azure_openai", "google_genai", "ollama", "oneapi", "nvidia", "groq", "google_vertexai", "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"]
151151

152-
if llm_params["model"] not in known_models:
152+
if llm_params["model"].split("/")[0] not in known_models:
153153
raise ValueError(f"Model '{llm_params['model']}' is not supported")
154154

155155
try:

scrapegraphai/helpers/models_tokens.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@
6262
"scrapegraph": 8192,
6363
"llava": 4096,
6464
"mixtral:8x22b-instruct": 65536,
65+
"mistral":8192,
6566
"mistral-openorca": 32000,
6667
"nomic-embed-text": 8192,
6768
"nous-hermes2:34b": 4096,

scrapegraphai/nodes/parse_node.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ def execute(self, state: dict) -> dict:
8080
docs_transformed = docs_transformed[0]
8181

8282
if isinstance(docs_transformed, Document):
83+
8384
chunks = chunk(text=docs_transformed.page_content,
8485
chunk_size=self.node_config.get("chunk_size", 4096)-250,
8586
token_counter=lambda text: len(text.split()),

0 commit comments

Comments (0)