Skip to content

Commit a17e068

Browse files
authored
Merge pull request #529 from ScrapeGraphAI/523-set-default-temperature-to-0
feat: add refactoring of default temperature
2 parents b470d97 + 6c3b37a commit a17e068

File tree

2 files changed

+5
-3
lines changed

2 files changed

+5
-3
lines changed

examples/local_models/smart_scraper_ollama.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
 14  14         "format": "json",  # Ollama needs the format to be specified explicitly
 15  15         # "base_url": "http://localhost:11434",  # set ollama URL arbitrarily
 16  16     },
 17   -
 18  17     "verbose": True,
 19  18     "headless": False
 20  19  }

scrapegraphai/graphs/abstract_graph.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,9 @@ class AbstractGraph(ABC):
 53  53      def __init__(self, prompt: str, config: dict,
 54  54                   source: Optional[str] = None, schema: Optional[BaseModel] = None):
 55  55
     56 +        if config.get("llm").get("temperature") is None:
     57 +            config["llm"]["temperature"] = 0
     58 +
 56  59          self.prompt = prompt
 57  60          self.source = source
 58  61          self.config = config
@@ -212,7 +215,7 @@ def handle_model(model_name, provider, token_key, default_token=8192):
212 215                  print("model not found, using default token size (8192)")
213 216                  self.model_token = 8192
214 217              return ErnieBotChat(llm_params)
215   -
    218 +
216 219          if "oneapi" in llm_params["model"]:
217 220              # take the model after the last dash
218 221              llm_params["model"] = llm_params["model"].split("/")[-1]
@@ -221,7 +224,7 @@ def handle_model(model_name, provider, token_key, default_token=8192):
221 224              except KeyError as exc:
222 225                  raise KeyError("Model not supported") from exc
223 226              return OneApi(llm_params)
224   -
    227 +
225 228          if "nvidia" in llm_params["model"]:
226 229              try:
227 230                  self.model_token = models_tokens["nvidia"][llm_params["model"].split("/")[-1]]

0 commit comments

Comments (0)