|
3 | 3 | """
|
4 | 4 | import pytest
|
5 | 5 | from unittest.mock import patch
|
6 |
| -from scrapegraphai.graphs import AbstractGraph |
| 6 | +from scrapegraphai.graphs import AbstractGraph, BaseGraph |
| 7 | +from scrapegraphai.nodes import ( |
| 8 | + FetchNode, |
| 9 | + ParseNode |
| 10 | +) |
| 11 | +from scrapegraphai.models import OneApi, DeepSeek |
| 12 | +from langchain_openai import ChatOpenAI, AzureChatOpenAI |
| 13 | +from langchain_community.chat_models import ChatOllama |
| 14 | +from langchain_google_genai import ChatGoogleGenerativeAI |
| 15 | + |
| 16 | + |
| 17 | + |
class TestGraph(AbstractGraph):
    """Minimal concrete AbstractGraph used to exercise model creation in tests.

    AbstractGraph is abstract; this subclass supplies the smallest viable
    fetch -> parse pipeline so the base-class LLM bootstrapping can run.
    """

    # The class name starts with "Test" but it has an __init__, so pytest
    # would otherwise try (and fail) to collect it as a test class and emit
    # a PytestCollectionWarning. Opt out of collection explicitly.
    __test__ = False

    def __init__(self, prompt: str, config: dict):
        super().__init__(prompt, config)

    def _create_graph(self) -> BaseGraph:
        """Build the two-node graph: FetchNode feeding ParseNode."""
        fetch_node = FetchNode(
            input="url| local_dir",
            output=["doc", "link_urls", "img_urls"],
            node_config={
                "llm_model": self.llm_model,
                "force": self.config.get("force", False),
                "cut": self.config.get("cut", True),
                "loader_kwargs": self.config.get("loader_kwargs", {}),
                # may be None when the caller did not configure browser_base
                "browser_base": self.config.get("browser_base"),
            },
        )
        parse_node = ParseNode(
            input="doc",
            output=["parsed_doc"],
            node_config={
                "chunk_size": self.model_token,
            },
        )
        return BaseGraph(
            nodes=[fetch_node, parse_node],
            edges=[(fetch_node, parse_node)],
            entry_point=fetch_node,
            graph_name=self.__class__.__name__,
        )

    def run(self) -> str:
        """Execute the graph and return the answer, or a fallback message."""
        inputs = {"user_prompt": self.prompt, self.input_key: self.source}
        self.final_state, self.execution_info = self.graph.execute(inputs)

        return self.final_state.get("answer", "No answer found.")
| 58 | + |
7 | 59 |
|
class TestAbstractGraph:
    """Tests for AbstractGraph's provider-prefix -> chat-model resolution."""

    @pytest.mark.parametrize("llm_config, expected_model", [
        ({"model": "openai/gpt-3.5-turbo", "openai_api_key": "sk-randomtest001"}, ChatOpenAI),
        ({
            "model": "azure_openai/gpt-3.5-turbo",
            "api_key": "random-api-key",
            "api_version": "no version",
            "azure_endpoint": "https://www.example.com/"},
            AzureChatOpenAI),
        ({"model": "google_genai/gemini-pro", "google_api_key": "google-key-test"}, ChatGoogleGenerativeAI),
        ({"model": "ollama/llama2"}, ChatOllama),
        ({"model": "oneapi/qwen-turbo"}, OneApi),
        ({"model": "deepseek/deepseek-coder"}, DeepSeek),
    ])
    def test_create_llm(self, llm_config, expected_model):
        """Each provider prefix must yield an instance of the matching class."""
        graph = TestGraph("Test prompt", {"llm": llm_config})
        assert isinstance(graph.llm_model, expected_model)

    def test_create_llm_unknown_provider(self):
        """An unrecognized provider prefix must raise ValueError."""
        with pytest.raises(ValueError):
            TestGraph("Test prompt", {"llm": {"model": "unknown_provider/model"}})
|
0 commit comments