Skip to content

Add Chat Agent #671

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Feb 26, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 95 additions & 0 deletions src/codegen/agents/chat_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
from typing import TYPE_CHECKING, Optional
from uuid import uuid4

from langchain.tools import BaseTool
from langchain_core.messages import AIMessage

from codegen.extensions.langchain.agent import create_chat_agent

if TYPE_CHECKING:
from codegen import Codebase


class ChatAgent:
    """Agent for interacting with a codebase through chat-style prompts."""

    def __init__(
        self,
        codebase: "Codebase",
        model_provider: str = "anthropic",
        model_name: str = "claude-3-5-sonnet-latest",
        memory: bool = True,
        tools: Optional[list[BaseTool]] = None,
        **kwargs,
    ):
        """Initialize a ChatAgent.

        Args:
            codebase: The codebase to operate on
            model_provider: The model provider to use ("anthropic" or "openai")
            model_name: Name of the model to use
            memory: Whether to let the LLM keep track of the conversation history
            tools: Additional tools to use
            **kwargs: Additional LLM configuration options. Supported options:
                - temperature: Temperature parameter (0-1)
                - top_p: Top-p sampling parameter (0-1)
                - top_k: Top-k sampling parameter (>= 1)
                - max_tokens: Maximum number of tokens to generate
        """
        self.codebase = codebase
        self.agent = create_chat_agent(
            self.codebase,
            model_provider=model_provider,
            model_name=model_name,
            memory=memory,
            additional_tools=tools,
            **kwargs,
        )

    def run(self, prompt: str, thread_id: Optional[str] = None) -> str:
        """Run the agent with a prompt.

        Args:
            prompt: The prompt to run
            thread_id: Optional thread ID for message history. If None, a new thread is created.

        Returns:
            The agent's response content.

        Raises:
            RuntimeError: If the agent stream produces no messages.
        """
        if thread_id is None:
            thread_id = str(uuid4())

        # Renamed from `input`, which shadowed the builtin of the same name.
        inputs = {"messages": [("user", prompt)]}
        stream = self.agent.stream(inputs, config={"configurable": {"thread_id": thread_id}}, stream_mode="values")

        last_state = None
        for last_state in stream:
            message = last_state["messages"][-1]
            if isinstance(message, tuple):
                print(message)
            elif isinstance(message, AIMessage) and isinstance(message.content, list) and message.content and "text" in message.content[0]:
                # Structured (block-style) content: echo only the first text block.
                AIMessage(message.content[0]["text"]).pretty_print()
            else:
                message.pretty_print()

        if last_state is None:
            # The original code referenced the loop variable after the loop and
            # raised NameError on an empty stream; fail with a clear error instead.
            msg = "Agent stream produced no messages"
            raise RuntimeError(msg)
        return last_state["messages"][-1].content

    def chat(self, prompt: str, thread_id: Optional[str] = None) -> tuple[str, str]:
        """Chat with the agent, maintaining conversation history.

        Args:
            prompt: The user message
            thread_id: Optional thread ID for message history. If None, a new thread is created.

        Returns:
            A tuple of (response_content, thread_id) to allow continued conversation
        """
        if thread_id is None:
            thread_id = str(uuid4())
            print(f"Starting new chat thread: {thread_id}")
        else:
            print(f"Continuing chat thread: {thread_id}")

        response = self.run(prompt, thread_id=thread_id)
        return response, thread_id

    def get_chat_history(self, thread_id: str) -> list:
        """Retrieve the chat history for a specific thread.

        Args:
            thread_id: The thread ID to retrieve history for

        Returns:
            List of messages in the conversation history, or an empty list if
            the agent exposes no state for this thread.
        """
        # Access the agent's memory to get conversation history.
        # NOTE(review): langgraph's get_state typically returns a StateSnapshot,
        # not a dict — confirm that `"messages" in state` behaves as intended here.
        if hasattr(self.agent, "get_state"):
            state = self.agent.get_state({"configurable": {"thread_id": thread_id}})
            if state and "messages" in state:
                return state["messages"]
        return []
49 changes: 49 additions & 0 deletions src/codegen/extensions/langchain/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,55 @@ def create_codebase_agent(
return create_react_agent(model=llm, tools=tools, prompt=system_message, checkpointer=memory, debug=debug)


def create_chat_agent(
    codebase: "Codebase",
    model_provider: str = "anthropic",
    model_name: str = "claude-3-5-sonnet-latest",
    # NOTE(review): this default is evaluated once at import time, so every call
    # shares one SystemMessage instance — fine if it is never mutated; confirm.
    system_message: SystemMessage = SystemMessage(REASONER_SYSTEM_MESSAGE),
    memory: bool = True,
    debug: bool = False,
    additional_tools: Optional[list[BaseTool]] = None,
    **kwargs,
) -> CompiledGraph:
    """Create a chat agent with all codebase tools.

    Args:
        codebase: The codebase to operate on
        model_provider: The model provider to use ("anthropic" or "openai")
        model_name: Name of the model to use
        system_message: System message used to prime the agent
        memory: Whether to persist conversation history across invocations
        debug: Whether to run the agent graph in debug mode
        additional_tools: Extra tools to expose alongside the codebase tools
        **kwargs: Additional LLM configuration options. Supported options:
            - temperature: Temperature parameter (0-1)
            - top_p: Top-p sampling parameter (0-1)
            - top_k: Top-k sampling parameter (>= 1)
            - max_tokens: Maximum number of tokens to generate

    Returns:
        Initialized agent with message history
    """
    llm = LLM(model_provider=model_provider, model_name=model_name, **kwargs)

    tools: list[BaseTool] = [
        ViewFileTool(codebase),
        ListDirectoryTool(codebase),
        SearchTool(codebase),
        CreateFileTool(codebase),
        DeleteFileTool(codebase),
        RenameFileTool(codebase),
        MoveSymbolTool(codebase),
        RevealSymbolTool(codebase),
        RelaceEditTool(codebase),
    ]

    if additional_tools:
        tools.extend(additional_tools)

    # Use a distinct name instead of rebinding the `memory` bool parameter
    # to a MemorySaver object (type-confusing name reuse in the original).
    checkpointer = MemorySaver() if memory else None

    return create_react_agent(model=llm, tools=tools, prompt=system_message, checkpointer=checkpointer, debug=debug)


def create_codebase_inspector_agent(
codebase: "Codebase",
model_provider: str = "openai",
Expand Down
Loading