Skip to content

Commit 4ca5044

Browse files
authored
Add Chat Agent (#671)
1 parent 82979d2 commit 4ca5044

File tree

2 files changed

+144
-0
lines changed

2 files changed

+144
-0
lines changed

src/codegen/agents/chat_agent.py

Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
from typing import TYPE_CHECKING, Optional
2+
from uuid import uuid4
3+
4+
from langchain.tools import BaseTool
5+
from langchain_core.messages import AIMessage
6+
7+
from codegen.extensions.langchain.agent import create_chat_agent
8+
9+
if TYPE_CHECKING:
10+
from codegen import Codebase
11+
12+
13+
class ChatAgent:
    """Chat-style agent for interacting with a codebase via an LLM."""

    def __init__(
        self,
        codebase: "Codebase",
        model_provider: str = "anthropic",
        model_name: str = "claude-3-5-sonnet-latest",
        memory: bool = True,
        tools: Optional[list[BaseTool]] = None,
        **kwargs,
    ):
        """Initialize a ChatAgent.

        Args:
            codebase: The codebase to operate on
            model_provider: The model provider to use ("anthropic" or "openai")
            model_name: Name of the model to use
            memory: Whether to let LLM keep track of the conversation history
            tools: Additional tools to use
            **kwargs: Additional LLM configuration options. Supported options:
                - temperature: Temperature parameter (0-1)
                - top_p: Top-p sampling parameter (0-1)
                - top_k: Top-k sampling parameter (>= 1)
                - max_tokens: Maximum number of tokens to generate
        """
        self.codebase = codebase
        self.agent = create_chat_agent(
            self.codebase,
            model_provider=model_provider,
            model_name=model_name,
            memory=memory,
            additional_tools=tools,
            **kwargs,
        )

    def run(self, prompt: str, thread_id: Optional[str] = None) -> str:
        """Run the agent with a prompt.

        Args:
            prompt: The prompt to run
            thread_id: Optional thread ID for message history. If None, a new thread is created.

        Returns:
            The agent's final response content, or "" if the stream yielded no messages.
        """
        if thread_id is None:
            thread_id = str(uuid4())

        # Renamed from `input` so we don't shadow the builtin.
        payload = {"messages": [("user", prompt)]}
        stream = self.agent.stream(payload, config={"configurable": {"thread_id": thread_id}}, stream_mode="values")

        # Track the last message explicitly: the original read the loop
        # variable after the loop and raised UnboundLocalError when the
        # stream was empty.
        last_message = None
        for state in stream:
            last_message = state["messages"][-1]
            if isinstance(last_message, tuple):
                print(last_message)
            elif isinstance(last_message, AIMessage) and isinstance(last_message.content, list) and "text" in last_message.content[0]:
                # Structured (list-of-parts) content: pretty-print only the text part.
                AIMessage(last_message.content[0]["text"]).pretty_print()
            else:
                last_message.pretty_print()

        return last_message.content if last_message is not None else ""

    def chat(self, prompt: str, thread_id: Optional[str] = None) -> tuple[str, str]:
        """Chat with the agent, maintaining conversation history.

        Args:
            prompt: The user message
            thread_id: Optional thread ID for message history. If None, a new thread is created.

        Returns:
            A tuple of (response_content, thread_id) to allow continued conversation
        """
        if thread_id is None:
            thread_id = str(uuid4())
            print(f"Starting new chat thread: {thread_id}")
        else:
            print(f"Continuing chat thread: {thread_id}")

        response = self.run(prompt, thread_id=thread_id)
        return response, thread_id

    def get_chat_history(self, thread_id: str) -> list:
        """Retrieve the chat history for a specific thread.

        Args:
            thread_id: The thread ID to retrieve history for

        Returns:
            List of messages in the conversation history, or [] when unavailable.
        """
        # Access the agent's memory to get conversation history.
        # NOTE(review): langgraph's get_state typically returns a StateSnapshot
        # whose messages live under `state.values["messages"]`; the membership
        # test below may never match on a snapshot object — confirm against the
        # langgraph version in use.
        if hasattr(self.agent, "get_state"):
            state = self.agent.get_state({"configurable": {"thread_id": thread_id}})
            if state and "messages" in state:
                return state["messages"]
        return []

src/codegen/extensions/langchain/agent.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,55 @@ def create_codebase_agent(
8989
return create_react_agent(model=llm, tools=tools, prompt=system_message, checkpointer=memory, debug=debug)
9090

9191

92+
def create_chat_agent(
    codebase: "Codebase",
    model_provider: str = "anthropic",
    model_name: str = "claude-3-5-sonnet-latest",
    system_message: SystemMessage = SystemMessage(REASONER_SYSTEM_MESSAGE),
    memory: bool = True,
    debug: bool = False,
    additional_tools: Optional[list[BaseTool]] = None,
    **kwargs,
) -> CompiledGraph:
    """Create a chat agent with all codebase tools.

    Args:
        codebase: The codebase to operate on
        model_provider: The model provider to use ("anthropic" or "openai")
        model_name: Name of the model to use
        system_message: System message seeding the agent. Note the default is a
            single shared instance evaluated once at import time.
        memory: Whether to persist conversation state between invocations
        debug: Whether to run the underlying graph in debug mode
        additional_tools: Extra tools to expose alongside the built-in ones
        **kwargs: Additional LLM configuration options. Supported options:
            - temperature: Temperature parameter (0-1)
            - top_p: Top-p sampling parameter (0-1)
            - top_k: Top-k sampling parameter (>= 1)
            - max_tokens: Maximum number of tokens to generate

    Returns:
        Initialized agent with message history
    """
    llm = LLM(model_provider=model_provider, model_name=model_name, **kwargs)

    # Built-in codebase tools; caller-supplied tools are appended afterwards.
    tools = [
        ViewFileTool(codebase),
        ListDirectoryTool(codebase),
        SearchTool(codebase),
        CreateFileTool(codebase),
        DeleteFileTool(codebase),
        RenameFileTool(codebase),
        MoveSymbolTool(codebase),
        RevealSymbolTool(codebase),
        RelaceEditTool(codebase),
    ]

    if additional_tools:
        tools.extend(additional_tools)

    # Fresh name: the original rebound the `memory` bool to a MemorySaver,
    # changing the variable's type mid-function.
    checkpointer = MemorySaver() if memory else None

    return create_react_agent(model=llm, tools=tools, prompt=system_message, checkpointer=checkpointer, debug=debug)
139+
140+
92141
def create_codebase_inspector_agent(
93142
codebase: "Codebase",
94143
model_provider: str = "openai",

0 commit comments

Comments (0)