Skip to content

CG-10805 & CG-10806: Migrate Everything to Langgraph + Multi-LLM Config (openai + anthropic) #600

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
Feb 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions codegen-examples/examples/deep_code_research/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ def cli():
@cli.command()
@click.argument("repo_name", required=False)
@click.option("--query", "-q", default=None, help="Initial research query to start with.")
def research(repo_name: Optional[str] = None, query: Optional[str] = None):
def research(repo_name: Optional[str] = None, query: Optional[str] = None, thread_id: Optional[int] = 100):
"""[bold green]Start a code research session[/bold green]

[blue]Arguments:[/blue]
Expand Down Expand Up @@ -107,7 +107,7 @@ def research(repo_name: Optional[str] = None, query: Optional[str] = None):

# Initialize agent with research tools
with console.status("[bold blue]Initializing research agent...[/bold blue]") as status:
agent = create_agent_with_tools(codebase=codebase, tools=tools, chat_history=[SystemMessage(content=RESEARCH_AGENT_PROMPT)], verbose=True)
agent = create_agent_with_tools(codebase=codebase, tools=tools, system_message=SystemMessage(content=RESEARCH_AGENT_PROMPT))
status.update("[bold green]✓ Research agent ready![/bold green]")

# Get initial query if not provided
Expand Down Expand Up @@ -136,11 +136,11 @@ def research(repo_name: Optional[str] = None, query: Optional[str] = None):
try:
result = agent.invoke(
{"input": query},
config={"configurable": {"session_id": "research"}},
config={"configurable": {"thread_id": thread_id}},
)
# Display the result
console.print("\n[bold blue]📊 Research Findings:[/bold blue]")
console.print(Markdown(result["output"]))
console.print(Markdown(result["messages"][-1].content))
except Exception as e:
console.print(f"\n[bold red]Error during research:[/bold red] {e}")

Expand Down
17 changes: 11 additions & 6 deletions codegen-examples/examples/langchain_agent/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,16 @@ from codegen.extensions.langchain import create_codebase_agent
codebase = Codebase.from_repo("fastapi/fastapi")

# Create the agent
agent = create_codebase_agent(codebase=codebase, model_name="gpt-4", verbose=True)
agent = create_codebase_agent(
codebase=codebase,
model_provider="anthropic", # or "openai"
model_name="claude-3-5-sonnet-latest", # or "gpt-4" for OpenAI
debug=True,
)

# Ask the agent to analyze code
result = agent.invoke({"input": "What are the dependencies of the FastAPI class?", "config": {"configurable": {"session_id": "demo"}}})
print(result["output"])
result = agent.invoke({"input": "What are the dependencies of the FastAPI class?", "config": {"configurable": {"thread_id": "1"}}})
print(result["messages"][-1].content)
```

## Installation
Expand Down Expand Up @@ -68,13 +73,13 @@ The agent can perform various code analysis and manipulation tasks:

```python
# Analyze dependencies
agent.invoke({"input": "What are the dependencies of the reveal_symbol function?", "config": {"configurable": {"session_id": "demo"}}})
agent.invoke({"input": "What are the dependencies of the reveal_symbol function?", "config": {"configurable": {"thread_id": "1"}}})

# Find usage patterns
agent.invoke({"input": "Show me examples of dependency injection in the codebase", "config": {"configurable": {"session_id": "demo"}}})
agent.invoke({"input": "Show me examples of dependency injection in the codebase", "config": {"configurable": {"thread_id": "1"}}})

# Move code
agent.invoke({"input": "Move the validate_email function to validation_utils.py", "config": {"configurable": {"session_id": "demo"}}})
agent.invoke({"input": "Move the validate_email function to validation_utils.py", "config": {"configurable": {"thread_id": "1"}}})
```

## Learn More
Expand Down
74 changes: 29 additions & 45 deletions codegen-examples/examples/langchain_agent/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,37 +14,45 @@
SemanticEditTool,
ViewFileTool,
)
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_core.chat_history import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

from codegen.extensions.langchain.llm import LLM
from codegen.extensions.langchain.prompts import REASONER_SYSTEM_MESSAGE

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph.graph import CompiledGraph
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import SystemMessage


def create_codebase_agent(
codebase: Codebase,
model_name: str = "gpt-4o",
temperature: float = 0,
verbose: bool = True,
) -> RunnableWithMessageHistory:
codebase: "Codebase",
model_provider: str = "anthropic",
model_name: str = "claude-3-5-sonnet-latest",
system_message: SystemMessage = SystemMessage(REASONER_SYSTEM_MESSAGE),
memory: bool = True,
debug: bool = True,
**kwargs,
) -> CompiledGraph:
"""Create an agent with all codebase tools.

Args:
codebase: The codebase to operate on
model_name: Name of the model to use (default: gpt-4)
temperature: Model temperature (default: 0)
model_provider: The model provider to use ("anthropic" or "openai")
model_name: Name of the model to use
        system_message: System message used to seed the agent's prompt (default: REASONER_SYSTEM_MESSAGE)
        memory: Whether to persist conversation state with a checkpointer (default: True)
        debug: Whether to enable debug output from the agent (default: True)
**kwargs: Additional LLM configuration options. Supported options:
- temperature: Temperature parameter (0-1)
- top_p: Top-p sampling parameter (0-1)
- top_k: Top-k sampling parameter (>= 1)
- max_tokens: Maximum number of tokens to generate

Returns:
Initialized agent with message history
"""
# Initialize language model
llm = ChatOpenAI(
model_name=model_name,
temperature=temperature,
)
llm = LLM(model_provider=model_provider, model_name=model_name, **kwargs)

# Get all codebase tools
# Get all codebase tools
tools = [
ViewFileTool(codebase),
Expand All @@ -60,33 +68,9 @@ def create_codebase_agent(
CommitTool(codebase),
]

# Get the prompt to use
prompt = hub.pull("hwchase17/openai-functions-agent")

# Create the agent
agent = OpenAIFunctionsAgent(
llm=llm,
tools=tools,
prompt=prompt,
)

# Create the agent executor
agent_executor = AgentExecutor(
agent=agent,
tools=tools,
verbose=verbose,
)
memory = MemorySaver() if memory else None

# Create message history handler
message_history = ChatMessageHistory()

# Wrap with message history
return RunnableWithMessageHistory(
agent_executor,
lambda session_id: message_history,
input_messages_key="input",
history_messages_key="chat_history",
)
return create_react_agent(model=llm, tools=tools, prompt=system_message, checkpointer=memory, debug=debug)


if __name__ == "__main__":
Expand All @@ -101,6 +85,6 @@ def create_codebase_agent(
print("\nAsking agent to analyze symbol relationships...")
result = agent.invoke(
{"input": "What are the dependencies of the reveal_symbol function?"},
config={"configurable": {"session_id": "demo"}},
config={"configurable": {"thread_id": 1}},
)
print("Messages:", result["messages"])
14 changes: 13 additions & 1 deletion docs/tutorials/build-code-agent.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,19 @@ agent = CodeAgent(codebase)
# Run the agent with a prompt
agent.run("Tell me about this repo")
```
<Note>Your `ANTHROPIC_API_KEY` must be set in your env.</Note>


<Note>Your `ANTHROPIC_API_KEY` and/or `OPENAI_API_KEY` must be set in your env.</Note>

The default implementation uses `anthropic/claude-3-5-sonnet-latest` for the model, but this can be changed through the `model_provider` and `model_name` arguments.

```python
agent = CodeAgent(
codebase=codebase,
model_provider="openai",
model_name="gpt-4o",
)
```

# Available Tools

Expand Down
4 changes: 2 additions & 2 deletions docs/tutorials/deep-code-research.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -135,9 +135,9 @@ def research(repo_name: Optional[str] = None, query: Optional[str] = None):

result = agent.invoke(
{"input": query},
config={"configurable": {"session_id": "research"}}
config={"configurable": {"thread_id": 1}}
)
console.print(Markdown(result["output"]))
console.print(Markdown(result["messages"][-1].content))

query = None # Clear for next iteration
```
Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ dependencies = [
"langchain[openai]",
"langchain_core",
"langchain_openai",
"langgraph",
"numpy>=2.2.2",
"mcp[cli]",
"neo4j",
Expand Down
66 changes: 54 additions & 12 deletions src/codegen/agents/code_agent.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,65 @@
from typing import Optional
from typing import TYPE_CHECKING, Optional
from uuid import uuid4

from langchain.tools import BaseTool
from langchain_core.messages import AIMessage

from codegen.extensions.langchain.agent import create_codebase_agent
from codegen.sdk.core.codebase import Codebase

if TYPE_CHECKING:
from codegen import Codebase


class CodeAgent:
"""Agent for interacting with a codebase."""

def __init__(self, codebase: Codebase, tools: Optional[list[BaseTool]] = None):
def __init__(self, codebase: "Codebase", model_provider: str = "anthropic", model_name: str = "claude-3-5-sonnet-latest", memory: bool = True, tools: Optional[list[BaseTool]] = None, **kwargs):
"""Initialize a CodeAgent.

Args:
codebase: The codebase to operate on
model_provider: The model provider to use ("anthropic" or "openai")
model_name: Name of the model to use
memory: Whether to let LLM keep track of the conversation history
tools: Additional tools to use
**kwargs: Additional LLM configuration options. Supported options:
- temperature: Temperature parameter (0-1)
- top_p: Top-p sampling parameter (0-1)
- top_k: Top-k sampling parameter (>= 1)
- max_tokens: Maximum number of tokens to generate
"""
self.codebase = codebase
self.agent = create_codebase_agent(self.codebase, additional_tools=tools)

def run(self, prompt: str, session_id: Optional[str] = None) -> str:
if session_id is None:
session_id = str(uuid4())
return self.agent.invoke(
{"input": prompt},
config={"configurable": {"session_id": session_id}},
)
self.agent = create_codebase_agent(self.codebase, model_provider=model_provider, model_name=model_name, memory=memory, additional_tools=tools, **kwargs)

def run(self, prompt: str, thread_id: Optional[str] = None) -> str:
"""Run the agent with a prompt.

Args:
prompt: The prompt to run
thread_id: Optional thread ID for message history

Returns:
The agent's response
"""
if thread_id is None:
thread_id = str(uuid4())

# this message has a reducer which appends the current message to the existing history
# see more https://langchain-ai.github.io/langgraph/concepts/low_level/#reducers
input = {"messages": [("user", prompt)]}

# we stream the steps instead of invoke because it allows us to access intermediate nodes
stream = self.agent.stream(input, config={"configurable": {"thread_id": thread_id}}, stream_mode="values")

for s in stream:
message = s["messages"][-1]
if isinstance(message, tuple):
print(message)
else:
if isinstance(message, AIMessage) and isinstance(message.content, list) and "text" in message.content[0]:
AIMessage(message.content[0]["text"]).pretty_print()
else:
message.pretty_print()

# last stream object contains all messages. message[-1] is the last message
return s["messages"][-1].content
26 changes: 11 additions & 15 deletions src/codegen/cli/commands/agent/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import warnings

import rich_click as click
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.messages import SystemMessage
from rich.console import Console
from rich.markdown import Markdown
from rich.prompt import Prompt
Expand Down Expand Up @@ -73,13 +73,11 @@ def say(message: str):
]

# Initialize chat history with system message
chat_history = [
SystemMessage(
content="""You are a helpful AI assistant with access to the local codebase.
system_message = SystemMessage(
content="""You are a helpful AI assistant with access to the local codebase.
You can help with code exploration, editing, and general programming tasks.
Always explain what you're planning to do before taking actions."""
)
]
)

# Get initial query if not provided via command line
if not query:
Expand All @@ -92,7 +90,7 @@ def say(message: str):
query = Prompt.ask("[bold]>[/bold]") # Simple arrow prompt

# Create the agent
agent = create_agent_with_tools(codebase, tools, chat_history=chat_history)
agent = create_agent_with_tools(codebase=codebase, tools=tools, system_message=system_message)

# Main chat loop
while True:
Expand All @@ -105,21 +103,19 @@ def say(message: str):
if user_input.lower() in ["exit", "quit"]:
break

# Add user message to chat history
chat_history.append(HumanMessage(content=user_input))

# Invoke the agent
with console.status("[bold green]Agent is thinking...") as status:
try:
session_id = str(uuid.uuid4())
thread_id = str(uuid.uuid4())
result = agent.invoke(
{"input": user_input},
config={"configurable": {"session_id": session_id}},
config={"configurable": {"thread_id": thread_id}},
)

result = result["messages"][-1].content
# Update chat history with AI's response
if result.get("output"):
say(result["output"])
chat_history.append(AIMessage(content=result["output"]))
if result:
say(result)
except Exception as e:
console.print(f"[bold red]Error during agent execution:[/bold red] {e}")
break
Loading
Loading