Description
The `llm_factory` argument on the `Orchestrator` should be optional when a `planner` and `available_agents` are provided (as it already is on the evaluator-optimizer workflow).
```python
def __init__(
    self,
    llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]],
    planner: AugmentedLLM | None = None,
    available_agents: List[Agent | AugmentedLLM] | None = None,
    plan_type: Literal["full", "iterative"] = "full",
    context: Optional["Context"] = None,
    **kwargs,
):
    """
    Args:
        llm_factory: Factory function to create an LLM for a given agent
        planner: LLM to use for planning steps (if not provided, a default planner will be used)
        plan_type: "full" planning generates the full plan first, then executes. "iterative" plans the next step, and loops until success.
        available_agents: List of agents available to tasks executed by this orchestrator
        context: Application context
    """
    super().__init__(context=context, **kwargs)
    self.llm_factory = llm_factory
    self.planner = planner or llm_factory(
        agent=Agent(
            name="LLM Orchestration Planner",
            instruction="""
            You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
            or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
            which can be performed by LLMs with access to the servers or agents.
            """,
        )
    )
```
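One possible shape for this (a minimal sketch under assumptions, not the actual implementation): type `llm_factory` as optional, prefer an explicit `planner` when one is given, fall back to the current factory-built default planner otherwise, and raise only when neither is available. The type names (`Agent`, `AugmentedLLM`, `MessageParamT`, `MessageT`, `Context`) follow the snippet above; the validation logic is an assumption about how the fallback could work.

```python
# Sketch only: assumes the same imports/types as the snippet above.
def __init__(
    self,
    llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]] | None = None,
    planner: AugmentedLLM | None = None,
    available_agents: List[Agent | AugmentedLLM] | None = None,
    plan_type: Literal["full", "iterative"] = "full",
    context: Optional["Context"] = None,
    **kwargs,
):
    super().__init__(context=context, **kwargs)
    self.llm_factory = llm_factory

    if planner is not None:
        # An explicit planner makes the factory unnecessary for planning.
        self.planner = planner
    elif llm_factory is not None:
        # Current behavior: build the default planner from the factory.
        self.planner = llm_factory(
            agent=Agent(
                name="LLM Orchestration Planner",
                instruction="You are an expert planner. ...",  # same instruction as above
            )
        )
    else:
        raise ValueError("Orchestrator requires either an llm_factory or a planner")
```

Any other place in the class that currently calls `self.llm_factory(agent=...)` for a plain `Agent` would presumably need a similar guard, since agents passed in as `AugmentedLLM` instances already carry their own LLM.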