From ae6052ed157bd06d177e01bde8181bcc09cbcd7e Mon Sep 17 00:00:00 2001
From: AI Christianson
Date: Sat, 21 Dec 2024 12:19:43 -0500
Subject: [PATCH] Introduce run_research_agent.

---
 ra_aid/__main__.py    | 42 ++++++-------------
 ra_aid/agent_utils.py | 95 ++++++++++++++++++++++++++++++++++++++++++-
 ra_aid/globals.py     |  6 +++
 ra_aid/tools/agent.py | 62 +++++-----------------
 4 files changed, 123 insertions(+), 82 deletions(-)
 create mode 100644 ra_aid/globals.py

diff --git a/ra_aid/__main__.py b/ra_aid/__main__.py
index 1ff215d..c6b5279 100644
--- a/ra_aid/__main__.py
+++ b/ra_aid/__main__.py
@@ -1,29 +1,26 @@
 import argparse
 import sys
+import uuid
 
 from rich.panel import Panel
 from rich.console import Console
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.prebuilt import create_react_agent
 from ra_aid.env import validate_environment
 from ra_aid.tools.memory import _global_memory, get_related_files, get_memory_value
 from ra_aid import print_stage_header, print_task_header, print_error, run_agent_with_retry
+from ra_aid.agent_utils import run_research_agent
 from ra_aid.prompts import (
-    RESEARCH_PROMPT,
     PLANNING_PROMPT,
     IMPLEMENTATION_PROMPT,
     CHAT_PROMPT,
-    EXPERT_PROMPT_SECTION_RESEARCH,
     EXPERT_PROMPT_SECTION_PLANNING,
     EXPERT_PROMPT_SECTION_IMPLEMENTATION,
-    HUMAN_PROMPT_SECTION_RESEARCH,
     HUMAN_PROMPT_SECTION_PLANNING,
     HUMAN_PROMPT_SECTION_IMPLEMENTATION
 )
 from ra_aid.llm import initialize_llm
 from ra_aid.tool_configs import (
-    get_read_only_tools,
-    get_research_tools,
     get_planning_tools,
     get_implementation_tools,
     get_chat_tools
@@ -199,7 +196,7 @@ def main():
         # Run chat agent with CHAT_PROMPT
         config = {
-            "configurable": {"thread_id": "abc123"},
+            "configurable": {"thread_id": str(uuid.uuid4())},
             "recursion_limit": 100,
             "chat_mode": True,
             "cowboy_mode": args.cowboy_mode,
@@ -222,9 +219,7 @@ def main():
     base_task = args.message
     config = {
-        "configurable": {
-            "thread_id": "abc123"
-        },
+        "configurable": {"thread_id": str(uuid.uuid4())},
         "recursion_limit": 100,
         "research_only": args.research_only,
         "cowboy_mode": args.cowboy_mode
     }
@@ -240,28 +235,15 @@ def main():
     # Run research stage
     print_stage_header("Research Stage")
 
-    # Create research agent
-    research_agent = create_react_agent(
+    run_research_agent(
+        base_task,
         model,
-        get_research_tools(
-            research_only=_global_memory.get('config', {}).get('research_only', False),
-            expert_enabled=expert_enabled,
-            human_interaction=args.hil
-        ),
-        checkpointer=research_memory
+        expert_enabled=expert_enabled,
+        research_only=args.research_only,
+        hil=args.hil,
+        memory=research_memory,
+        config=config
     )
-
-    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
-    human_section = HUMAN_PROMPT_SECTION_RESEARCH if args.hil else ""
-    research_prompt = RESEARCH_PROMPT.format(
-        expert_section=expert_section,
-        human_section=human_section,
-        base_task=base_task,
-        research_only_note='' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'
-    )
-
-    # Run research agent
-    run_agent_with_retry(research_agent, research_prompt, config)
 
     # Proceed with planning and implementation if not an informational query
     if not is_informational_query():
diff --git a/ra_aid/agent_utils.py b/ra_aid/agent_utils.py
index 9a97491..d43423d 100644
--- a/ra_aid/agent_utils.py
+++ b/ra_aid/agent_utils.py
@@ -1,7 +1,11 @@
 """Utility functions for working with agents."""
 
 import time
-from typing import Optional
+import uuid
+from typing import Optional, Any
+
+from langgraph.prebuilt import create_react_agent
+from langgraph.checkpoint.memory import MemorySaver
 
 from langchain_core.messages import HumanMessage
 from langchain_core.messages import BaseMessage
@@ -11,9 +15,98 @@
 from rich.markdown import Markdown
 from rich.panel import Panel
 from ra_aid.tools.memory import _global_memory
+from ra_aid.globals import RESEARCH_AGENT_RECURSION_LIMIT
+from ra_aid.tool_configs import get_research_tools
+from ra_aid.prompts import (
+    RESEARCH_PROMPT,
+    EXPERT_PROMPT_SECTION_RESEARCH,
+    HUMAN_PROMPT_SECTION_RESEARCH
+)
 
 console = Console()
 
+
+def run_research_agent(
+    base_task_or_query: str,
+    model,
+    *,
+    expert_enabled: bool = False,
+    research_only: bool = False,
+    hil: bool = False,
+    memory: Optional[Any] = None,
+    config: Optional[dict] = None,
+    thread_id: Optional[str] = None,
+    console_message: Optional[str] = None
+) -> Optional[str]:
+    """Run a research agent with the given configuration.
+
+    Args:
+        base_task_or_query: The main task or query for research
+        model: The LLM model to use
+        expert_enabled: Whether expert mode is enabled
+        research_only: Whether this is a research-only task
+        hil: Whether human-in-the-loop mode is enabled
+        memory: Optional memory instance to use
+        config: Optional configuration dictionary
+        thread_id: Optional thread ID (defaults to a new UUID)
+        console_message: Optional message to display before running
+
+    Returns:
+        Optional[str]: The completion message if the task completed successfully
+
+    Example:
+        result = run_research_agent(
+            "Research Python async patterns",
+            model,
+            expert_enabled=True,
+            research_only=True
+        )
+    """
+    # Initialize memory if not provided
+    if memory is None:
+        memory = MemorySaver()
+        memory.memory = _global_memory
+
+    # Set up thread ID
+    if thread_id is None:
+        thread_id = str(uuid.uuid4())
+
+    # Configure tools
+    tools = get_research_tools(
+        research_only=research_only,
+        expert_enabled=expert_enabled,
+        human_interaction=hil
+    )
+
+    # Create agent
+    agent = create_react_agent(model, tools, checkpointer=memory)
+
+    # Format prompt sections
+    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
+    human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
+
+    # Build prompt
+    prompt = RESEARCH_PROMPT.format(
+        base_task=base_task_or_query,
+        research_only_note='' if research_only else ' Only request implementation if the user explicitly asked for changes to be made.',
+        expert_section=expert_section,
+        human_section=human_section
+    )
+
+    # Set up configuration
+    run_config = {
+        "configurable": {"thread_id": thread_id},
+        "recursion_limit": RESEARCH_AGENT_RECURSION_LIMIT
+    }
+    if config:
+        run_config.update(config)
+
+    # Display console message if provided
+    if console_message:
+        console.print(Panel(Markdown(console_message), title="🔬 Research Task"))
+
+    # Run agent with retry logic
+    return run_agent_with_retry(agent, prompt, run_config)
+
+
 def print_agent_output(chunk: dict[str, BaseMessage]) -> None:
     """Print agent output chunks."""
     if chunk.get("delta") and chunk["delta"].content:
diff --git a/ra_aid/globals.py b/ra_aid/globals.py
new file mode 100644
index 0000000..690de52
--- /dev/null
+++ b/ra_aid/globals.py
@@ -0,0 +1,6 @@
+"""
+Global constants and configuration values used across the RA-AID codebase.
+"""
+
+# Maximum recursion depth for research agents to prevent infinite loops
+RESEARCH_AGENT_RECURSION_LIMIT = 100
diff --git a/ra_aid/tools/agent.py b/ra_aid/tools/agent.py
index 3e88000..d2c27e9 100644
--- a/ra_aid/tools/agent.py
+++ b/ra_aid/tools/agent.py
@@ -1,16 +1,9 @@
 """Tools for spawning and managing sub-agents."""
 
 from langchain_core.tools import tool
-from typing import Dict, Any, List, Optional
-import uuid
+from typing import Dict, Any
 from rich.console import Console
-from rich.panel import Panel
-from rich.markdown import Markdown
-from langgraph.prebuilt import create_react_agent
-from langgraph.checkpoint.memory import MemorySaver
 from ra_aid.tools.memory import _global_memory
-from ra_aid import run_agent_with_retry
-from ..prompts import RESEARCH_PROMPT
 from .memory import get_memory_value, get_related_files
 from ..llm import initialize_llm
 
@@ -31,52 +24,19 @@ def request_research(query: str) -> Dict[str, Any]:
         - success: Whether completed or interrupted
         - reason: Reason for failure, if any
     """
-    # Initialize model and memory
+    # Initialize model
    model = initialize_llm("anthropic", "claude-3-sonnet-20240229")
-    memory = MemorySaver()
-    memory.memory = _global_memory
-
-    # Configure research tools
-    from ..tool_configs import get_research_tools
-    tools = get_research_tools(research_only=True, expert_enabled=True)
-
-    # Basic config matching main process
-    config = {
-        "thread_id": str(uuid.uuid4()),
-        "memory": memory,
-        "model": model
-    }
-
-    from ra_aid.prompts import (
-        RESEARCH_PROMPT,
-        EXPERT_PROMPT_SECTION_RESEARCH,
-        HUMAN_PROMPT_SECTION_RESEARCH
-    )
-
-    # Create research agent
-    config = _global_memory.get('config', {})
-    expert_enabled = config.get('expert_enabled', False)
-    hil = config.get('hil', False)
-
-    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
-    human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
-
-    agent = create_react_agent(model, tools)
-
-    prompt = RESEARCH_PROMPT.format(
-        base_task=query,
-        research_only_note='',
-        expert_section=expert_section,
-        human_section=human_section
-    )
 
     try:
-        console.print(Panel(Markdown(query), title="🔬 Research Task"))
-        # Run agent with retry logic
-        result = run_agent_with_retry(
-            agent,
-            prompt,
-            {"configurable": {"thread_id": str(uuid.uuid4())}, "recursion_limit": 100}
+        # Run research agent
+        from ..agent_utils import run_research_agent
+        result = run_research_agent(
+            query,
+            model,
+            expert_enabled=True,
+            research_only=True,
+            hil=_global_memory.get('config', {}).get('hil', False),
+            console_message=query
         )
         success = True
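
For reference, a minimal sketch of driving the new entry point directly, using the
same provider/model pair the request_research tool hardcodes above; the query
string is illustrative only:

    from ra_aid.llm import initialize_llm
    from ra_aid.agent_utils import run_research_agent

    # Same provider/model pair used by request_research in ra_aid/tools/agent.py.
    model = initialize_llm("anthropic", "claude-3-sonnet-20240229")

    # Research-only run with expert mode enabled. memory, thread_id, and the run
    # config fall back to the defaults run_research_agent creates internally
    # (a fresh MemorySaver, a new UUID, and RESEARCH_AGENT_RECURSION_LIMIT).
    result = run_research_agent(
        "Research Python async patterns",
        model,
        expert_enabled=True,
        research_only=True,
        console_message="Research Python async patterns",
    )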