Introduce run_research_agent.

AI Christianson 2024-12-21 12:19:43 -05:00
parent 2ac796cf2b
commit ae6052ed15
4 changed files with 123 additions and 82 deletions


@@ -1,29 +1,26 @@
import argparse
import sys
import uuid
from rich.panel import Panel
from rich.console import Console
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from ra_aid.env import validate_environment
from ra_aid.tools.memory import _global_memory, get_related_files, get_memory_value
from ra_aid import print_stage_header, print_task_header, print_error, run_agent_with_retry
from ra_aid.agent_utils import run_research_agent
from ra_aid.prompts import (
RESEARCH_PROMPT,
PLANNING_PROMPT,
IMPLEMENTATION_PROMPT,
CHAT_PROMPT,
EXPERT_PROMPT_SECTION_RESEARCH,
EXPERT_PROMPT_SECTION_PLANNING,
EXPERT_PROMPT_SECTION_IMPLEMENTATION,
HUMAN_PROMPT_SECTION_RESEARCH,
HUMAN_PROMPT_SECTION_PLANNING,
HUMAN_PROMPT_SECTION_IMPLEMENTATION
)
from ra_aid.llm import initialize_llm
from ra_aid.tool_configs import (
get_read_only_tools,
get_research_tools,
get_planning_tools,
get_implementation_tools,
get_chat_tools
@@ -199,7 +196,7 @@ def main():
# Run chat agent with CHAT_PROMPT
config = {
"configurable": {"thread_id": "abc123"},
"configurable": {"thread_id": uuid.uuid4()},
"recursion_limit": 100,
"chat_mode": True,
"cowboy_mode": args.cowboy_mode,
@@ -222,9 +219,7 @@ def main():
base_task = args.message
config = {
"configurable": {
"thread_id": "abc123"
},
"configurable": {"thread_id": uuid.uuid4()},
"recursion_limit": 100,
"research_only": args.research_only,
"cowboy_mode": args.cowboy_mode
@@ -240,29 +235,16 @@ def main():
# Run research stage
print_stage_header("Research Stage")
# Create research agent
research_agent = create_react_agent(
run_research_agent(
base_task,
model,
get_research_tools(
research_only=_global_memory.get('config', {}).get('research_only', False),
expert_enabled=expert_enabled,
human_interaction=args.hil
),
checkpointer=research_memory
expert_enabled=expert_enabled,
research_only=args.research_only,
hil=args.hil,
memory=research_memory,
config=config
)
expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_RESEARCH if args.hil else ""
research_prompt = RESEARCH_PROMPT.format(
expert_section=expert_section,
human_section=human_section,
base_task=base_task,
research_only_note='' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'
)
# Run research agent
run_agent_with_retry(research_agent, research_prompt, config)
# Proceed with planning and implementation if not an informational query
if not is_informational_query():
print_stage_header("Planning Stage")

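One subtlety of the new call: main() passes its own config, and run_research_agent applies it with run_config.update(config) after setting its defaults, so the caller's thread_id and recursion_limit take precedence. Note that dict.update replaces the nested "configurable" dict wholesale rather than merging it; a minimal illustration (values hypothetical):

    defaults = {"configurable": {"thread_id": "agent-generated"}, "recursion_limit": 100}
    override = {"configurable": {"thread_id": "from-main"}}
    defaults.update(override)
    # defaults["configurable"] is now exactly override["configurable"]:
    # update() swapped the nested dict wholesale instead of merging keys into it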

@@ -1,7 +1,11 @@
"""Utility functions for working with agents."""
import time
from typing import Optional
import uuid
from typing import Optional, Any
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.messages import HumanMessage
from langchain_core.messages import BaseMessage
@@ -11,9 +15,98 @@ from rich.markdown import Markdown
from rich.panel import Panel
from ra_aid.tools.memory import _global_memory
from ra_aid.globals import RESEARCH_AGENT_RECURSION_LIMIT
from ra_aid.tool_configs import get_research_tools
from ra_aid.prompts import (
RESEARCH_PROMPT,
EXPERT_PROMPT_SECTION_RESEARCH,
HUMAN_PROMPT_SECTION_RESEARCH
)
console = Console()
def run_research_agent(
base_task_or_query: str,
model,
*,
expert_enabled: bool = False,
research_only: bool = False,
hil: bool = False,
memory: Optional[Any] = None,
config: Optional[dict] = None,
thread_id: Optional[str] = None,
console_message: Optional[str] = None
) -> Optional[str]:
"""Run a research agent with the given configuration.
Args:
base_task_or_query: The main task or query for research
model: The LLM model to use
expert_enabled: Whether expert mode is enabled
research_only: Whether this is a research-only task
hil: Whether human-in-the-loop mode is enabled
memory: Optional memory instance to use
config: Optional configuration dictionary
thread_id: Optional thread ID (defaults to new UUID)
console_message: Optional message to display before running
Returns:
Optional[str]: The completion message if task completed successfully
Example:
result = run_research_agent(
"Research Python async patterns",
model,
expert_enabled=True,
research_only=True
)
"""
# Initialize memory if not provided
if memory is None:
memory = MemorySaver()
memory.memory = _global_memory
# Set up thread ID
if thread_id is None:
thread_id = str(uuid.uuid4())
# Configure tools
tools = get_research_tools(
research_only=research_only,
expert_enabled=expert_enabled,
human_interaction=hil
)
# Create agent
agent = create_react_agent(model, tools, checkpointer=memory)
# Format prompt sections
expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
# Build prompt
prompt = RESEARCH_PROMPT.format(
base_task=base_task_or_query,
research_only_note='' if research_only else ' Only request implementation if the user explicitly asked for changes to be made.',
expert_section=expert_section,
human_section=human_section
)
# Set up configuration
run_config = {
"configurable": {"thread_id": thread_id},
"recursion_limit": 100
}
if config:
run_config.update(config)
# Display console message if provided
if console_message:
console.print(Panel(Markdown(console_message), title="🔬 Research Task"))
# Run agent with retry logic
return run_agent_with_retry(agent, prompt, run_config)
def print_agent_output(chunk: dict[str, BaseMessage]) -> None:
"""Print agent output chunks."""
if chunk.get("delta") and chunk["delta"].content:

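For quick reference, a standalone invocation of the new helper might look like the following sketch; the provider and model name are borrowed from the request_research tool below, and the console_message text is hypothetical:

    from ra_aid.agent_utils import run_research_agent
    from ra_aid.llm import initialize_llm

    model = initialize_llm("anthropic", "claude-3-sonnet-20240229")
    result = run_research_agent(
        "Research Python async patterns",
        model,
        expert_enabled=True,
        research_only=True,
        console_message="Async patterns research",
    )
    # result is the completion message, or None if the task did not complete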
ra_aid/globals.py

@@ -0,0 +1,6 @@
"""
Global constants and configuration values used across the RA-AID codebase.
"""
# Maximum recursion depth for research agents to prevent infinite loops
RESEARCH_AGENT_RECURSION_LIMIT = 100
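When the limit is exceeded, langgraph raises GraphRecursionError; run_agent_with_retry presumably absorbs this, but a hedged sketch of handling it directly shows what the constant guards against:

    from langchain_core.messages import HumanMessage
    from langgraph.errors import GraphRecursionError

    try:
        agent.invoke({"messages": [HumanMessage(content=prompt)]}, run_config)
    except GraphRecursionError:
        # the agent looped past RESEARCH_AGENT_RECURSION_LIMIT steps without finishing
        ...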


@@ -1,16 +1,9 @@
"""Tools for spawning and managing sub-agents."""
from langchain_core.tools import tool
from typing import Dict, Any, List, Optional
import uuid
from typing import Dict, Any
from rich.console import Console
from rich.panel import Panel
from rich.markdown import Markdown
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from ra_aid.tools.memory import _global_memory
from ra_aid import run_agent_with_retry
from ..prompts import RESEARCH_PROMPT
from .memory import get_memory_value, get_related_files
from ..llm import initialize_llm
@@ -31,52 +24,19 @@ def request_research(query: str) -> Dict[str, Any]:
- success: Whether completed or interrupted
- reason: Reason for failure, if any
"""
# Initialize model and memory
# Initialize model
model = initialize_llm("anthropic", "claude-3-sonnet-20240229")
memory = MemorySaver()
memory.memory = _global_memory
# Configure research tools
from ..tool_configs import get_research_tools
tools = get_research_tools(research_only=True, expert_enabled=True)
# Basic config matching main process
config = {
"thread_id": str(uuid.uuid4()),
"memory": memory,
"model": model
}
from ra_aid.prompts import (
RESEARCH_PROMPT,
EXPERT_PROMPT_SECTION_RESEARCH,
HUMAN_PROMPT_SECTION_RESEARCH
)
# Create research agent
config = _global_memory.get('config', {})
expert_enabled = config.get('expert_enabled', False)
hil = config.get('hil', False)
expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
agent = create_react_agent(model, tools)
prompt = RESEARCH_PROMPT.format(
base_task=query,
research_only_note='',
expert_section=expert_section,
human_section=human_section
)
try:
console.print(Panel(Markdown(query), title="🔬 Research Task"))
# Run agent with retry logic
result = run_agent_with_retry(
agent,
prompt,
{"configurable": {"thread_id": str(uuid.uuid4())}, "recursion_limit": 100}
# Run research agent
from ..agent_utils import run_research_agent
result = run_research_agent(
query,
model,
expert_enabled=True,
research_only=True,
hil=_global_memory.get('config', {}).get('hil', False),
console_message=query
)
success = True
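A hedged sketch of exercising the slimmed-down tool, assuming request_research is registered with the @tool decorator imported above (so it is called via invoke rather than directly); the result keys follow the docstring:

    out = request_research.invoke({"query": "How does MemorySaver persist checkpoints?"})
    # expected shape per the docstring: {"success": ..., "reason": ...}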