Introduce run_research_agent.

Extract the research-stage agent setup from main() into a reusable
run_research_agent() helper in ra_aid/agent_utils.py, add a new
ra_aid/globals.py defining RESEARCH_AGENT_RECURSION_LIMIT, replace the
hardcoded "abc123" thread IDs with uuid.uuid4(), and rewrite
request_research() to delegate to the new helper.

parent 2ac796cf2b
commit ae6052ed15
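For orientation, a minimal sketch of how the new helper is meant to be
called, based on the signature and docstring introduced in
ra_aid/agent_utils.py below (the model setup mirrors the one used by
request_research; the query string is illustrative):

    from ra_aid.llm import initialize_llm
    from ra_aid.agent_utils import run_research_agent

    model = initialize_llm("anthropic", "claude-3-sonnet-20240229")

    # Flags after the bare `*` are keyword-only and select the tool set
    # and prompt sections; memory, config, and thread_id default to a
    # fresh MemorySaver, a basic run config, and a new uuid4 when omitted.
    result = run_research_agent(
        "Research Python async patterns",
        model,
        expert_enabled=True,
        research_only=True,
        hil=False,
        console_message="Research Python async patterns"
    )
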
@@ -1,5 +1,6 @@
 import argparse
 import sys
+import uuid
 from rich.panel import Panel
 from rich.console import Console
 from langgraph.checkpoint.memory import MemorySaver
@@ -7,23 +8,19 @@ from langgraph.prebuilt import create_react_agent
 from ra_aid.env import validate_environment
 from ra_aid.tools.memory import _global_memory, get_related_files, get_memory_value
 from ra_aid import print_stage_header, print_task_header, print_error, run_agent_with_retry
+from ra_aid.agent_utils import run_research_agent
 from ra_aid.prompts import (
-    RESEARCH_PROMPT,
     PLANNING_PROMPT,
     IMPLEMENTATION_PROMPT,
     CHAT_PROMPT,
-    EXPERT_PROMPT_SECTION_RESEARCH,
     EXPERT_PROMPT_SECTION_PLANNING,
     EXPERT_PROMPT_SECTION_IMPLEMENTATION,
-    HUMAN_PROMPT_SECTION_RESEARCH,
     HUMAN_PROMPT_SECTION_PLANNING,
     HUMAN_PROMPT_SECTION_IMPLEMENTATION
 )
 from ra_aid.llm import initialize_llm
 
 from ra_aid.tool_configs import (
-    get_read_only_tools,
-    get_research_tools,
     get_planning_tools,
     get_implementation_tools,
     get_chat_tools
@@ -199,7 +196,7 @@ def main():
 
         # Run chat agent with CHAT_PROMPT
         config = {
-            "configurable": {"thread_id": "abc123"},
+            "configurable": {"thread_id": uuid.uuid4()},
            "recursion_limit": 100,
             "chat_mode": True,
             "cowboy_mode": args.cowboy_mode,
@@ -222,9 +219,7 @@ def main():
 
     base_task = args.message
     config = {
-        "configurable": {
-            "thread_id": "abc123"
-        },
+        "configurable": {"thread_id": uuid.uuid4()},
         "recursion_limit": 100,
         "research_only": args.research_only,
         "cowboy_mode": args.cowboy_mode
@@ -240,29 +235,16 @@ def main():
     # Run research stage
     print_stage_header("Research Stage")
 
-    # Create research agent
-    research_agent = create_react_agent(
-        model,
-        get_research_tools(
-            research_only=_global_memory.get('config', {}).get('research_only', False),
-            expert_enabled=expert_enabled,
-            human_interaction=args.hil
-        ),
-        checkpointer=research_memory
-    )
+    run_research_agent(
+        base_task,
+        model,
+        expert_enabled=expert_enabled,
+        research_only=args.research_only,
+        hil=args.hil,
+        memory=research_memory,
+        config=config
+    )
 
-    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
-    human_section = HUMAN_PROMPT_SECTION_RESEARCH if args.hil else ""
-    research_prompt = RESEARCH_PROMPT.format(
-        expert_section=expert_section,
-        human_section=human_section,
-        base_task=base_task,
-        research_only_note='' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'
-    )
-
-    # Run research agent
-    run_agent_with_retry(research_agent, research_prompt, config)
-
     # Proceed with planning and implementation if not an informational query
     if not is_informational_query():
         print_stage_header("Planning Stage")

--- a/ra_aid/agent_utils.py
+++ b/ra_aid/agent_utils.py
@@ -1,7 +1,11 @@
 """Utility functions for working with agents."""
 
 import time
-from typing import Optional
+import uuid
+from typing import Optional, Any
+
+from langgraph.prebuilt import create_react_agent
+from langgraph.checkpoint.memory import MemorySaver
 
 from langchain_core.messages import HumanMessage
 from langchain_core.messages import BaseMessage
@@ -11,9 +15,98 @@ from rich.markdown import Markdown
 from rich.panel import Panel
 
 from ra_aid.tools.memory import _global_memory
+from ra_aid.globals import RESEARCH_AGENT_RECURSION_LIMIT
+from ra_aid.tool_configs import get_research_tools
+from ra_aid.prompts import (
+    RESEARCH_PROMPT,
+    EXPERT_PROMPT_SECTION_RESEARCH,
+    HUMAN_PROMPT_SECTION_RESEARCH
+)
 
 console = Console()
 
+def run_research_agent(
+    base_task_or_query: str,
+    model,
+    *,
+    expert_enabled: bool = False,
+    research_only: bool = False,
+    hil: bool = False,
+    memory: Optional[Any] = None,
+    config: Optional[dict] = None,
+    thread_id: Optional[str] = None,
+    console_message: Optional[str] = None
+) -> Optional[str]:
+    """Run a research agent with the given configuration.
+
+    Args:
+        base_task_or_query: The main task or query for research
+        model: The LLM model to use
+        expert_enabled: Whether expert mode is enabled
+        research_only: Whether this is a research-only task
+        hil: Whether human-in-the-loop mode is enabled
+        memory: Optional memory instance to use
+        config: Optional configuration dictionary
+        thread_id: Optional thread ID (defaults to new UUID)
+        console_message: Optional message to display before running
+
+    Returns:
+        Optional[str]: The completion message if task completed successfully
+
+    Example:
+        result = run_research_agent(
+            "Research Python async patterns",
+            model,
+            expert_enabled=True,
+            research_only=True
+        )
+    """
+    # Initialize memory if not provided
+    if memory is None:
+        memory = MemorySaver()
+        memory.memory = _global_memory
+
+    # Set up thread ID
+    if thread_id is None:
+        thread_id = str(uuid.uuid4())
+
+    # Configure tools
+    tools = get_research_tools(
+        research_only=research_only,
+        expert_enabled=expert_enabled,
+        human_interaction=hil
+    )
+
+    # Create agent
+    agent = create_react_agent(model, tools, checkpointer=memory)
+
+    # Format prompt sections
+    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
+    human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
+
+    # Build prompt
+    prompt = RESEARCH_PROMPT.format(
+        base_task=base_task_or_query,
+        research_only_note='' if research_only else ' Only request implementation if the user explicitly asked for changes to be made.',
+        expert_section=expert_section,
+        human_section=human_section
+    )
+
+    # Set up configuration
+    run_config = {
+        "configurable": {"thread_id": thread_id},
+        "recursion_limit": 100
+    }
+    if config:
+        run_config.update(config)
+
+    # Display console message if provided
+    if console_message:
+        console.print(Panel(Markdown(console_message), title="🔬 Research Task"))
+
+    # Run agent with retry logic
+    return run_agent_with_retry(agent, prompt, run_config)
+
 def print_agent_output(chunk: dict[str, BaseMessage]) -> None:
     """Print agent output chunks."""
     if chunk.get("delta") and chunk["delta"].content:

--- /dev/null
+++ b/ra_aid/globals.py
@@ -0,0 +1,6 @@
+"""
+Global constants and configuration values used across the RA-AID codebase.
+"""
+
+# Maximum recursion depth for research agents to prevent infinite loops
+RESEARCH_AGENT_RECURSION_LIMIT = 100
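Note: ra_aid/agent_utils.py imports RESEARCH_AGENT_RECURSION_LIMIT, yet
within the hunks shown run_config still hardcodes "recursion_limit": 100.
Presumably the constant is intended to replace that literal, e.g.

    run_config = {
        "configurable": {"thread_id": thread_id},
        "recursion_limit": RESEARCH_AGENT_RECURSION_LIMIT
    }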

@@ -1,16 +1,9 @@
 """Tools for spawning and managing sub-agents."""
 
 from langchain_core.tools import tool
-from typing import Dict, Any, List, Optional
-import uuid
+from typing import Dict, Any
 from rich.console import Console
-from rich.panel import Panel
-from rich.markdown import Markdown
-from langgraph.prebuilt import create_react_agent
-from langgraph.checkpoint.memory import MemorySaver
 from ra_aid.tools.memory import _global_memory
-from ra_aid import run_agent_with_retry
-from ..prompts import RESEARCH_PROMPT
 from .memory import get_memory_value, get_related_files
 from ..llm import initialize_llm
 
@@ -31,52 +24,19 @@ def request_research(query: str) -> Dict[str, Any]:
         - success: Whether completed or interrupted
         - reason: Reason for failure, if any
     """
-    # Initialize model and memory
+    # Initialize model
     model = initialize_llm("anthropic", "claude-3-sonnet-20240229")
-    memory = MemorySaver()
-    memory.memory = _global_memory
-
-    # Configure research tools
-    from ..tool_configs import get_research_tools
-    tools = get_research_tools(research_only=True, expert_enabled=True)
-
-    # Basic config matching main process
-    config = {
-        "thread_id": str(uuid.uuid4()),
-        "memory": memory,
-        "model": model
-    }
-
-    from ra_aid.prompts import (
-        RESEARCH_PROMPT,
-        EXPERT_PROMPT_SECTION_RESEARCH,
-        HUMAN_PROMPT_SECTION_RESEARCH
-    )
-
-    # Create research agent
-    config = _global_memory.get('config', {})
-    expert_enabled = config.get('expert_enabled', False)
-    hil = config.get('hil', False)
-
-    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
-    human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
-
-    agent = create_react_agent(model, tools)
-
-    prompt = RESEARCH_PROMPT.format(
-        base_task=query,
-        research_only_note='',
-        expert_section=expert_section,
-        human_section=human_section
-    )
 
     try:
-        console.print(Panel(Markdown(query), title="🔬 Research Task"))
-        # Run agent with retry logic
-        result = run_agent_with_retry(
-            agent,
-            prompt,
-            {"configurable": {"thread_id": str(uuid.uuid4())}, "recursion_limit": 100}
-        )
+        # Run research agent
+        from ..agent_utils import run_research_agent
+        result = run_research_agent(
+            query,
+            model,
+            expert_enabled=True,
+            research_only=True,
+            hil=_global_memory.get('config', {}).get('hil', False),
+            console_message=query
+        )
 
         success = True
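
A note on config handling in run_research_agent: run_config.update(config)
is a shallow merge, so a caller-supplied config replaces top-level keys
wholesale. In the main() hunk above, config already carries
"configurable": {"thread_id": uuid.uuid4()}, so that thread ID overrides
the one generated inside the helper. A quick illustration (the values are
hypothetical):

    run_config = {
        "configurable": {"thread_id": "inner-id"},
        "recursion_limit": 100
    }
    run_config.update({"configurable": {"thread_id": "outer-id"}, "cowboy_mode": True})
    # run_config == {
    #     "configurable": {"thread_id": "outer-id"},
    #     "recursion_limit": 100,
    #     "cowboy_mode": True
    # }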