Add web research prompt section.

user 2024-12-23 16:21:28 -05:00
parent ebfdcf821e
commit f9fd6a4cde
5 changed files with 78 additions and 9 deletions

View File

@@ -132,7 +132,7 @@ def main():
"""Main entry point for the ra-aid command line tool."""
try:
args = parse_arguments()
- expert_enabled, expert_missing = validate_environment(args)  # Will exit if main env vars missing
+ expert_enabled, expert_missing, web_research_enabled, web_research_missing = validate_environment(args)  # Will exit if main env vars missing
if expert_missing:
console.print(Panel(
@@ -142,6 +142,15 @@ def main():
title="Expert Tools Disabled",
style="yellow"
))
+ if web_research_missing:
+ console.print(Panel(
+ f"[yellow]Web research disabled due to missing configuration:[/yellow]\n" +
+ "\n".join(f"- {m}" for m in web_research_missing) +
+ "\nSet the required environment variables to enable web research.",
+ title="Web Research Disabled",
+ style="yellow"
+ ))
# Create the base model after validation
model = initialize_llm(args.provider, args.model)
@@ -167,6 +176,7 @@ def main():
"chat_mode": True,
"cowboy_mode": args.cowboy_mode,
"hil": True, # Always true in chat mode
"web_research_enabled": web_research_enabled,
"initial_request": initial_request
}
@@ -176,7 +186,10 @@ def main():
_global_memory['config']['expert_model'] = args.expert_model
# Run chat agent and exit
- run_agent_with_retry(chat_agent, CHAT_PROMPT.format(initial_request=initial_request), config)
+ run_agent_with_retry(chat_agent, CHAT_PROMPT.format(
+ initial_request=initial_request,
+ web_research_section=WEB_RESEARCH_PROMPT_SECTION_CHAT if web_research_enabled else ""
+ ), config)
return
# Validate message is provided
@@ -189,7 +202,8 @@ def main():
"configurable": {"thread_id": uuid.uuid4()},
"recursion_limit": 100,
"research_only": args.research_only,
"cowboy_mode": args.cowboy_mode
"cowboy_mode": args.cowboy_mode,
"web_research_enabled": web_research_enabled
}
# Store config in global memory for access by is_informational_query
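
Taken together, the main() changes unpack the two new return values, warn when web research is unconfigured, and record the flag in the agent config. The following is a minimal, self-contained sketch of that flow; the stubbed validate_environment and the standalone config dict are simplifications for illustration, while the rich Panel/Console calls match the pattern shown in the hunk above.

import os
from rich.console import Console
from rich.panel import Panel

console = Console()

def validate_environment(args=None):
    # Stub covering only the web research half of the real check.
    missing = []
    if not os.environ.get('TAVILY_API_KEY'):
        missing.append('TAVILY_API_KEY environment variable is not set')
    return True, [], not missing, missing

expert_enabled, expert_missing, web_research_enabled, web_research_missing = validate_environment()

if web_research_missing:
    console.print(Panel(
        "[yellow]Web research disabled due to missing configuration:[/yellow]\n"
        + "\n".join(f"- {m}" for m in web_research_missing)
        + "\nSet the required environment variables to enable web research.",
        title="Web Research Disabled",
        style="yellow"
    ))

config = {"web_research_enabled": web_research_enabled}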

View File

@@ -31,12 +31,16 @@ from ra_aid.prompts import (
EXPERT_PROMPT_SECTION_IMPLEMENTATION,
HUMAN_PROMPT_SECTION_IMPLEMENTATION,
EXPERT_PROMPT_SECTION_RESEARCH,
+ WEB_RESEARCH_PROMPT_SECTION_RESEARCH,
+ WEB_RESEARCH_PROMPT_SECTION_CHAT,
+ WEB_RESEARCH_PROMPT_SECTION_PLANNING,
RESEARCH_PROMPT,
RESEARCH_ONLY_PROMPT,
HUMAN_PROMPT_SECTION_RESEARCH,
PLANNING_PROMPT,
EXPERT_PROMPT_SECTION_PLANNING,
- HUMAN_PROMPT_SECTION_PLANNING
+ WEB_RESEARCH_PROMPT_SECTION_PLANNING,
+ HUMAN_PROMPT_SECTION_PLANNING,
)
from langgraph.checkpoint.memory import MemorySaver
@@ -120,6 +124,7 @@ def run_research_agent(
# Format prompt sections
expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
+ web_research_section = WEB_RESEARCH_PROMPT_SECTION_RESEARCH if config.get('web_research') else ""
# Get research context from memory
key_facts = _global_memory.get("key_facts", "")
@@ -132,6 +137,7 @@ def run_research_agent(
research_only_note='' if research_only else ' Only request implementation if the user explicitly asked for changes to be made.',
expert_section=expert_section,
human_section=human_section,
+ web_research_section=web_research_section,
key_facts=key_facts,
code_snippets=code_snippets,
related_files=related_files
@@ -193,11 +199,13 @@ def run_planning_agent(
# Format prompt sections
expert_section = EXPERT_PROMPT_SECTION_PLANNING if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_PLANNING if hil else ""
+ web_research_section = WEB_RESEARCH_PROMPT_SECTION_PLANNING if config.get('web_research') else ""
# Build prompt
planning_prompt = PLANNING_PROMPT.format(
expert_section=expert_section,
human_section=human_section,
+ web_research_section=web_research_section,
base_task=base_task,
research_notes=get_memory_value('research_notes'),
related_files="\n".join(get_related_files()),
@@ -272,7 +280,8 @@ def run_task_implementation_agent(
key_snippets=get_memory_value('key_snippets'),
work_log=get_memory_value('work_log'),
expert_section=EXPERT_PROMPT_SECTION_IMPLEMENTATION if expert_enabled else "",
- human_section=HUMAN_PROMPT_SECTION_IMPLEMENTATION if _global_memory.get('config', {}).get('hil', False) else ""
+ human_section=HUMAN_PROMPT_SECTION_IMPLEMENTATION if _global_memory.get('config', {}).get('hil', False) else "",
+ web_research_section=WEB_RESEARCH_PROMPT_SECTION_CHAT if config.get('web_research') else ""
)
# Set up configuration
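
Each agent helper follows the same pattern: pick the stage-specific web research section when the flag is set, otherwise substitute an empty string so the prompt renders without that block. Below is a simplified sketch of the pattern; RESEARCH_TEMPLATE, build_research_prompt, and the abbreviated section text are toy stand-ins (the real code reads the flag from config and formats RESEARCH_PROMPT / PLANNING_PROMPT instead).

WEB_RESEARCH_PROMPT_SECTION_RESEARCH = """
Web Research:
- Use request_web_research to search and gather relevant information
"""

RESEARCH_TEMPLATE = """Task: {base_task}
{web_research_section}
Begin the research phase."""

def build_research_prompt(base_task: str, web_research_enabled: bool) -> str:
    # Substitute the section when enabled; an empty string makes it vanish cleanly.
    web_research_section = WEB_RESEARCH_PROMPT_SECTION_RESEARCH if web_research_enabled else ""
    return RESEARCH_TEMPLATE.format(
        base_task=base_task,
        web_research_section=web_research_section,
    )

print(build_research_prompt("audit the config loader", web_research_enabled=True))
print(build_research_prompt("audit the config loader", web_research_enabled=False))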

View File

@@ -20,7 +20,7 @@ PROVIDER_CONFIGS = {
"openai-compatible": ProviderConfig("OPENAI_API_KEY", base_required=True),
}
- def validate_environment(args) -> Tuple[bool, List[str]]:
+ def validate_environment(args) -> Tuple[bool, List[str], bool, List[str]]:
"""Validate required environment variables and dependencies.
Args:
@@ -32,6 +32,8 @@ def validate_environment(args) -> Tuple[bool, List[str]]:
Tuple containing:
- bool: Whether expert mode is enabled
- List[str]: List of missing expert configuration items
+ - bool: Whether web research is enabled
+ - List[str]: List of missing web research configuration items
Raises:
SystemExit: If required base environment variables are missing
@@ -91,4 +93,13 @@ def validate_environment(args) -> Tuple[bool, List[str]]:
if expert_missing:
expert_enabled = False
- return expert_enabled, expert_missing
+ # Check web research dependencies
+ web_research_missing = []
+ if not os.environ.get('TAVILY_API_KEY'):
+ web_research_missing.append('TAVILY_API_KEY environment variable is not set')
+ web_research_enabled = True
+ if web_research_missing:
+ web_research_enabled = False
+ return expert_enabled, expert_missing, web_research_enabled, web_research_missing
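
The web research check mirrors the existing expert check: collect the missing items, clear the enabled flag if anything is missing, and return both alongside the expert results. A standalone sketch of just the new tail, assuming only TAVILY_API_KEY is required (check_web_research is a hypothetical helper, not part of the diff):

import os
from typing import List, Tuple

def check_web_research() -> Tuple[bool, List[str]]:
    # Mirrors the new tail of validate_environment for the web research half.
    missing: List[str] = []
    if not os.environ.get('TAVILY_API_KEY'):
        missing.append('TAVILY_API_KEY environment variable is not set')
    return (not missing), missing

web_research_enabled, web_research_missing = check_web_research()
print(web_research_enabled, web_research_missing)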

View File

@@ -68,6 +68,39 @@ Human Interaction:
- Wait for responses before proceeding
"""
+ # Web research prompt sections
+ WEB_RESEARCH_PROMPT_SECTION_RESEARCH = """
+ Web Research:
+ If you need additional information from web sources:
+ - Use request_web_research to search and gather relevant information
+ - Wait for web research results before proceeding with research
+ - The web research can help validate patterns, best practices, or documentation
+ """
+ WEB_RESEARCH_PROMPT_SECTION_PLANNING = """
+ Web Research:
+ If you need to confirm current best practices or implementation approaches:
+ - Use request_web_research to verify implementation patterns
+ - Wait for web research results before finalizing plans
+ - Web research can help validate architectural decisions and technical choices
+ """
+ WEB_RESEARCH_PROMPT_SECTION_IMPLEMENTATION = """
+ Web Research:
+ If you need guidance on specific implementation details:
+ - Use request_web_research to find examples or documentation
+ - Wait for web research results before proceeding with implementation
+ - Web research can help validate specific code patterns or configurations
+ """
+ WEB_RESEARCH_PROMPT_SECTION_CHAT = """
+ Web Research:
+ If you need to obtain additional context from online sources during chat:
+ - Use request_web_research to gather relevant information
+ - Wait for web research results before proceeding
+ - Web research can help provide up-to-date information and best practices
+ """
# Research stage prompt - guides initial codebase analysis
RESEARCH_PROMPT = """User query: {base_task} --keep it simple

View File

@@ -60,7 +60,8 @@ def request_research(query: str) -> ResearchResult:
expert_enabled=True,
research_only=True,
hil=config.get('hil', False),
- console_message=query
+ console_message=query,
+ config=config
)
except AgentInterrupt:
print()
@@ -117,7 +118,8 @@ def request_research_and_implementation(query: str) -> Dict[str, Any]:
expert_enabled=True,
research_only=False,
hil=config.get('hil', False),
- console_message=query
+ console_message=query,
+ config=config
)
success = True
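
Passing config through to the research agent lets the spawned sub-agent see the same web research flag the top-level CLI resolved. A hypothetical, simplified version of that hand-off, with config passed explicitly for brevity and both function bodies reduced to stand-ins:

from typing import Any, Dict, Optional

def run_research_agent(base_task: str, *, config: Optional[Dict[str, Any]] = None) -> str:
    # Stand-in for the real agent runner: it only needs to read the flag.
    config = config or {}
    web_research_enabled = bool(config.get('web_research_enabled'))
    return f"research({base_task!r}, web_research={web_research_enabled})"

def request_research(query: str, config: Dict[str, Any]) -> str:
    # The tool now forwards its own config instead of letting the sub-agent default it.
    return run_research_agent(query, config=config)

print(request_research("compare retry strategies", {"web_research_enabled": True}))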