Adjust prompts based on tool availability/flags.
parent c6668fa866
commit 565dee74cf
@@ -21,6 +21,9 @@ from ra_aid.prompts import (
     RESEARCH_PROMPT,
     PLANNING_PROMPT,
     IMPLEMENTATION_PROMPT,
+    EXPERT_PROMPT_SECTION_RESEARCH,
+    EXPERT_PROMPT_SECTION_PLANNING,
+    EXPERT_PROMPT_SECTION_IMPLEMENTATION,
 )
 import time
 from anthropic import APIError, APITimeoutError, RateLimitError, InternalServerError
@@ -196,7 +199,7 @@ def is_informational_query() -> bool:
 def is_stage_requested(stage: str) -> bool:
     """Check if a stage has been requested to proceed."""
     if stage == 'implementation':
-        return len(_global_memory.get('implementation_requested', [])) > 0
+        return _global_memory.get('implementation_requested', False)
     return False
 
 def run_agent_with_retry(agent, prompt: str, config: dict) -> Optional[str]:
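
Note on this hunk: `implementation_requested` is now read as a plain boolean flag rather than a list of requests. A minimal stand-alone sketch of the new read path (the stand-in `_global_memory` dict and the assertions are illustrative, not taken from the codebase):

# Minimal sketch: stand-in for the module-level _global_memory store.
_global_memory = {}

def is_stage_requested(stage: str) -> bool:
    """Boolean-flag version: True only if implementation was explicitly requested."""
    if stage == 'implementation':
        # Defaults to False when the flag was never set, so callers need no setup.
        return _global_memory.get('implementation_requested', False)
    return False

assert is_stage_requested('implementation') is False   # nothing requested yet
_global_memory['implementation_requested'] = True      # assumed to be set by the requesting tool
assert is_stage_requested('implementation') is True
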
@@ -227,10 +230,9 @@ def run_agent_with_retry(agent, prompt: str, config: dict) -> Optional[str]:
             # Check for task completion after each chunk
             if _global_memory.get('task_completed'):
                 completion_msg = _global_memory.get('completion_message', 'Task was completed successfully.')
-                print_stage_header("Task Completed")
                 console.print(Panel(
                     f"[green]{completion_msg}[/green]",
-                    title="Task Completed",
+                    title="✅ Task Completed",
                     style="green"
                 ))
                 return completion_msg
@@ -268,7 +270,8 @@ def run_implementation_stage(base_task, tasks, plan, related_files, model, exper
     task_agent = create_react_agent(model, get_implementation_tools(expert_enabled=expert_enabled), checkpointer=task_memory)
 
     # Construct task-specific prompt
-    task_prompt = IMPLEMENTATION_PROMPT.format(
+    expert_section = EXPERT_PROMPT_SECTION_IMPLEMENTATION if expert_enabled else ""
+    task_prompt = (IMPLEMENTATION_PROMPT + expert_section).format(
         plan=plan,
         key_facts=get_memory_value('key_facts'),
         key_snippets=get_memory_value('key_snippets'),
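
For reference, a minimal sketch of the composition pattern this hunk introduces: the expert section is appended only when expert tools are enabled, and the combined template is then formatted. The template text below is illustrative, not the real IMPLEMENTATION_PROMPT, and note that an appended section must not contain unescaped braces, since str.format treats the concatenated string as one template.

# Illustrative stand-ins; the real templates live in ra_aid.prompts.
IMPLEMENTATION_PROMPT = "Plan:\n{plan}\n\nKey facts:\n{key_facts}\n"
EXPERT_PROMPT_SECTION_IMPLEMENTATION = "\nExpert Consultation:\n- Ask the expert when in doubt.\n"

def build_task_prompt(plan: str, key_facts: str, expert_enabled: bool) -> str:
    # Append the expert guidance only when the expert tools are available.
    expert_section = EXPERT_PROMPT_SECTION_IMPLEMENTATION if expert_enabled else ""
    # The concatenated string is formatted as a single template, so the expert
    # section may not contain stray '{' or '}' characters.
    return (IMPLEMENTATION_PROMPT + expert_section).format(
        plan=plan,
        key_facts=key_facts,
    )

print(build_task_prompt("Refactor module X", "Tests live under tests/", expert_enabled=True))
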
@@ -362,9 +365,11 @@ def main():
         checkpointer=research_memory
     )
 
+    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
     research_prompt = f"""User query: {base_task} --keep it simple
 
 {RESEARCH_PROMPT}
+{expert_section}
 
 Be very thorough in your research and emit lots of snippets, key facts. If you take more than a few steps, be eager to emit research subtasks.{'' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'}"""
 
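
The research prompt takes the same approach but composes with an f-string, so the expert section simply interpolates to an empty string when expert tools are disabled. A rough sketch with stand-in template text (the wrapper function and its parameters are hypothetical, not part of the actual main()):

RESEARCH_PROMPT = "Objective\nInvestigate the codebase and record key facts."  # stand-in
EXPERT_PROMPT_SECTION_RESEARCH = "Expert Consultation:\n- Ask the expert for deep analysis."  # stand-in

def build_research_prompt(base_task: str, expert_enabled: bool, research_only: bool) -> str:
    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
    # With expert disabled, {expert_section} renders as an empty line and the
    # rest of the prompt is unchanged.
    return f"""User query: {base_task} --keep it simple

{RESEARCH_PROMPT}
{expert_section}

Be very thorough in your research and emit lots of snippets, key facts.{'' if research_only else ' Only request implementation if the user explicitly asked for changes to be made.'}"""

print(build_research_prompt("How is retry handled?", expert_enabled=False, research_only=True))
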
@@ -389,7 +394,8 @@ Be very thorough in your research and emit lots of snippets, key facts. If you t
     # Create planning agent
     planning_agent = create_react_agent(model, get_planning_tools(expert_enabled=expert_enabled), checkpointer=planning_memory)
 
-    planning_prompt = PLANNING_PROMPT.format(
+    expert_section = EXPERT_PROMPT_SECTION_PLANNING if expert_enabled else ""
+    planning_prompt = (PLANNING_PROMPT + expert_section).format(
         research_notes=get_memory_value('research_notes'),
         key_facts=get_memory_value('key_facts'),
         key_snippets=get_memory_value('key_snippets'),
@@ -9,6 +9,31 @@ These updated prompts include instructions to scale complexity:
 - For more complex requests, still provide detailed planning and thorough steps.
 """
 
+# Expert-specific prompt sections
+EXPERT_PROMPT_SECTION_RESEARCH = """
+Expert Consultation:
+If you need additional guidance or analysis:
+- Use emit_expert_context to provide all relevant context about what you've found
+- Wait for expert response before proceeding with research
+- The expert can help analyze complex codebases or unclear patterns
+"""
+
+EXPERT_PROMPT_SECTION_PLANNING = """
+Expert Consultation:
+If you need additional input or assistance:
+- First use emit_expert_context to provide all relevant context
+- Wait for the expert's response before defining tasks in non-trivial scenarios
+- The expert can help with architectural decisions and implementation approaches
+"""
+
+EXPERT_PROMPT_SECTION_IMPLEMENTATION = """
+Expert Consultation:
+If you have any doubts about logic or debugging (or how to best test something):
+- Use emit_expert_context to provide context about your specific concern
+- Ask the expert to perform deep analysis
+- Wait for expert guidance before proceeding with implementation
+"""
+
 # Research stage prompt - guides initial codebase analysis
 RESEARCH_PROMPT = """
 Objective
@@ -220,7 +245,7 @@ Testing:
 - If the tests have not already been run, run them using run_shell_command to get a baseline of functionality (e.g. were any tests failing before we started working? Do they all pass?)
 - If you add or change any unit tests, run them using run_shell_command and ensure they pass (check docs or analyze directory structure/test files to infer how to run them.)
 - Start with running very specific tests, then move to more general/complete test suites.
-- If you have any doubts about logic or debugging (or how to best test something), ask the expert to perform deep analysis.
+{expert_section}
 - Only test UI components if there is already a UI testing system in place.
 - Only test things that can be tested by an automated process.
 
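
The Testing bullet that previously hard-coded expert advice is now a `{expert_section}` placeholder inside the prompt template itself. A hedged sketch of how such a placeholder could be filled or blanked at format time; the `expert_section` keyword argument shown here is an assumption about how a caller supplies it, not something visible in this hunk:

# Stand-in template with an inline placeholder, mirroring the Testing section.
TESTING_TEMPLATE = """Testing:
- Start with running very specific tests, then move to more general/complete test suites.
{expert_section}
- Only test things that can be tested by an automated process.
"""

EXPERT_HINT = "- If you have any doubts about logic or debugging, ask the expert to perform deep analysis."  # stand-in

def render_testing_section(expert_enabled: bool) -> str:
    # Assumed call shape: the placeholder is filled with either the expert hint
    # or an empty string, so the bullet list stays well-formed either way.
    return TESTING_TEMPLATE.format(expert_section=EXPERT_HINT if expert_enabled else "")

print(render_testing_section(expert_enabled=True))
print(render_testing_section(expert_enabled=False))
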
@@ -324,7 +324,7 @@ def one_shot_completed(message: str) -> str:
     """
    if len(_global_memory['research_subtasks']) > 0:
         return "Cannot complete in one shot - research subtasks pending"
-    if len(_global_memory['implementation_requested']) > 0:
+    if _global_memory.get('implementation_requested', False):
         return "Cannot complete in one shot - implementation was requested"
 
     _global_memory['task_completed'] = True