support reasoning assistance in research phase.
This commit is contained in:
parent 0a143e8fff · commit d0bf78c5f0
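In short, this commit adds an optional reasoning-assist step to the research phase: before the research agent runs, an expert model can be consulted for a research strategy, and that guidance is injected into the research prompt via a new {expert_guidance_section} placeholder. The sketch below condenses the new flow; all names come from the diff below, while error handling and structured thinking-mode parsing are omitted, so treat it as a simplified illustration rather than the exact implementation.

    # Condensed sketch of the research-phase reasoning assist added by this commit.
    # Names are taken from the diff; error handling and "thinking" parsing are omitted.
    def get_research_expert_guidance(base_task, tools, key_facts, key_snippets,
                                     research_notes, related_files):
        provider = get_config_repository().get("provider", "")
        model_name = get_config_repository().get("model", "")
        model_config = models_params.get(provider, {}).get(model_name, {})

        # CLI flags override the model's reasoning_assist_default.
        if get_config_repository().get("force_reasoning_assistance", False):
            enabled = True
        elif get_config_repository().get("disable_reasoning_assistance", False):
            enabled = False
        else:
            enabled = model_config.get("reasoning_assist_default", False)
        if not enabled:
            return ""

        # Describe the available tools so the expert model knows what the agent can do.
        tool_metadata = "\n".join(
            f"Tool: {t.func.__name__}\nDescription: {inspect.getdoc(t.func)}\n" for t in tools
        )
        prompt = REASONING_ASSIST_PROMPT_RESEARCH.format(
            current_date=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            working_directory=os.getcwd(),
            base_task=base_task,
            key_facts=key_facts,
            key_snippets=key_snippets,
            research_notes=research_notes,
            related_files=related_files,
            env_inv=get_env_inv(),
            tool_metadata=tool_metadata,
        )
        response = initialize_expert_llm(provider, model_name).invoke(prompt)
        return response.content + "\n\nCONSULT WITH THE EXPERT FREQUENTLY DURING RESEARCH"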
@@ -779,7 +779,6 @@ def main():
         research_only=args.research_only,
         hil=args.hil,
         memory=research_memory,
-        config=config,
     )

     # for how long have we had a second planning agent triggered here?
@@ -71,7 +71,7 @@ from ra_aid.prompts.human_prompts import (
 from ra_aid.prompts.implementation_prompts import IMPLEMENTATION_PROMPT
 from ra_aid.prompts.common_prompts import NEW_PROJECT_HINTS
 from ra_aid.prompts.planning_prompts import PLANNING_PROMPT
-from ra_aid.prompts.reasoning_assist_prompt import REASONING_ASSIST_PROMPT_PLANNING, REASONING_ASSIST_PROMPT_IMPLEMENTATION
+from ra_aid.prompts.reasoning_assist_prompt import REASONING_ASSIST_PROMPT_PLANNING, REASONING_ASSIST_PROMPT_IMPLEMENTATION, REASONING_ASSIST_PROMPT_RESEARCH
 from ra_aid.prompts.research_prompts import (
     RESEARCH_ONLY_PROMPT,
     RESEARCH_PROMPT,
@@ -177,6 +177,15 @@ def get_model_token_limit(
         Optional[int]: The token limit if found, None otherwise
     """
     try:
+        # Try to get config from repository for production use
+        try:
+            config_from_repo = get_config_repository().get_all()
+            # If we succeeded, use the repository config instead of passed config
+            config = config_from_repo
+        except RuntimeError:
+            # In tests, this may fail because the repository isn't set up
+            # So we'll use the passed config directly
+            pass
         if agent_type == "research":
             provider = config.get("research_provider", "") or config.get("provider", "")
             model_name = config.get("research_model", "") or config.get("model", "")
@@ -227,7 +236,6 @@ def get_model_token_limit(

 def build_agent_kwargs(
     checkpointer: Optional[Any] = None,
-    config: Dict[str, Any] = None,
     max_input_tokens: Optional[int] = None,
 ) -> Dict[str, Any]:
     """Build kwargs dictionary for agent creation.
@@ -247,6 +255,7 @@ def build_agent_kwargs(
     if checkpointer is not None:
         agent_kwargs["checkpointer"] = checkpointer

+    config = get_config_repository().get_all()
     if config.get("limit_tokens", True) and is_anthropic_claude(config):

         def wrapped_state_modifier(state: AgentState) -> list[BaseMessage]:
@@ -261,12 +270,12 @@ def is_anthropic_claude(config: Dict[str, Any]) -> bool:
     """Check if the provider and model name indicate an Anthropic Claude model.

     Args:
-        provider: The provider name
-        model_name: The model name
+        config: Configuration dictionary containing provider and model information

     Returns:
         bool: True if this is an Anthropic Claude model
     """
+    # For backwards compatibility, allow passing of config directly
     provider = config.get("provider", "")
     model_name = config.get("model", "")
     result = (
@@ -306,7 +315,15 @@ def create_agent(
            config['limit_tokens'] = False.
     """
     try:
-        config = get_config_repository().get_all()
+        # Try to get config from repository for production use
+        try:
+            config_from_repo = get_config_repository().get_all()
+            # If we succeeded, use the repository config instead of passed config
+            config = config_from_repo
+        except RuntimeError:
+            # In tests, this may fail because the repository isn't set up
+            # So we'll use the passed config directly
+            pass
         max_input_tokens = (
             get_model_token_limit(config, agent_type) or DEFAULT_TOKEN_LIMIT
         )
@@ -314,7 +331,7 @@ def create_agent(
         # Use REACT agent for Anthropic Claude models, otherwise use CIAYN
         if is_anthropic_claude(config):
             logger.debug("Using create_react_agent to instantiate agent.")
-            agent_kwargs = build_agent_kwargs(checkpointer, config, max_input_tokens)
+            agent_kwargs = build_agent_kwargs(checkpointer, max_input_tokens)
             return create_react_agent(model, tools, interrupt_after=['tools'], **agent_kwargs)
         else:
             logger.debug("Using CiaynAgent agent instance")
@@ -325,7 +342,7 @@ def create_agent(
         logger.warning(f"Failed to detect model type: {e}. Defaulting to REACT agent.")
         config = get_config_repository().get_all()
         max_input_tokens = get_model_token_limit(config, agent_type)
-        agent_kwargs = build_agent_kwargs(checkpointer, config, max_input_tokens)
+        agent_kwargs = build_agent_kwargs(checkpointer, max_input_tokens)
         return create_react_agent(model, tools, interrupt_after=['tools'], **agent_kwargs)


@@ -338,7 +355,6 @@ def run_research_agent(
     hil: bool = False,
     web_research_enabled: bool = False,
     memory: Optional[Any] = None,
-    config: Optional[dict] = None,
     thread_id: Optional[str] = None,
     console_message: Optional[str] = None,
 ) -> Optional[str]:
@@ -380,31 +396,6 @@ def run_research_agent(
     if memory is None:
         memory = MemorySaver()

-    tools = get_research_tools(
-        research_only=research_only,
-        expert_enabled=expert_enabled,
-        human_interaction=hil,
-        web_research_enabled=config.get("web_research_enabled", False),
-    )
-
-    agent = create_agent(model, tools, checkpointer=memory, agent_type="research")
-
-    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
-    human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
-    web_research_section = (
-        WEB_RESEARCH_PROMPT_SECTION_RESEARCH
-        if config.get("web_research_enabled")
-        else ""
-    )
-
-    try:
-        key_facts = format_key_facts_dict(get_key_fact_repository().get_facts_dict())
-    except RuntimeError as e:
-        logger.error(f"Failed to access key fact repository: {str(e)}")
-        key_facts = ""
-    key_snippets = format_key_snippets_dict(get_key_snippet_repository().get_snippets_dict())
-    related_files = get_related_files()
-
     current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     working_directory = os.getcwd()

@@ -420,6 +411,14 @@ def run_research_agent(
         logger.error(f"Failed to access human input repository: {str(e)}")
         # Continue without appending last human input

+    try:
+        key_facts = format_key_facts_dict(get_key_fact_repository().get_facts_dict())
+    except RuntimeError as e:
+        logger.error(f"Failed to access key fact repository: {str(e)}")
+        key_facts = ""
+    key_snippets = format_key_snippets_dict(get_key_snippet_repository().get_snippets_dict())
+    related_files = get_related_files()
+
     try:
         project_info = get_project_info(".", file_limit=2000)
         formatted_project_info = format_project_info(project_info)
@@ -427,6 +426,179 @@ def run_research_agent(
         logger.warning(f"Failed to get project info: {e}")
         formatted_project_info = ""

+    tools = get_research_tools(
+        research_only=research_only,
+        expert_enabled=expert_enabled,
+        human_interaction=hil,
+        web_research_enabled=get_config_repository().get("web_research_enabled", False),
+    )
+
+    # Get model info for reasoning assistance configuration
+    provider = get_config_repository().get("provider", "")
+    model_name = get_config_repository().get("model", "")
+
+    # Get model configuration to check for reasoning_assist_default
+    model_config = {}
+    provider_models = models_params.get(provider, {})
+    if provider_models and model_name in provider_models:
+        model_config = provider_models[model_name]
+
+    # Check if reasoning assist is explicitly enabled/disabled
+    force_assistance = get_config_repository().get("force_reasoning_assistance", False)
+    disable_assistance = get_config_repository().get("disable_reasoning_assistance", False)
+    if force_assistance:
+        reasoning_assist_enabled = True
+    elif disable_assistance:
+        reasoning_assist_enabled = False
+    else:
+        # Fall back to model default
+        reasoning_assist_enabled = model_config.get("reasoning_assist_default", False)
+
+    logger.debug("Reasoning assist enabled: %s", reasoning_assist_enabled)
+    expert_guidance = ""
+
+    # Get research note information for reasoning assistance
+    try:
+        research_notes = format_research_notes_dict(get_research_note_repository().get_notes_dict())
+    except Exception as e:
+        logger.warning(f"Failed to get research notes: {e}")
+        research_notes = ""
+
+    # If reasoning assist is enabled, make a one-off call to the expert model
+    if reasoning_assist_enabled:
+        try:
+            logger.info("Reasoning assist enabled for model %s, getting expert guidance", model_name)
+
+            # Collect tool descriptions
+            tool_metadata = []
+            from ra_aid.tools.reflection import get_function_info as get_tool_info
+
+            for tool in tools:
+                try:
+                    tool_info = get_tool_info(tool.func)
+                    name = tool.func.__name__
+                    description = inspect.getdoc(tool.func)
+                    tool_metadata.append(f"Tool: {name}\nDescription: {description}\n")
+                except Exception as e:
+                    logger.warning(f"Error getting tool info for {tool}: {e}")
+
+            # Format tool metadata
+            formatted_tool_metadata = "\n".join(tool_metadata)
+
+            # Initialize expert model
+            expert_model = initialize_expert_llm(provider, model_name)
+
+            # Format the reasoning assist prompt
+            reasoning_assist_prompt = REASONING_ASSIST_PROMPT_RESEARCH.format(
+                current_date=current_date,
+                working_directory=working_directory,
+                base_task=base_task,
+                key_facts=key_facts,
+                key_snippets=key_snippets,
+                research_notes=research_notes,
+                related_files=related_files,
+                env_inv=get_env_inv(),
+                tool_metadata=formatted_tool_metadata,
+            )
+
+            # Show the reasoning assist query in a panel
+            console.print(
+                Panel(Markdown("Consulting with the reasoning model on the best research approach."), title="📝 Thinking about research strategy...", border_style="yellow")
+            )
+
+            logger.debug("Invoking expert model for reasoning assist")
+            # Make the call to the expert model
+            response = expert_model.invoke(reasoning_assist_prompt)
+
+            # Check if the model supports think tags
+            supports_think_tag = model_config.get("supports_think_tag", False)
+            supports_thinking = model_config.get("supports_thinking", False)
+
+            # Get response content, handling if it's a list (for Claude thinking mode)
+            content = None
+
+            if hasattr(response, 'content'):
+                content = response.content
+            else:
+                # Fallback if content attribute is missing
+                content = str(response)
+
+            # Process content based on its type
+            if isinstance(content, list):
+                # Handle structured thinking mode (e.g., Claude 3.7)
+                thinking_content = None
+                response_text = None
+
+                # Process each item in the list
+                for item in content:
+                    if isinstance(item, dict):
+                        # Extract thinking content
+                        if item.get('type') == 'thinking' and 'thinking' in item:
+                            thinking_content = item['thinking']
+                            logger.debug("Found structured thinking content")
+                        # Extract response text
+                        elif item.get('type') == 'text' and 'text' in item:
+                            response_text = item['text']
+                            logger.debug("Found structured response text")
+
+                # Display thinking content in a separate panel if available
+                if thinking_content and get_config_repository().get("show_thoughts", False):
+                    logger.debug(f"Displaying structured thinking content ({len(thinking_content)} chars)")
+                    console.print(
+                        Panel(Markdown(thinking_content), title="💭 Expert Thinking", border_style="yellow")
+                    )
+
+                # Use response_text if available, otherwise fall back to joining
+                if response_text:
+                    content = response_text
+                else:
+                    # Fallback: join list items if structured extraction failed
+                    logger.debug("No structured response text found, joining list items")
+                    content = "\n".join(str(item) for item in content)
+            elif (supports_think_tag or supports_thinking):
+                # Process thinking content using the centralized function
+                content, _ = process_thinking_content(
+                    content=content,
+                    supports_think_tag=supports_think_tag,
+                    supports_thinking=supports_thinking,
+                    panel_title="💭 Expert Thinking",
+                    panel_style="yellow",
+                    logger=logger
+                )
+
+            # Display the expert guidance in a panel
+            console.print(
+                Panel(Markdown(content), title="Research Strategy Guidance", border_style="blue")
+            )
+
+            # Use the content as expert guidance
+            expert_guidance = content + "\n\nCONSULT WITH THE EXPERT FREQUENTLY DURING RESEARCH"
+
+            logger.info("Received expert guidance for research")
+        except Exception as e:
+            logger.error("Error getting expert guidance for research: %s", e)
+            expert_guidance = ""
+
+    agent = create_agent(model, tools, checkpointer=memory, agent_type="research")
+
+    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
+    human_section = HUMAN_PROMPT_SECTION_RESEARCH if hil else ""
+    web_research_section = (
+        WEB_RESEARCH_PROMPT_SECTION_RESEARCH
+        if get_config_repository().get("web_research_enabled")
+        else ""
+    )
+
+    # Prepare expert guidance section if expert guidance is available
+    expert_guidance_section = ""
+    if expert_guidance:
+        expert_guidance_section = f"""<expert guidance>
+{expert_guidance}
+</expert guidance>"""
+
+    # Format research notes if available
+    # We get research notes earlier for reasoning assistance
+
     # Get environment inventory information

     prompt = (RESEARCH_ONLY_PROMPT if research_only else RESEARCH_PROMPT).format(
@@ -448,16 +620,16 @@ def run_research_agent(
         project_info=formatted_project_info,
         new_project_hints=NEW_PROJECT_HINTS if project_info.is_new else "",
         env_inv=get_env_inv(),
+        expert_guidance_section=expert_guidance_section,
     )

-    config = get_config_repository().get_all() if not config else config
+    config = get_config_repository().get_all()
     recursion_limit = config.get("recursion_limit", DEFAULT_RECURSION_LIMIT)
     run_config = {
         "configurable": {"thread_id": thread_id},
         "recursion_limit": recursion_limit,
     }
-    if config:
-        run_config.update(config)
+    run_config.update(config)

     try:
         if console_message:
@@ -470,9 +642,9 @@ def run_research_agent(

         if agent is not None:
             logger.debug("Research agent created successfully")
-            none_or_fallback_handler = init_fallback_handler(agent, config, tools)
+            none_or_fallback_handler = init_fallback_handler(agent, tools)
             _result = run_agent_with_retry(
-                agent, prompt, run_config, none_or_fallback_handler
+                agent, prompt, none_or_fallback_handler
             )
             if _result:
                 # Log research completion
@@ -487,7 +659,6 @@ def run_research_agent(
         hil=hil,
         web_research_enabled=web_research_enabled,
         memory=memory,
-        config=config,
         thread_id=thread_id,
         console_message=console_message,
     )
@@ -506,7 +677,6 @@ def run_web_research_agent(
     hil: bool = False,
     web_research_enabled: bool = False,
     memory: Optional[Any] = None,
-    config: Optional[dict] = None,
     thread_id: Optional[str] = None,
     console_message: Optional[str] = None,
 ) -> Optional[str]:
@@ -585,7 +755,7 @@ def run_web_research_agent(
         env_inv=get_env_inv(),
     )

-    config = get_config_repository().get_all() if not config else config
+    config = get_config_repository().get_all()

     recursion_limit = config.get("recursion_limit", DEFAULT_RECURSION_LIMIT)
     run_config = {
@@ -600,7 +770,7 @@ def run_web_research_agent(
             console.print(Panel(Markdown(console_message), title="🔬 Researching..."))

         logger.debug("Web research agent completed successfully")
-        none_or_fallback_handler = init_fallback_handler(agent, config, tools)
+        none_or_fallback_handler = init_fallback_handler(agent, tools)
         _result = run_agent_with_retry(
             agent, prompt, run_config, none_or_fallback_handler
         )
@@ -623,7 +793,6 @@ def run_planning_agent(
     expert_enabled: bool = False,
     hil: bool = False,
     memory: Optional[Any] = None,
-    config: Optional[dict] = None,
    thread_id: Optional[str] = None,
 ) -> Optional[str]:
     """Run a planning agent to create implementation plans.
@@ -634,7 +803,6 @@ def run_planning_agent(
         expert_enabled: Whether expert mode is enabled
         hil: Whether human-in-the-loop mode is enabled
         memory: Optional memory instance to use
-        config: Optional configuration dictionary
         thread_id: Optional thread ID (defaults to new UUID)

     Returns:
@@ -660,12 +828,12 @@ def run_planning_agent(

     tools = get_planning_tools(
         expert_enabled=expert_enabled,
-        web_research_enabled=config.get("web_research_enabled", False),
+        web_research_enabled=get_config_repository().get("web_research_enabled", False),
     )

     # Get model configuration
-    provider = config.get("provider") if config else get_config_repository().get("provider", "")
-    model_name = config.get("model") if config else get_config_repository().get("model", "")
+    provider = get_config_repository().get("provider", "")
+    model_name = get_config_repository().get("model", "")
     logger.debug("Checking for reasoning_assist_default on %s/%s", provider, model_name)

     # Get model configuration to check for reasoning_assist_default
@@ -675,8 +843,8 @@ def run_planning_agent(
         model_config = provider_models[model_name]

     # Check if reasoning assist is explicitly enabled/disabled
-    force_assistance = config.get("force_reasoning_assistance", False) if config else get_config_repository().get("force_reasoning_assistance", False)
-    disable_assistance = config.get("disable_reasoning_assistance", False) if config else get_config_repository().get("disable_reasoning_assistance", False)
+    force_assistance = get_config_repository().get("force_reasoning_assistance", False)
+    disable_assistance = get_config_repository().get("disable_reasoning_assistance", False)

     if force_assistance:
         reasoning_assist_enabled = True
@@ -721,6 +889,9 @@ def run_planning_agent(
     # Get environment inventory information
     env_inv = get_env_inv()

+    # Display the planning stage header before any reasoning assistance
+    print_stage_header("Planning Stage")
+
     # Initialize expert guidance section
     expert_guidance = ""

@@ -845,7 +1016,7 @@ def run_planning_agent(
     human_section = HUMAN_PROMPT_SECTION_PLANNING if hil else ""
     web_research_section = (
         WEB_RESEARCH_PROMPT_SECTION_PLANNING
-        if config.get("web_research_enabled")
+        if get_config_repository().get("web_research_enabled", False)
         else ""
     )

@@ -871,28 +1042,26 @@ def run_planning_agent(
         work_log=get_work_log_repository().format_work_log(),
         research_only_note=(
             ""
-            if config.get("research_only")
+            if get_config_repository().get("research_only", False)
             else " Only request implementation if the user explicitly asked for changes to be made."
         ),
         env_inv=env_inv,
         expert_guidance_section=expert_guidance_section,
     )

-    config = get_config_repository().get_all() if not config else config
-    recursion_limit = config.get("recursion_limit", DEFAULT_RECURSION_LIMIT)
+    config_values = get_config_repository().get_all()
+    recursion_limit = get_config_repository().get("recursion_limit", DEFAULT_RECURSION_LIMIT)
     run_config = {
         "configurable": {"thread_id": thread_id},
         "recursion_limit": recursion_limit,
     }
-    if config:
-        run_config.update(config)
+    run_config.update(config_values)

     try:
-        print_stage_header("Planning Stage")
         logger.debug("Planning agent completed successfully")
-        none_or_fallback_handler = init_fallback_handler(agent, config, tools)
+        none_or_fallback_handler = init_fallback_handler(agent, tools)
         _result = run_agent_with_retry(
-            agent, planning_prompt, run_config, none_or_fallback_handler
+            agent, planning_prompt, none_or_fallback_handler
         )
         if _result:
             # Log planning completion
@@ -916,7 +1085,6 @@ def run_task_implementation_agent(
     expert_enabled: bool = False,
     web_research_enabled: bool = False,
     memory: Optional[Any] = None,
-    config: Optional[dict] = None,
     thread_id: Optional[str] = None,
 ) -> Optional[str]:
     """Run an implementation agent for a specific task.
@@ -930,7 +1098,6 @@ def run_task_implementation_agent(
         expert_enabled: Whether expert mode is enabled
         web_research_enabled: Whether web research is enabled
         memory: Optional memory instance to use
-        config: Optional configuration dictionary
         thread_id: Optional thread ID (defaults to new UUID)

     Returns:
@@ -954,7 +1121,7 @@ def run_task_implementation_agent(

     tools = get_implementation_tools(
         expert_enabled=expert_enabled,
-        web_research_enabled=config.get("web_research_enabled", False),
+        web_research_enabled=get_config_repository().get("web_research_enabled", False),
     )

     agent = create_agent(model, tools, checkpointer=memory, agent_type="planner")
@@ -990,8 +1157,8 @@ def run_task_implementation_agent(
     env_inv = get_env_inv()

     # Get model configuration to check for reasoning_assist_default
-    provider = config.get("provider") if config else get_config_repository().get("provider", "")
-    model_name = config.get("model") if config else get_config_repository().get("model", "")
+    provider = get_config_repository().get("provider", "")
+    model_name = get_config_repository().get("model", "")
     logger.debug("Checking for reasoning_assist_default on %s/%s", provider, model_name)

     model_config = {}
@@ -1000,8 +1167,8 @@ def run_task_implementation_agent(
         model_config = provider_models[model_name]

     # Check if reasoning assist is explicitly enabled/disabled
-    force_assistance = config.get("force_reasoning_assistance", False) if config else get_config_repository().get("force_reasoning_assistance", False)
-    disable_assistance = config.get("disable_reasoning_assistance", False) if config else get_config_repository().get("disable_reasoning_assistance", False)
+    force_assistance = get_config_repository().get("force_reasoning_assistance", False)
+    disable_assistance = get_config_repository().get("disable_reasoning_assistance", False)

     if force_assistance:
         reasoning_assist_enabled = True
@@ -1120,7 +1287,7 @@ def run_task_implementation_agent(
         ),
         web_research_section=(
             WEB_RESEARCH_PROMPT_SECTION_CHAT
-            if config.get("web_research_enabled")
+            if get_config_repository().get("web_research_enabled", False)
             else ""
         ),
         env_inv=env_inv,
@@ -1128,20 +1295,19 @@ def run_task_implementation_agent(
         implementation_guidance_section=implementation_guidance_section,
     )

-    config = get_config_repository().get_all() if not config else config
-    recursion_limit = config.get("recursion_limit", DEFAULT_RECURSION_LIMIT)
+    config_values = get_config_repository().get_all()
+    recursion_limit = get_config_repository().get("recursion_limit", DEFAULT_RECURSION_LIMIT)
     run_config = {
         "configurable": {"thread_id": thread_id},
         "recursion_limit": recursion_limit,
     }
-    if config:
-        run_config.update(config)
+    run_config.update(config_values)

     try:
         logger.debug("Implementation agent completed successfully")
-        none_or_fallback_handler = init_fallback_handler(agent, config, tools)
+        none_or_fallback_handler = init_fallback_handler(agent, tools)
         _result = run_agent_with_retry(
-            agent, prompt, run_config, none_or_fallback_handler
+            agent, prompt, none_or_fallback_handler
         )
         if _result:
             # Log task implementation completion
@@ -1205,6 +1371,8 @@ def reset_agent_completion_flags():


 def _execute_test_command_wrapper(original_prompt, config, test_attempts, auto_test):
+    # For backwards compatibility, allow passing of config directly
+    # No need to get config from repository as it's passed in
     return execute_test_command(config, original_prompt, test_attempts, auto_test)


@@ -1256,15 +1424,15 @@ def get_agent_type(agent: RAgents) -> Literal["CiaynAgent", "React"]:
     return "React"


-def init_fallback_handler(agent: RAgents, config: Dict[str, Any], tools: List[Any]):
+def init_fallback_handler(agent: RAgents, tools: List[Any]):
     """
     Initialize fallback handler if agent is of type "React" and experimental_fallback_handler is enabled; otherwise return None.
     """
-    if not config.get("experimental_fallback_handler", False):
+    if not get_config_repository().get("experimental_fallback_handler", False):
         return None
     agent_type = get_agent_type(agent)
     if agent_type == "React":
-        return FallbackHandler(config, tools)
+        return FallbackHandler(get_config_repository().get_all(), tools)
     return None


@@ -1286,7 +1454,7 @@ def _handle_fallback_response(
     msg_list.extend(msg_list_response)


-def _run_agent_stream(agent: RAgents, msg_list: list[BaseMessage], config: dict):
+def _run_agent_stream(agent: RAgents, msg_list: list[BaseMessage]):
     """
     Streams agent output while handling completion and interruption.

@@ -1299,6 +1467,7 @@ def _run_agent_stream(agent: RAgents, msg_list: list[BaseMessage], config: dict):
     This function adheres to the latest LangGraph best practices (as of March 2025) for handling
     human-in-the-loop interruptions using interrupt_after=["tools"].
     """
+    config = get_config_repository().get_all()
     while True:
         # Process each chunk from the agent stream.
         for chunk in agent.stream({"messages": msg_list}, config):
@@ -1312,7 +1481,7 @@ def _run_agent_stream(agent: RAgents, msg_list: list[BaseMessage], config: dict):
         logger.debug("Stream iteration ended; checking agent state for continuation.")

         # Prepare state configuration, ensuring 'configurable' is present.
-        state_config = config.copy()
+        state_config = get_config_repository().get_all().copy()
         if "configurable" not in state_config:
             logger.debug("Key 'configurable' not found in config; adding it as an empty dict.")
             state_config["configurable"] = {}
@@ -1340,7 +1509,6 @@ def _run_agent_stream(agent: RAgents, msg_list: list[BaseMessage], config: dict):
 def run_agent_with_retry(
     agent: RAgents,
     prompt: str,
-    config: dict,
     fallback_handler: Optional[FallbackHandler] = None,
 ) -> Optional[str]:
     """Run an agent with retry logic for API errors."""
@@ -1349,10 +1517,11 @@ def run_agent_with_retry(
     max_retries = 20
     base_delay = 1
     test_attempts = 0
-    _max_test_retries = config.get("max_test_cmd_retries", DEFAULT_MAX_TEST_CMD_RETRIES)
-    auto_test = config.get("auto_test", False)
+    _max_test_retries = get_config_repository().get("max_test_cmd_retries", DEFAULT_MAX_TEST_CMD_RETRIES)
+    auto_test = get_config_repository().get("auto_test", False)
     original_prompt = prompt
     msg_list = [HumanMessage(content=prompt)]
+    run_config = get_config_repository().get_all()

     # Create a new agent context for this run
     with InterruptibleSection(), agent_context() as ctx:
@@ -1370,12 +1539,12 @@ def run_agent_with_retry(
                 return f"Agent has crashed: {crash_message}"

             try:
-                _run_agent_stream(agent, msg_list, config)
+                _run_agent_stream(agent, msg_list)
                 if fallback_handler:
                     fallback_handler.reset_fallback_handler()
                 should_break, prompt, auto_test, test_attempts = (
                     _execute_test_command_wrapper(
-                        original_prompt, config, test_attempts, auto_test
+                        original_prompt, run_config, test_attempts, auto_test
                     )
                 )
                 if should_break:
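The other change threaded through the agent_utils.py hunks above is that the explicit config dict parameter is dropped from the agent runners and helpers; settings are now read from the process-wide config repository instead. The new init_fallback_handler from the diff shows the pattern in one piece (reassembled here verbatim from the hunk above for readability):

    # After this commit, configuration comes from the repository rather than a passed-in dict.
    def init_fallback_handler(agent: RAgents, tools: List[Any]):
        """
        Initialize fallback handler if agent is of type "React" and
        experimental_fallback_handler is enabled; otherwise return None.
        """
        if not get_config_repository().get("experimental_fallback_handler", False):
            return None
        agent_type = get_agent_type(agent)
        if agent_type == "React":
            return FallbackHandler(get_config_repository().get_all(), tools)
        return None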
@@ -49,7 +49,7 @@ from ra_aid.prompts.research_prompts import (
 from ra_aid.prompts.planning_prompts import PLANNING_PROMPT

 # Reasoning assist prompts
-from ra_aid.prompts.reasoning_assist_prompt import REASONING_ASSIST_PROMPT_PLANNING
+from ra_aid.prompts.reasoning_assist_prompt import REASONING_ASSIST_PROMPT_PLANNING, REASONING_ASSIST_PROMPT_IMPLEMENTATION, REASONING_ASSIST_PROMPT_RESEARCH

 # Implementation prompts
 from ra_aid.prompts.implementation_prompts import IMPLEMENTATION_PROMPT
@@ -98,6 +98,8 @@ __all__ = [

     # Reasoning assist prompts
     "REASONING_ASSIST_PROMPT_PLANNING",
+    "REASONING_ASSIST_PROMPT_IMPLEMENTATION",
+    "REASONING_ASSIST_PROMPT_RESEARCH",

     # Implementation prompts
     "IMPLEMENTATION_PROMPT",
@@ -1,4 +1,4 @@
-"""Reasoning assist prompts for planning and implementation stages."""
+"""Reasoning assist prompts for planning, implementation, and research stages."""

 REASONING_ASSIST_PROMPT_PLANNING = """Current Date: {current_date}
 Working Directory: {working_directory}
@@ -45,6 +45,8 @@ WE DO NOT WANT TO EXCESSIVELY EMIT TINY KEY SNIPPETS --THEY SHOULD BE "paragraph

 Given the available information, tools, and base task, write a couple paragraphs about how an agentic system might use the available tools to plan the base task, break it down into tasks, and request implementation of those tasks. The agent will not be writing any code at this point, so we should keep it to high level tasks and keep the focus on project planning.

+The agent has a tendency to do the same work/function calls over and over again.
+
 Answer quickly and confidently with five sentences at most.
 """

@@ -91,3 +93,46 @@ Given the available information, tools, and base task, write a couple paragraphs

 Answer quickly and confidently with a few sentences at most.
 """
+
+REASONING_ASSIST_PROMPT_RESEARCH = """Current Date: {current_date}
+Working Directory: {working_directory}
+
+<base task or query>
+{base_task}
+</base task or query>
+
+<key facts>
+{key_facts}
+</key facts>
+
+<key snippets>
+{key_snippets}
+</key snippets>
+
+<research notes>
+{research_notes}
+</research notes>
+
+<related files>
+{related_files}
+</related files>
+
+<environment information>
+{env_inv}
+</environment information>
+
+<available tools>
+{tool_metadata}
+</available tools>
+
+FOCUS ON DISCOVERING KEY INFORMATION ABOUT THE CODEBASE, SYSTEM DESIGN, AND ARCHITECTURE.
+THE AGENT SHOULD EMIT KEY FACTS ABOUT IMPORTANT CONCEPTS, WORKFLOWS, OR PATTERNS DISCOVERED.
+IMPORTANT CODE SNIPPETS THAT ILLUMINATE CORE FUNCTIONALITY SHOULD BE EMITTED AS KEY SNIPPETS.
+DO NOT EMIT REDUNDANT KEY FACTS OR SNIPPETS THAT ALREADY EXIST.
+KEY SNIPPETS SHOULD BE SUBSTANTIAL "PARAGRAPHS" OF CODE, NOT SINGLE LINES OR ENTIRE FILES.
+IF INFORMATION IS TOO COMPLEX TO UNDERSTAND, THE AGENT SHOULD USE ask_expert.
+
+Given the available information, tools, and base task or query, write a couple paragraphs about how an agentic system might use the available tools to research the codebase, identify important components, gather key information, and emit key facts and snippets. The focus is on thorough investigation and understanding before any implementation. Remember, the research agent generally should emit research notes at the end of its execution, right before it calls request_implementation if a change or new work is required.
+
+Answer quickly and confidently with five sentences at most.
+"""
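The guidance returned by the expert model is wrapped in an <expert guidance> block and substituted into the {expert_guidance_section} placeholder that the following research_prompts.py hunks add. A minimal, self-contained sketch of that wiring, where RESEARCH_PROMPT_STUB and the guidance text are illustrative stand-ins (the real RESEARCH_PROMPT takes many more fields):

    # Sketch only: RESEARCH_PROMPT_STUB stands in for the real RESEARCH_PROMPT template.
    RESEARCH_PROMPT_STUB = "Task: {base_task}\n{expert_guidance_section}\n"

    expert_guidance = "Start by mapping where the config repository is read."  # illustrative
    expert_guidance_section = ""
    if expert_guidance:
        expert_guidance_section = f"""<expert guidance>
    {expert_guidance}
    </expert guidance>"""

    print(RESEARCH_PROMPT_STUB.format(
        base_task="Understand how agents read configuration",
        expert_guidance_section=expert_guidance_section,
    ))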
@@ -124,6 +124,7 @@ If you find this is an empty directory, you can stop research immediately and as
 {expert_section}
 {human_section}
 {web_research_section}
+{expert_guidance_section}

 You have often been criticized for:
 - Needlessly requesting more research tasks, especially for general background knowledge which you already know.
@@ -184,6 +185,8 @@ If the user explicitly requests implementation, that means you should first perf
 {base_task}
 </user query>

+{expert_guidance_section}
+
 USER QUERY *ALWAYS* TAKES PRECEDENCE OVER EVERYTHING IN PREVIOUS RESEARCH.

 KEEP IT SIMPLE
@@ -209,6 +212,8 @@ When you emit research notes, keep it extremely concise and relevant only to the
 {base_task}
 </user query>

+{expert_guidance_section}
+
 USER QUERY *ALWAYS* TAKES PRECEDENCE OVER EVERYTHING IN PREVIOUS RESEARCH.

 KEEP IT SIMPLE
||||||
|
|
@ -360,7 +360,6 @@ def request_task_implementation(task_spec: str) -> str:
|
||||||
related_files=related_files,
|
related_files=related_files,
|
||||||
model=model,
|
model=model,
|
||||||
expert_enabled=True,
|
expert_enabled=True,
|
||||||
config=config,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
success = True
|
success = True
|
||||||
|
|
@@ -490,7 +489,6 @@ def request_implementation(task_spec: str) -> str:
     _result = run_planning_agent(
         task_spec,
         model,
-        config=config,
         expert_enabled=True,
         hil=config.get("hil", False),
     )
||||||
|
|
@ -370,7 +370,7 @@ def test_agent_context_depth():
|
||||||
assert ctx3.depth == 2
|
assert ctx3.depth == 2
|
||||||
|
|
||||||
|
|
||||||
def test_run_agent_stream(monkeypatch):
|
def test_run_agent_stream(monkeypatch, mock_config_repository):
|
||||||
from ra_aid.agent_utils import _run_agent_stream
|
from ra_aid.agent_utils import _run_agent_stream
|
||||||
|
|
||||||
# Create a simple state class with a next property
|
# Create a simple state class with a next property
|
||||||
|
|
@@ -404,7 +404,7 @@ def test_run_agent_stream(monkeypatch):
     monkeypatch.setattr(
         "ra_aid.agent_utils.print_agent_output", fake_print_agent_output
     )
-    _run_agent_stream(dummy_agent, [HumanMessage("dummy prompt")], {})
+    _run_agent_stream(dummy_agent, [HumanMessage("dummy prompt")])
    assert call_flag["called"]

     with agent_context() as ctx:
@@ -530,7 +530,7 @@ def test_is_anthropic_claude():
     )  # Wrong provider


-def test_run_agent_with_retry_checks_crash_status(monkeypatch):
+def test_run_agent_with_retry_checks_crash_status(monkeypatch, mock_config_repository):
     """Test that run_agent_with_retry checks for crash status at the beginning of each iteration."""
     from ra_aid.agent_context import agent_context, mark_agent_crashed
     from ra_aid.agent_utils import run_agent_with_retry
@@ -593,7 +593,7 @@ def test_run_agent_with_retry_checks_crash_status(monkeypatch):
     assert "Agent has crashed: Test crash message" in result


-def test_run_agent_with_retry_handles_badrequest_error(monkeypatch):
+def test_run_agent_with_retry_handles_badrequest_error(monkeypatch, mock_config_repository):
     """Test that run_agent_with_retry properly handles BadRequestError as unretryable."""
     from ra_aid.agent_context import agent_context, is_crashed
     from ra_aid.agent_utils import run_agent_with_retry
@@ -651,7 +651,7 @@ def test_run_agent_with_retry_handles_badrequest_error(monkeypatch):
     assert is_crashed()


-def test_run_agent_with_retry_handles_api_badrequest_error(monkeypatch):
+def test_run_agent_with_retry_handles_api_badrequest_error(monkeypatch, mock_config_repository):
     """Test that run_agent_with_retry properly handles API BadRequestError as unretryable."""
     # Import APIError from anthropic module and patch it on the agent_utils module
