From e64c4066a60b2249e2c53b8dfe4132e569f15c5c Mon Sep 17 00:00:00 2001
From: AI Christianson
Date: Sat, 8 Mar 2025 13:35:06 -0500
Subject: [PATCH] implement reasoning-guided workflows

---
 ra_aid/agent_utils.py                         | 179 ++++++++++++++++--
 ra_aid/models_params.py                       |  10 +
 ra_aid/prompts/__init__.py                    |   6 +
 ra_aid/prompts/implementation_prompts.py      |   2 +
 ra_aid/prompts/planning_prompts.py            |   4 +
 ra_aid/prompts/reasoning_assist_prompt.py     |  50 +++++
 ra_aid/prompts/research_prompts.py            |   2 +
 ra_aid/tools/memory.py                        |   4 +-
 tests/ra_aid/prompts/test_planning_prompts.py |  61 ++++++
 9 files changed, 303 insertions(+), 15 deletions(-)
 create mode 100644 ra_aid/prompts/reasoning_assist_prompt.py
 create mode 100644 tests/ra_aid/prompts/test_planning_prompts.py

diff --git a/ra_aid/agent_utils.py b/ra_aid/agent_utils.py
index 1fd3daa..d8a5054 100644
--- a/ra_aid/agent_utils.py
+++ b/ra_aid/agent_utils.py
@@ -1,5 +1,6 @@
 """Utility functions for working with agents."""
 
+import inspect
 import os
 import signal
 import sys
@@ -49,6 +50,7 @@ from ra_aid.exceptions import (
 )
 from ra_aid.fallback_handler import FallbackHandler
 from ra_aid.logging_config import get_logger
+from ra_aid.llm import initialize_expert_llm
 from ra_aid.models_params import DEFAULT_TOKEN_LIMIT, models_params
 from ra_aid.project_info import (
     display_project_status,
@@ -68,6 +70,7 @@ from ra_aid.prompts.human_prompts import (
 from ra_aid.prompts.implementation_prompts import IMPLEMENTATION_PROMPT
 from ra_aid.prompts.common_prompts import NEW_PROJECT_HINTS
 from ra_aid.prompts.planning_prompts import PLANNING_PROMPT
+from ra_aid.prompts.reasoning_assist_prompt import REASONING_ASSIST_PROMPT_PLANNING
 from ra_aid.prompts.research_prompts import (
     RESEARCH_ONLY_PROMPT,
     RESEARCH_PROMPT,
@@ -659,19 +662,25 @@ def run_planning_agent(
         web_research_enabled=config.get("web_research_enabled", False),
     )
 
-    agent = create_agent(model, tools, checkpointer=memory, agent_type="planner")
-
-    expert_section = EXPERT_PROMPT_SECTION_PLANNING if expert_enabled else ""
-    human_section = HUMAN_PROMPT_SECTION_PLANNING if hil else ""
-    web_research_section = (
-        WEB_RESEARCH_PROMPT_SECTION_PLANNING
-        if config.get("web_research_enabled")
-        else ""
-    )
-
+    # Determine the active provider and model
+    provider = config.get("provider") if config else get_config_repository().get("provider", "")
+    model_name = config.get("model") if config else get_config_repository().get("model", "")
+    logger.debug("Checking for reasoning_assist_default on %s/%s", provider, model_name)
+
+    # Look up the model configuration to check for reasoning_assist_default
+    model_config = {}
+    provider_models = models_params.get(provider, {})
+    if provider_models and model_name in provider_models:
+        model_config = provider_models[model_name]
+
+    # Check if reasoning assist is enabled
+    reasoning_assist_enabled = model_config.get("reasoning_assist_default", False)
+    logger.debug("Reasoning assist enabled: %s", reasoning_assist_enabled)
+
+    # Get all the context information (used both for normal planning and reasoning assist)
     current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     working_directory = os.getcwd()
-
+
     # Make sure key_facts is defined before using it
     try:
         key_facts = format_key_facts_dict(get_key_fact_repository().get_facts_dict())
@@ -695,8 +704,149 @@ def run_planning_agent(
         logger.error(f"Failed to access research note repository: {str(e)}")
         formatted_research_notes = ""
 
-    # Get environment inventory information
+    # Get related files
+    related_files = "\n".join(get_related_files())
+
+    # Get environment inventory information
+    env_inv = get_env_inv()
+
+    # Initialize expert guidance section
+    expert_guidance = ""
+
+    # If reasoning assist is enabled, make a one-off call to the expert model
+    if reasoning_assist_enabled:
+        try:
+            logger.info("Reasoning assist enabled for model %s, getting expert guidance", model_name)
+
+            # Collect tool descriptions
+            tool_metadata = []
+            from ra_aid.tools.reflection import get_function_info as get_tool_info
+
+            for tool in tools:
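+                # Each tool wraps a plain function; record its name and
+                # docstring so the reasoning model knows what it can call.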
+                try:
+                    tool_info = get_tool_info(tool.func)
+                    name = tool.func.__name__
+                    description = inspect.getdoc(tool.func)
+                    tool_metadata.append(f"Tool: {name}\nDescription: {description}\n")
+                except Exception as e:
+                    logger.warning(f"Error getting tool info for {tool}: {e}")
+
+            # Format tool metadata
+            formatted_tool_metadata = "\n".join(tool_metadata)
+
+            # Initialize expert model
+            expert_model = initialize_expert_llm(provider, model_name)
+
+            # Format the reasoning assist prompt
+            reasoning_assist_prompt = REASONING_ASSIST_PROMPT_PLANNING.format(
+                current_date=current_date,
+                working_directory=working_directory,
+                base_task=base_task,
+                key_facts=key_facts,
+                key_snippets=key_snippets,
+                research_notes=formatted_research_notes,
+                related_files=related_files,
+                env_inv=env_inv,
+                tool_metadata=formatted_tool_metadata,
+            )
+
+            # Show the reasoning assist query in a panel
+            console.print(
+                Panel(
+                    Markdown("Consulting with the reasoning model on the best way to do this."),
+                    title="📝 Thinking about the plan...",
+                    border_style="yellow",
+                )
+            )
+
+            logger.debug("Invoking expert model for reasoning assist")
+            # Make the call to the expert model
+            response = expert_model.invoke(reasoning_assist_prompt)
+
+            # Check if the model supports think tags
+            supports_think_tag = model_config.get("supports_think_tag", False)
+            supports_thinking = model_config.get("supports_thinking", False)
+
+            # Get response content, handling if it's a list (for Claude thinking mode)
+            content = None
+
+            if hasattr(response, 'content'):
+                content = response.content
+            else:
+                # Fallback if content attribute is missing
+                content = str(response)
+
+            # Process content based on its type
+            if isinstance(content, list):
+                # Handle structured thinking mode (e.g., Claude 3.7)
+                thinking_content = None
+                response_text = None
+
+                # Process each item in the list
+                for item in content:
+                    if isinstance(item, dict):
+                        # Extract thinking content
+                        if item.get('type') == 'thinking' and 'thinking' in item:
+                            thinking_content = item['thinking']
+                            logger.debug("Found structured thinking content")
+                        # Extract response text
+                        elif item.get('type') == 'text' and 'text' in item:
+                            response_text = item['text']
+                            logger.debug("Found structured response text")
+
+                # Display thinking content in a separate panel if available
+                if thinking_content and get_config_repository().get("show_thoughts", False):
+                    logger.debug(f"Displaying structured thinking content ({len(thinking_content)} chars)")
+                    console.print(
+                        Panel(Markdown(thinking_content), title="💭 Expert Thinking", border_style="yellow")
+                    )
+
+                # Use response_text if available, otherwise fall back to joining
+                if response_text:
+                    content = response_text
+                else:
+                    # Fallback: join list items if structured extraction failed
+                    logger.debug("No structured response text found, joining list items")
+                    content = "\n".join(str(item) for item in content)
+            elif (supports_think_tag or supports_thinking) and isinstance(content, str):
+                # Extract think tags if model supports them
+                think_content, remaining_text = extract_think_tag(content)
+                if think_content:
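+                    # Split the think block out of the reply so only the
+                    # actionable guidance reaches the planning prompt.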
logger.debug(f"Found think tag content ({len(think_content)} chars)") + if get_config_repository().get("show_thoughts", False): + console.print( + Panel(Markdown(think_content), title="💭 Expert Thinking", border_style="yellow") + ) + content = remaining_text + + # Display the expert guidance in a panel + console.print( + Panel(Markdown(content), title="Reasoning Guidance", border_style="blue") + ) + + # Use the content as expert guidance + expert_guidance = content + "\n\nCONSULT WITH THE EXPERT FREQUENTLY ON THIS TASK" + + logger.info("Received expert guidance for planning") + except Exception as e: + logger.error("Error getting expert guidance for planning: %s", e) + expert_guidance = "" + + agent = create_agent(model, tools, checkpointer=memory, agent_type="planner") + + expert_section = EXPERT_PROMPT_SECTION_PLANNING if expert_enabled else "" + human_section = HUMAN_PROMPT_SECTION_PLANNING if hil else "" + web_research_section = ( + WEB_RESEARCH_PROMPT_SECTION_PLANNING + if config.get("web_research_enabled") + else "" + ) + + # Prepare expert guidance section if expert guidance is available + expert_guidance_section = "" + if expert_guidance: + expert_guidance_section = f""" +Expert model has analyzed this task and provided the following guidance: + +{expert_guidance} +""" + planning_prompt = PLANNING_PROMPT.format( current_date=current_date, working_directory=working_directory, @@ -706,7 +856,7 @@ def run_planning_agent( base_task=base_task, project_info=formatted_project_info, research_notes=formatted_research_notes, - related_files="\n".join(get_related_files()), + related_files=related_files, key_facts=key_facts, key_snippets=key_snippets, work_log=get_work_log_repository().format_work_log(), @@ -715,7 +865,8 @@ def run_planning_agent( if config.get("research_only") else " Only request implementation if the user explicitly asked for changes to be made." 
), - env_inv=get_env_inv(), + env_inv=env_inv, + expert_guidance_section=expert_guidance_section, ) config = get_config_repository().get_all() if not config else config diff --git a/ra_aid/models_params.py b/ra_aid/models_params.py index 8884fd4..ee1cb31 100644 --- a/ra_aid/models_params.py +++ b/ra_aid/models_params.py @@ -165,6 +165,16 @@ models_params = { "latency_coefficient": DEFAULT_BASE_LATENCY, }, }, + "openrouter": { + "qwen/qwen-2.5-coder-32b-instruct": { + "token_limit": 131072, + "default_temperature": 0.4, + "supports_temperature": True, + "latency_coefficient": DEFAULT_BASE_LATENCY, + "max_tokens": 32000, + "reasoning_assist_default": True, + } + }, "openai-compatible": { "qwen-qwq-32b": { "token_limit": 131072, diff --git a/ra_aid/prompts/__init__.py b/ra_aid/prompts/__init__.py index d57d62d..64212b9 100644 --- a/ra_aid/prompts/__init__.py +++ b/ra_aid/prompts/__init__.py @@ -48,6 +48,9 @@ from ra_aid.prompts.research_prompts import ( # Planning prompts from ra_aid.prompts.planning_prompts import PLANNING_PROMPT +# Reasoning assist prompts +from ra_aid.prompts.reasoning_assist_prompt import REASONING_ASSIST_PROMPT_PLANNING + # Implementation prompts from ra_aid.prompts.implementation_prompts import IMPLEMENTATION_PROMPT @@ -93,6 +96,9 @@ __all__ = [ # Planning prompts "PLANNING_PROMPT", + # Reasoning assist prompts + "REASONING_ASSIST_PROMPT_PLANNING", + # Implementation prompts "IMPLEMENTATION_PROMPT", diff --git a/ra_aid/prompts/implementation_prompts.py b/ra_aid/prompts/implementation_prompts.py index 6c10b9e..eb71548 100644 --- a/ra_aid/prompts/implementation_prompts.py +++ b/ra_aid/prompts/implementation_prompts.py @@ -39,6 +39,8 @@ E.G. IF WE ARE USING A LIBRARY AND IT IS FOUND IN ENV INVENTORY, ADD THE INCLUDE YOU MUST **EXPLICITLY** INCLUDE ANY PATHS FROM THE ABOVE INFO IF NEEDED. IT IS NOT AUTOMATIC. +READ AND STUDY ACTUAL LIBRARY HEADERS/CODE FROM THE ENVIRONMENT, IF AVAILABLE AND RELEVANT. + Important Notes: - Focus solely on the given task and implement it as described. - Scale the complexity of your solution to the complexity of the request. For simple requests, keep it straightforward and minimal. For complex requests, maintain the previously planned depth. diff --git a/ra_aid/prompts/planning_prompts.py b/ra_aid/prompts/planning_prompts.py index c82a13f..c090e27 100644 --- a/ra_aid/prompts/planning_prompts.py +++ b/ra_aid/prompts/planning_prompts.py @@ -40,12 +40,16 @@ ETC. YOU MUST **EXPLICITLY** INCLUDE ANY PATHS FROM THE ABOVE INFO IF NEEDED. IT IS NOT AUTOMATIC. +READ AND STUDY ACTUAL LIBRARY HEADERS/CODE FROM THE ENVIRONMENT, IF AVAILABLE AND RELEVANT. + Work done so far: {work_log} +{expert_guidance_section} + Guidelines: If you need additional input or assistance from the expert (if expert is available), especially for debugging, deeper logic analysis, or correctness checks, use emit_expert_context to provide all relevant context and wait for the expert's response. diff --git a/ra_aid/prompts/reasoning_assist_prompt.py b/ra_aid/prompts/reasoning_assist_prompt.py new file mode 100644 index 0000000..afcfedd --- /dev/null +++ b/ra_aid/prompts/reasoning_assist_prompt.py @@ -0,0 +1,50 @@ +"""Reasoning assist prompts for planning stage.""" + +REASONING_ASSIST_PROMPT_PLANNING = """Current Date: {current_date} +Working Directory: {working_directory} + +I am an agent and need your assistance in planning how to approach the following task in an agentic way. 
I'll be using the provided tools and context to complete this task, but I'd like your high-level strategic guidance before I start. + + +{base_task} + + + +{key_facts} + + + +{key_snippets} + + + +{research_notes} + + + +{related_files} + + + +{env_inv} + + + +{tool_metadata} + + +Please provide high-level planning guidance including: +1. Overall approach strategy +2. Key decision points to consider +3. Potential challenges and how to address them +4. Most effective tools to use for this task +5. Contingency plans if certain approaches don't work +6. Any critical insights from the provided context + +Focus on strategic thinking rather than implementation details. Your guidance will be used to create a detailed implementation plan. + +Please be concise, practical, and specific to this task. Avoid generic advice. + +Include a flowchart of tools you would call and branches in that flowchart to show contingency/conditional paths. +Use ASCII to visualize the approach + contingencies. +""" diff --git a/ra_aid/prompts/research_prompts.py b/ra_aid/prompts/research_prompts.py index 124729c..c1dc068 100644 --- a/ra_aid/prompts/research_prompts.py +++ b/ra_aid/prompts/research_prompts.py @@ -51,6 +51,8 @@ ETC. YOU MUST **EXPLICITLY** INCLUDE ANY PATHS FROM THE ABOVE INFO IF NEEDED. IT IS NOT AUTOMATIC. +READ AND STUDY ACTUAL LIBRARY HEADERS/CODE FROM THE ENVIRONMENT, IF AVAILABLE AND RELEVANT. + Role: You are an autonomous research agent focused solely on enumerating and describing the current codebase and its related files. You are not a planner, not an implementer, and not a chatbot for general problem solving. You will not propose solutions, improvements, or modifications. diff --git a/ra_aid/tools/memory.py b/ra_aid/tools/memory.py index c334b4b..24e52be 100644 --- a/ra_aid/tools/memory.py +++ b/ra_aid/tools/memory.py @@ -170,6 +170,8 @@ def emit_key_snippet(snippet_info: SnippetInfo) -> str: Focus on external interfaces and things that are very specific and relevant to UPCOMING work. + SNIPPETS SHOULD TYPICALLY BE MULTIPLE LINES, NOT SINGLE LINES, NOT ENTIRE FILES. + Args: snippet_info: Dict with keys: - filepath: Path to the source file @@ -472,4 +474,4 @@ def deregister_related_files(file_ids: List[int]) -> str: ) results.append(success_msg) - return "Files noted." \ No newline at end of file + return "Files noted." 
diff --git a/tests/ra_aid/prompts/test_planning_prompts.py b/tests/ra_aid/prompts/test_planning_prompts.py
new file mode 100644
index 0000000..002f6ba
--- /dev/null
+++ b/tests/ra_aid/prompts/test_planning_prompts.py
@@ -0,0 +1,61 @@
+"""Tests for planning prompts."""
+
+import pytest
+from ra_aid.agent_utils import get_config_repository
+from ra_aid.prompts.planning_prompts import PLANNING_PROMPT
+
+
+def test_planning_prompt_expert_guidance_section():
+    """Test that the planning prompt includes the expert_guidance_section placeholder."""
+    assert "{expert_guidance_section}" in PLANNING_PROMPT
+
+
+def test_planning_prompt_formatting_with_expert_guidance():
+    """Test formatting the planning prompt with expert guidance."""
+    # Sample expert guidance
+    expert_guidance_section = "\nThis is test expert guidance\n"
+
+    # Format the prompt
+    formatted_prompt = PLANNING_PROMPT.format(
+        current_date="2025-03-08",
+        working_directory="/test/path",
+        expert_section="",
+        human_section="",
+        web_research_section="",
+        base_task="Test task",
+        project_info="Test project info",
+        research_notes="Test research notes",
+        related_files="Test related files",
+        key_facts="Test key facts",
+        key_snippets="Test key snippets",
+        work_log="Test work log",
+        env_inv="Test env inventory",
+        expert_guidance_section=expert_guidance_section,
+    )
+
+    # Check that the expert guidance section is included
+    assert expert_guidance_section in formatted_prompt
+
+
+def test_planning_prompt_formatting_without_expert_guidance():
+    """Test formatting the planning prompt without expert guidance."""
+    # Format the prompt with empty expert guidance
+    formatted_prompt = PLANNING_PROMPT.format(
+        current_date="2025-03-08",
+        working_directory="/test/path",
+        expert_section="",
+        human_section="",
+        web_research_section="",
+        base_task="Test task",
+        project_info="Test project info",
+        research_notes="Test research notes",
+        related_files="Test related files",
+        key_facts="Test key facts",
+        key_snippets="Test key snippets",
+        work_log="Test work log",
+        env_inv="Test env inventory",
+        expert_guidance_section="",
+    )
+
+    # Check that the expert guidance section placeholder is replaced with empty string
+    assert "{expert_guidance_section}" not in formatted_prompt
\ No newline at end of file
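
A minimal sketch of how the new per-model switch is consumed. It condenses the
lookup run_planning_agent performs above into chained .get() calls (an
equivalent form, not the literal patch code); the provider/model pair is the
one this patch enables by default:

    from ra_aid.models_params import models_params

    provider, model_name = "openrouter", "qwen/qwen-2.5-coder-32b-instruct"

    # Missing providers or models resolve to an empty config, which leaves
    # reasoning assist disabled -- the same fallback the agent code uses.
    model_config = models_params.get(provider, {}).get(model_name, {})
    if model_config.get("reasoning_assist_default", False):
        print("reasoning assist will run before planning")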