diff --git a/ra_aid/__main__.py b/ra_aid/__main__.py
index d9405d3..bdd3200 100644
--- a/ra_aid/__main__.py
+++ b/ra_aid/__main__.py
@@ -15,8 +15,7 @@ from ra_aid.tools import (
     emit_research_notes, emit_plan, emit_related_files, emit_task,
     emit_expert_context, get_memory_value, emit_key_facts, delete_key_facts,
     emit_key_snippets, delete_key_snippets,
-    request_implementation, read_file_tool, emit_research_subtask,
-    fuzzy_find_project_files, ripgrep_search, list_directory_tree
+    emit_research_subtask, request_implementation, read_file_tool, fuzzy_find_project_files, ripgrep_search, list_directory_tree
 )
 from ra_aid.tools.memory import _global_memory, get_related_files
 from ra_aid import print_agent_output, print_stage_header, print_task_header, print_error
@@ -103,21 +102,12 @@ research_memory = MemorySaver()
 planning_memory = MemorySaver()
 implementation_memory = MemorySaver()

-def get_research_tools(research_only: bool = False) -> list:
-    """Get the list of research tools based on mode.
-
-    Args:
-        research_only: If True, exclude implementation-related tools
-
-    Returns:
-        List of available research tools
-    """
+def get_research_tools(research_only: bool = False, expert_enabled: bool = True, llm_enabled: bool = True) -> list:
+    """Get the list of research tools based on mode and availability."""
     tools = [
         list_directory_tree,
         emit_research_subtask,
         run_shell_command,
-        emit_expert_context,
-        ask_expert,
         emit_research_notes,
         emit_related_files,
         emit_key_facts,
@@ -129,55 +119,64 @@ def get_research_tools(research_only: bool = False) -> list:
         ripgrep_search
     ]

-    if not research_only:
+    if expert_enabled and llm_enabled:
+        tools.append(emit_expert_context)
+        tools.append(ask_expert)
+
+    if not research_only and llm_enabled:
         tools.append(request_implementation)

     return tools

-# Define tool sets for each stage
-planning_tools = [list_directory_tree, emit_expert_context, ask_expert, emit_plan, emit_task, emit_related_files, emit_key_facts, delete_key_facts, emit_key_snippets, delete_key_snippets, read_file_tool, fuzzy_find_project_files, ripgrep_search]
-implementation_tools = [list_directory_tree, run_shell_command, emit_expert_context, ask_expert, run_programming_task, emit_related_files, emit_key_facts, delete_key_facts, emit_key_snippets, delete_key_snippets, read_file_tool, fuzzy_find_project_files, ripgrep_search]
+def get_planning_tools(expert_enabled: bool = True, llm_enabled: bool = True) -> list:
+    tools = [
+        list_directory_tree,
+        emit_plan,
+        emit_task,
+        emit_related_files,
+        emit_key_facts,
+        delete_key_facts,
+        emit_key_snippets,
+        delete_key_snippets,
+        read_file_tool,
+        fuzzy_find_project_files,
+        ripgrep_search
+    ]
+    if expert_enabled and llm_enabled:
+        tools.append(ask_expert)
+        tools.append(emit_expert_context)
+    return tools
+def get_implementation_tools(expert_enabled: bool = True, llm_enabled: bool = True) -> list:
+    tools = [
+        list_directory_tree,
+        run_shell_command,
+        run_programming_task,
+        emit_related_files,
+        emit_key_facts,
+        delete_key_facts,
+        emit_key_snippets,
+        delete_key_snippets,
+        read_file_tool,
+        fuzzy_find_project_files,
+        ripgrep_search
+    ]
+    if expert_enabled and llm_enabled:
+        tools.append(ask_expert)
+        tools.append(emit_expert_context)
+    return tools


 def is_informational_query() -> bool:
-    """Determine if the current query is informational based on implementation_requested state.
-
-    Returns:
-        bool: True if query is informational (no implementation requested), False otherwise
-    """
-    # Check both the research_only flag and implementation_requested state
     return _global_memory.get('config', {}).get('research_only', False) or not is_stage_requested('implementation')

 def is_stage_requested(stage: str) -> bool:
-    """Check if a stage has been requested to proceed.
-
-    Args:
-        stage: The stage to check ('implementation')
-
-    Returns:
-        True if the stage was requested, False otherwise
-    """
     if stage == 'implementation':
         return len(_global_memory.get('implementation_requested', [])) > 0
     return False

 def run_agent_with_retry(agent, prompt: str, config: dict):
-    """Run an agent with retry logic for internal server errors.
-
-    Args:
-        agent: The agent to run
-        prompt: The prompt to send to the agent
-        config: Configuration dictionary for the agent
-
-    Returns:
-        None
-
-    Raises:
-        TaskCompletedException: If the task is completed and should exit
-        RuntimeError: If max retries exceeded
-    """
     max_retries = 20
-    base_delay = 1  # Initial delay in seconds
+    base_delay = 1

     for attempt in range(max_retries):
         try:
@@ -191,21 +190,19 @@ def run_agent_with_retry(agent, prompt: str, config: dict):
             if attempt == max_retries - 1:
                 raise RuntimeError(f"Max retries ({max_retries}) exceeded. Last error: {str(e)}")

-            delay = base_delay * (2 ** attempt)  # Exponential backoff
+            delay = base_delay * (2 ** attempt)
             error_type = e.__class__.__name__
             print_error(f"Encountered {error_type}: {str(e)}. Retrying in {delay} seconds... (Attempt {attempt + 1}/{max_retries})")
             time.sleep(delay)
             continue

-def run_implementation_stage(base_task, tasks, plan, related_files, model):
-    """Run implementation stage with a distinct agent for each task."""
+def run_implementation_stage(base_task, tasks, plan, related_files, model, expert_enabled: bool, llm_enabled: bool):
     if not is_stage_requested('implementation'):
         print_stage_header("Implementation Stage Skipped")
         return

     print_stage_header("Implementation Stage")

-    # Get tasks directly from memory instead of using get_memory_value which joins with newlines
     task_list = _global_memory['tasks']

     print_task_header(f"Found {len(task_list)} tasks to implement")
@@ -213,13 +210,9 @@ def run_implementation_stage(base_task, tasks, plan, related_files, model):
     for i, task in enumerate(task_list, 1):
         print_task_header(task)

-        # Create a unique memory instance for this task
         task_memory = MemorySaver()
+        task_agent = create_react_agent(model, get_implementation_tools(expert_enabled=expert_enabled, llm_enabled=llm_enabled), checkpointer=task_memory)

-        # Create a fresh agent for each task with its own memory
-        task_agent = create_react_agent(model, implementation_tools, checkpointer=task_memory)
-
-        # Construct task-specific prompt
         task_prompt = IMPLEMENTATION_PROMPT.format(
             plan=plan,
             key_facts=get_memory_value('key_facts'),
@@ -229,29 +222,27 @@ def run_implementation_stage(base_task, tasks, plan, related_files, model):
             base_task=base_task
         )

-        # Run agent for this task
-        run_agent_with_retry(task_agent, task_prompt, {"configurable": {"thread_id": "abc123"}, "recursion_limit": 100})
+        if llm_enabled:
+            run_agent_with_retry(task_agent, task_prompt, {"configurable": {"thread_id": "abc123"}, "recursion_limit": 100})
+        else:
+            console.print(Panel("[yellow]LLM is disabled, cannot implement tasks[/yellow]", title="No LLM Available"))

-
-def run_research_subtasks(base_task: str, config: dict, model):
-    """Run research subtasks with separate agents."""
+def run_research_subtasks(base_task: str, config: dict, model, expert_enabled: bool, llm_enabled: bool):
     subtasks = _global_memory.get('research_subtasks', [])
     if not subtasks:
         return

     print_stage_header("Research Subtasks")

-    # Get tools for subtask agents (excluding research subtask and implementation tools)
     research_only = _global_memory.get('config', {}).get('research_only', False)
     subtask_tools = [
-        tool for tool in get_research_tools(research_only=research_only)
-        if tool.name not in ['emit_research_subtask']
+        t for t in get_research_tools(research_only=research_only, expert_enabled=expert_enabled, llm_enabled=llm_enabled)
+        if t.name not in ['emit_research_subtask']
     ]

     for i, subtask in enumerate(subtasks, 1):
         print_task_header(f"Research Subtask {i}/{len(subtasks)}")

-        # Create fresh memory and agent for each subtask
         subtask_memory = MemorySaver()
         subtask_agent = create_react_agent(
             model,
@@ -259,22 +250,19 @@ def run_research_subtasks(base_task: str, config: dict, model):
             checkpointer=subtask_memory
         )

-        # Run the subtask agent
         subtask_prompt = f"Research Subtask: {subtask}\n\n{RESEARCH_PROMPT}"
-        run_agent_with_retry(subtask_agent, subtask_prompt, config)
-
+
+        if llm_enabled:
+            run_agent_with_retry(subtask_agent, subtask_prompt, config)
+        else:
+            console.print(Panel("[yellow]LLM is disabled, cannot perform LLM-based research[/yellow]", title="No LLM Available"))

 def validate_environment(args):
-    """Validate required environment variables and dependencies.
-
-    Args:
-        args: The parsed command line arguments
-    """
     missing = []
     provider = args.provider
     expert_provider = args.expert_provider

-    # Check API keys based on provider
+    # Main provider keys
     if provider == "anthropic":
         if not os.environ.get('ANTHROPIC_API_KEY'):
             missing.append('ANTHROPIC_API_KEY environment variable is not set')
@@ -290,49 +278,73 @@ def validate_environment(args):
         if not os.environ.get('OPENAI_API_BASE'):
             missing.append('OPENAI_API_BASE environment variable is not set')

-    # Check expert provider keys with fallback to regular keys if providers match
+    # Expert keys
+    expert_missing = []
     if expert_provider == "anthropic":
         expert_key_missing = not os.environ.get('EXPERT_ANTHROPIC_API_KEY')
         fallback_available = expert_provider == provider and os.environ.get('ANTHROPIC_API_KEY')
         if expert_key_missing and not fallback_available:
-            missing.append('EXPERT_ANTHROPIC_API_KEY environment variable is not set')
+            expert_missing.append('EXPERT_ANTHROPIC_API_KEY environment variable is not set')
     elif expert_provider == "openai":
         expert_key_missing = not os.environ.get('EXPERT_OPENAI_API_KEY')
         fallback_available = expert_provider == provider and os.environ.get('OPENAI_API_KEY')
         if expert_key_missing and not fallback_available:
-            missing.append('EXPERT_OPENAI_API_KEY environment variable is not set')
+            expert_missing.append('EXPERT_OPENAI_API_KEY environment variable is not set')
     elif expert_provider == "openrouter":
         expert_key_missing = not os.environ.get('EXPERT_OPENROUTER_API_KEY')
         fallback_available = expert_provider == provider and os.environ.get('OPENROUTER_API_KEY')
         if expert_key_missing and not fallback_available:
-            missing.append('EXPERT_OPENROUTER_API_KEY environment variable is not set')
+            expert_missing.append('EXPERT_OPENROUTER_API_KEY environment variable is not set')
     elif expert_provider == "openai-compatible":
         expert_key_missing = not os.environ.get('EXPERT_OPENAI_API_KEY')
         fallback_available = expert_provider == provider and os.environ.get('OPENAI_API_KEY')
         if expert_key_missing and not fallback_available:
-            missing.append('EXPERT_OPENAI_API_KEY environment variable is not set')
+            expert_missing.append('EXPERT_OPENAI_API_KEY environment variable is not set')
         expert_base_missing = not os.environ.get('EXPERT_OPENAI_API_BASE')
         base_fallback_available = expert_provider == provider and os.environ.get('OPENAI_API_BASE')
         if expert_base_missing and not base_fallback_available:
-            missing.append('EXPERT_OPENAI_API_BASE environment variable is not set')
+            expert_missing.append('EXPERT_OPENAI_API_BASE environment variable is not set')

+    # If main keys missing, just disable LLM entirely
+    llm_enabled = True
     if missing:
-        print_error("Missing required dependencies:")
-        for item in missing:
-            print_error(f"- {item}")
-        sys.exit(1)
+        llm_enabled = False
+
+    # If expert keys missing, disable expert
+    expert_enabled = True
+    if expert_missing:
+        expert_enabled = False
+
+    return llm_enabled, expert_enabled, missing, expert_missing

 def main():
-    """Main entry point for the ra-aid command line tool."""
     try:
         try:
             args = parse_arguments()
-            validate_environment(args)  # Will exit if env vars missing
+            llm_enabled, expert_enabled, missing, expert_missing = validate_environment(args)

-            # Create the base model after validation
-            model = initialize_llm(args.provider, args.model)
-
-            # Validate message is provided
+            # If main LLM keys missing
+            if missing:
+                # Disable LLM completely
+                console.print(Panel(
+                    f"[yellow]LLM disabled due to missing main provider configuration:[/yellow]\n" +
+                    "\n".join(f"- {m}" for m in missing) +
+                    "\nSet the required environment variables to enable LLM features.",
+                    title="LLM Disabled",
+                    style="yellow"
+                ))
+
+            # If expert keys missing
+            if expert_missing:
+                console.print(Panel(
+                    f"[yellow]Expert tools disabled due to missing configuration:[/yellow]\n" +
+                    "\n".join(f"- {m}" for m in expert_missing) +
+                    "\nSet the required environment variables or args to enable expert mode.",
+                    title="Expert Tools Disabled",
+                    style="yellow"
+                ))
+
+            # If no message, exit
             if not args.message:
                 print_error("--message is required")
                 sys.exit(1)
@@ -347,63 +359,71 @@ def main():
                 "cowboy_mode": args.cowboy_mode
             }

-            # Store config in global memory for access by is_informational_query
             _global_memory['config'] = config
-
-            # Store expert provider and model in config
             _global_memory['config']['expert_provider'] = args.expert_provider
             _global_memory['config']['expert_model'] = args.expert_model

-            # Run research stage
+            # Only initialize the model if LLM is enabled
+            model = None
+            if llm_enabled:
+                model = initialize_llm(args.provider, args.model)
+
             print_stage_header("Research Stage")

-            # Create research agent with local model
-            research_agent = create_react_agent(
-                model,
-                get_research_tools(research_only=_global_memory.get('config', {}).get('research_only', False)),
-                checkpointer=research_memory
+            research_tools = get_research_tools(
+                research_only=_global_memory.get('config', {}).get('research_only', False),
+                expert_enabled=expert_enabled,
+                llm_enabled=llm_enabled
             )

-            research_prompt = f"""User query: {base_task} --keep it simple
+            # If no LLM, we can't run the agent, just print a message
+            if llm_enabled:
+                research_agent = create_react_agent(
+                    model,
+                    research_tools,
+                    checkpointer=research_memory
+                )
+
+                research_prompt = f"""User query: {base_task} --keep it simple

 {RESEARCH_PROMPT}

 Be very thorough in your research and emit lots of snippets, key facts.
 If you take more than a few steps, be eager to emit research subtasks.{'' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'}"""
-
-            try:
-                run_agent_with_retry(research_agent, research_prompt, config)
-            except TaskCompletedException as e:
-                print_stage_header("Task Completed")
-                raise  # Re-raise to be caught by outer handler
-
-            # Run any research subtasks
-            run_research_subtasks(base_task, config, model)
+                try:
+                    run_agent_with_retry(research_agent, research_prompt, config)
+                except TaskCompletedException as e:
+                    print_stage_header("Task Completed")
+                    raise
+            else:
+                console.print(Panel("[yellow]LLM is disabled, cannot perform LLM-based research[/yellow]", title="No LLM Available"))
+
+            run_research_subtasks(base_task, config, model, expert_enabled=expert_enabled, llm_enabled=llm_enabled)

-            # Proceed with planning and implementation if not an informational query
             if not is_informational_query():
                 print_stage_header("Planning Stage")
+                planning_tools = get_planning_tools(expert_enabled=expert_enabled, llm_enabled=llm_enabled)

-                # Create planning agent
-                planning_agent = create_react_agent(model, planning_tools, checkpointer=planning_memory)
-
-                planning_prompt = PLANNING_PROMPT.format(
-                    research_notes=get_memory_value('research_notes'),
-                    key_facts=get_memory_value('key_facts'),
-                    key_snippets=get_memory_value('key_snippets'),
-                    base_task=base_task,
-                    related_files="\n".join(get_related_files())
-                )
+                if llm_enabled:
+                    planning_agent = create_react_agent(model, planning_tools, checkpointer=planning_memory)
+                    planning_prompt = PLANNING_PROMPT.format(
+                        research_notes=get_memory_value('research_notes'),
+                        key_facts=get_memory_value('key_facts'),
+                        key_snippets=get_memory_value('key_snippets'),
+                        base_task=base_task,
+                        related_files="\n".join(get_related_files())
+                    )
+                    run_agent_with_retry(planning_agent, planning_prompt, config)
+                else:
+                    console.print(Panel("[yellow]LLM is disabled, cannot perform planning[/yellow]", title="No LLM Available"))

-                # Run planning agent
-                run_agent_with_retry(planning_agent, planning_prompt, config)
-
-                # Run implementation stage with task-specific agents
                 run_implementation_stage(
                     base_task,
                     get_memory_value('tasks'),
                     get_memory_value('plan'),
                     get_related_files(),
-                    model
+                    model,
+                    expert_enabled=expert_enabled,
+                    llm_enabled=llm_enabled
                 )
     except TaskCompletedException:
         sys.exit(0)
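
Note (not part of the patch): validate_environment() no longer exits on missing keys; it returns feature flags that the caller uses to degrade gracefully. A minimal sketch of consuming the new four-tuple contract, assuming `args` comes from parse_arguments(); the plain print calls are illustrative stand-ins for the rich Panel output used in main().

```python
# Sketch of the new return contract: (llm_enabled, expert_enabled, missing, expert_missing).
llm_enabled, expert_enabled, missing, expert_missing = validate_environment(args)

if not llm_enabled:
    # Main provider keys are absent: agent stages are skipped instead of aborting the process.
    print("LLM disabled:", "; ".join(missing))
if not expert_enabled:
    # Only ask_expert / emit_expert_context are withheld; everything else still runs.
    print("Expert tools disabled:", "; ".join(expert_missing))
```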
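For reference, run_agent_with_retry keeps its exponential backoff (delay = base_delay * 2 ** attempt, bounded only by max_retries = 20). A self-contained sketch of the same pattern, with a generic callable standing in for the agent stream loop; the helper name retry_with_backoff is illustrative, not part of the patch.

```python
import time

def retry_with_backoff(fn, max_retries: int = 20, base_delay: float = 1.0):
    """Sketch of the retry loop in run_agent_with_retry: exponential backoff
    (1s, 2s, 4s, ...) with a hard cap on the number of attempts."""
    for attempt in range(max_retries):
        try:
            return fn()
        except Exception as e:  # the real code retries only transient provider errors
            if attempt == max_retries - 1:
                raise RuntimeError(f"Max retries ({max_retries}) exceeded. Last error: {e}")
            delay = base_delay * (2 ** attempt)
            print(f"{type(e).__name__}: {e}. Retrying in {delay} seconds... "
                  f"(Attempt {attempt + 1}/{max_retries})")
            time.sleep(delay)
```

The delay itself is uncapped, so the later attempts wait for hours; that behavior is unchanged by this patch.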
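The three tool builders now degrade instead of assuming every provider is configured: the expert tools are included only when both expert_enabled and llm_enabled are true, and request_implementation additionally requires that research_only is off. An illustrative check of that gating (hypothetical usage, not part of the patch):

```python
# With expert keys missing, the expert tools simply drop out of the research toolset.
tools = get_research_tools(research_only=True, expert_enabled=False, llm_enabled=True)

assert ask_expert not in tools
assert emit_expert_context not in tools
assert request_implementation not in tools  # excluded because research_only=True
```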