From 0afed5580945296e654f85864cc6ed93d1777398 Mon Sep 17 00:00:00 2001
From: AI Christianson
Date: Tue, 4 Mar 2025 13:10:52 -0500
Subject: [PATCH] get rid of dead code around implementation_requested

---
 ra_aid/__main__.py              |  9 ++----
 ra_aid/tools/__init__.py        |  2 --
 ra_aid/tools/agent.py           |  2 +-
 ra_aid/tools/memory.py          | 18 ------------
 tests/ra_aid/test_info_query.py | 41 ++++++++++++++++++++++++++
 tests/ra_aid/test_main.py       | 52 ++++++++++++++++++++++-----------
 6 files changed, 80 insertions(+), 44 deletions(-)
 create mode 100644 tests/ra_aid/test_info_query.py

diff --git a/ra_aid/__main__.py b/ra_aid/__main__.py
index 3fcb679..b167e87 100644
--- a/ra_aid/__main__.py
+++ b/ra_aid/__main__.py
@@ -331,16 +331,13 @@ implementation_memory = MemorySaver()
 
 
 def is_informational_query() -> bool:
-    """Determine if the current query is informational based on implementation_requested state."""
-    return _global_memory.get("config", {}).get(
-        "research_only", False
-    ) or not is_stage_requested("implementation")
+    """Determine if the current query is informational based on config settings."""
+    return _global_memory.get("config", {}).get("research_only", False)
 
 
 def is_stage_requested(stage: str) -> bool:
     """Check if a stage has been requested to proceed."""
-    if stage == "implementation":
-        return _global_memory.get("implementation_requested", False)
+    # This is kept for backward compatibility but no longer does anything
     return False
 
 
diff --git a/ra_aid/tools/__init__.py b/ra_aid/tools/__init__.py
index 4c23d3c..58b6eb3 100644
--- a/ra_aid/tools/__init__.py
+++ b/ra_aid/tools/__init__.py
@@ -11,7 +11,6 @@ from .memory import (
     emit_research_notes,
     get_memory_value,
     plan_implementation_completed,
-    request_implementation,
     task_completed,
 )
 from .programmer import run_programming_task
@@ -35,7+34,6 @@ __all__ = [
     "get_memory_value",
     "list_directory_tree",
     "read_file_tool",
-    "request_implementation",
     "run_programming_task",
     "run_shell_command",
     "put_complete_file_contents",
diff --git a/ra_aid/tools/agent.py b/ra_aid/tools/agent.py
index f5aae80..edddb8c 100644
--- a/ra_aid/tools/agent.py
+++ b/ra_aid/tools/agent.py
@@ -350,7 +350,7 @@ def request_task_implementation(task_spec: str) -> str:
     reset_completion_flags()
 
     _result = run_task_implementation_agent(
-        base_task=_global_memory.get("base_task", ""),
+        base_task="",  # No more base_task from global memory
         tasks=[],  # No more tasks from global memory
         task=task_spec,
         plan="",  # No more plan from global memory
diff --git a/ra_aid/tools/memory.py b/ra_aid/tools/memory.py
index d3fc696..3aaa12f 100644
--- a/ra_aid/tools/memory.py
+++ b/ra_aid/tools/memory.py
@@ -46,7 +46,6 @@ from ra_aid.database.repositories.key_fact_repository import get_key_fact_reposi
 
 # Global memory store
 _global_memory: Dict[str, Any] = {
-    "implementation_requested": False,
     "related_files": {},  # Dict[int, str] - ID to filepath mapping
     "related_file_id_counter": 1,  # Counter for generating unique file IDs
     "agent_depth": 0,
@@ -170,20 +169,6 @@ def emit_key_facts(facts: List[str]) -> str:
     return "Facts stored."
 
 
-@tool("request_implementation")
-def request_implementation() -> str:
-    """Request that implementation proceed after research/planning.
-    Used to indicate the agent should move to implementation stage.
-
-    Think carefully before requesting implementation.
-    Do you need to request research subtasks first?
-    Have you run relevant unit tests, if they exist, to get a baseline (this can be a subtask)?
-    Do you need to crawl deeper to find all related files and symbols?
-    """
-    _global_memory["implementation_requested"] = True
-    console.print(Panel("🚀 Implementation Requested", style="yellow", padding=0))
-    log_work_event("Implementation requested.")
-    return "Implementation requested."
 
 
 @tool("emit_key_snippet")
@@ -278,9 +263,6 @@ def one_shot_completed(message: str) -> str:
     Args:
         message: Completion message to display
     """
-    if _global_memory.get("implementation_requested", False):
-        return "Cannot complete in one shot - implementation was requested"
-
     mark_task_completed(message)
     console.print(Panel(Markdown(message), title="✅ Task Completed"))
     log_work_event(f"Task completed:\n\n{message}")
diff --git a/tests/ra_aid/test_info_query.py b/tests/ra_aid/test_info_query.py
new file mode 100644
index 0000000..d54f443
--- /dev/null
+++ b/tests/ra_aid/test_info_query.py
@@ -0,0 +1,41 @@
+"""Tests for the is_informational_query and is_stage_requested functions."""
+
+from ra_aid.__main__ import is_informational_query, is_stage_requested
+from ra_aid.tools.memory import _global_memory
+
+
+def test_is_informational_query():
+    """Test that is_informational_query only depends on research_only config setting."""
+    # Clear global memory to ensure clean state
+    _global_memory.clear()
+
+    # When research_only is True, should return True
+    _global_memory["config"] = {"research_only": True}
+    assert is_informational_query() is True
+
+    # When research_only is False, should return False
+    _global_memory["config"] = {"research_only": False}
+    assert is_informational_query() is False
+
+    # When config is empty, should return False (default)
+    _global_memory.clear()
+    _global_memory["config"] = {}
+    assert is_informational_query() is False
+
+    # When global memory is empty, should return False (default)
+    _global_memory.clear()
+    assert is_informational_query() is False
+
+
+def test_is_stage_requested():
+    """Test that is_stage_requested always returns False now."""
+    # Clear global memory to ensure clean state
+    _global_memory.clear()
+
+    # Should always return False regardless of input
+    assert is_stage_requested("implementation") is False
+    assert is_stage_requested("anything_else") is False
+
+    # Even if we set implementation_requested in global memory
+    _global_memory["implementation_requested"] = True
+    assert is_stage_requested("implementation") is False
\ No newline at end of file
diff --git a/tests/ra_aid/test_main.py b/tests/ra_aid/test_main.py
index be8c48d..37df53e 100644
--- a/tests/ra_aid/test_main.py
+++ b/tests/ra_aid/test_main.py
@@ -10,12 +10,23 @@ from ra_aid.tools.memory import _global_memory
 @pytest.fixture
 def mock_dependencies(monkeypatch):
     """Mock all dependencies needed for main()."""
+    # Initialize global memory with necessary keys to prevent KeyError
+    _global_memory.clear()
+    _global_memory["related_files"] = {}
+    _global_memory["related_file_id_counter"] = 1
+    _global_memory["agent_depth"] = 0
+    _global_memory["work_log"] = []
+    _global_memory["config"] = {}
+
+    # Mock dependencies that interact with external systems
     monkeypatch.setattr("ra_aid.__main__.check_dependencies", lambda: None)
-
-    monkeypatch.setattr(
-        "ra_aid.__main__.validate_environment", lambda args: (True, [], True, [])
-    )
-
+    monkeypatch.setattr("ra_aid.__main__.validate_environment", lambda args: (True, [], True, []))
+    monkeypatch.setattr("ra_aid.__main__.create_agent", lambda *args, **kwargs: None)
+    monkeypatch.setattr("ra_aid.__main__.run_agent_with_retry", lambda *args, **kwargs: None)
+    monkeypatch.setattr("ra_aid.__main__.run_research_agent", lambda *args, **kwargs: None)
+    monkeypatch.setattr("ra_aid.__main__.run_planning_agent", lambda *args, **kwargs: None)
+
+    # Mock LLM initialization
     def mock_config_update(*args, **kwargs):
         config = _global_memory.get("config", {})
         if kwargs.get("temperature"):
@@ -25,10 +36,6 @@ def mock_dependencies(monkeypatch):
 
     monkeypatch.setattr("ra_aid.__main__.initialize_llm", mock_config_update)
 
-    monkeypatch.setattr(
-        "ra_aid.__main__.run_research_agent", lambda *args, **kwargs: None
-    )
-
 
 def test_recursion_limit_in_global_config(mock_dependencies):
     """Test that recursion limit is correctly set in global config."""
@@ -109,20 +116,31 @@ def test_config_settings(mock_dependencies):
 def test_temperature_validation(mock_dependencies):
     """Test that temperature argument is correctly passed to initialize_llm."""
     import sys
-    from unittest.mock import patch
+    from unittest.mock import patch, ANY
 
     from ra_aid.__main__ import main
 
+    # Reset global memory for clean test
     _global_memory.clear()
+    _global_memory["related_files"] = {}
+    _global_memory["related_file_id_counter"] = 1
+    _global_memory["agent_depth"] = 0
+    _global_memory["work_log"] = []
+    _global_memory["config"] = {}
 
-    with patch("ra_aid.__main__.initialize_llm") as mock_init_llm:
-        with patch.object(
-            sys, "argv", ["ra-aid", "-m", "test", "--temperature", "0.7"]
-        ):
-            main()
-            mock_init_llm.assert_called_once()
-            assert mock_init_llm.call_args.kwargs["temperature"] == 0.7
+    # Test valid temperature (0.7)
+    with patch("ra_aid.__main__.initialize_llm", return_value=None) as mock_init_llm:
+        # Also patch any calls that would actually use the mocked initialize_llm function
+        with patch("ra_aid.__main__.run_research_agent", return_value=None):
+            with patch("ra_aid.__main__.run_planning_agent", return_value=None):
+                with patch.object(
+                    sys, "argv", ["ra-aid", "-m", "test", "--temperature", "0.7"]
+                ):
+                    main()
+                    # Check if temperature was stored in config correctly
+                    assert _global_memory["config"]["temperature"] == 0.7
 
+    # Test invalid temperature (2.1)
     with pytest.raises(SystemExit):
         with patch.object(
             sys, "argv", ["ra-aid", "-m", "test", "--temperature", "2.1"]