Get rid of dead code around implementation_requested

This commit is contained in:
AI Christianson 2025-03-04 13:10:52 -05:00
parent 7845f4d876
commit 0afed55809
6 changed files with 80 additions and 44 deletions

View File

@ -331,16 +331,13 @@ implementation_memory = MemorySaver()
def is_informational_query() -> bool:
"""Determine if the current query is informational based on implementation_requested state."""
return _global_memory.get("config", {}).get(
"research_only", False
) or not is_stage_requested("implementation")
"""Determine if the current query is informational based on config settings."""
return _global_memory.get("config", {}).get("research_only", False)
def is_stage_requested(stage: str) -> bool:
"""Check if a stage has been requested to proceed."""
if stage == "implementation":
return _global_memory.get("implementation_requested", False)
# This is kept for backward compatibility but no longer does anything
return False

View File

@ -11,7 +11,6 @@ from .memory import (
emit_research_notes,
get_memory_value,
plan_implementation_completed,
request_implementation,
task_completed,
)
from .programmer import run_programming_task
@ -35,7 +34,6 @@ __all__ = [
"get_memory_value",
"list_directory_tree",
"read_file_tool",
"request_implementation",
"run_programming_task",
"run_shell_command",
"put_complete_file_contents",

View File

@ -350,7 +350,7 @@ def request_task_implementation(task_spec: str) -> str:
reset_completion_flags()
_result = run_task_implementation_agent(
base_task=_global_memory.get("base_task", ""),
base_task="", # No more base_task from global memory
tasks=[], # No more tasks from global memory
task=task_spec,
plan="", # No more plan from global memory

View File

@ -46,7 +46,6 @@ from ra_aid.database.repositories.key_fact_repository import get_key_fact_reposi
# Global memory store
_global_memory: Dict[str, Any] = {
"implementation_requested": False,
"related_files": {}, # Dict[int, str] - ID to filepath mapping
"related_file_id_counter": 1, # Counter for generating unique file IDs
"agent_depth": 0,
@ -170,20 +169,6 @@ def emit_key_facts(facts: List[str]) -> str:
return "Facts stored."
@tool("request_implementation")
def request_implementation() -> str:
"""Request that implementation proceed after research/planning.
Used to indicate the agent should move to implementation stage.
Think carefully before requesting implementation.
Do you need to request research subtasks first?
Have you run relevant unit tests, if they exist, to get a baseline (this can be a subtask)?
Do you need to crawl deeper to find all related files and symbols?
"""
_global_memory["implementation_requested"] = True
console.print(Panel("🚀 Implementation Requested", style="yellow", padding=0))
log_work_event("Implementation requested.")
return "Implementation requested."
@tool("emit_key_snippet")
@ -278,9 +263,6 @@ def one_shot_completed(message: str) -> str:
Args:
message: Completion message to display
"""
if _global_memory.get("implementation_requested", False):
return "Cannot complete in one shot - implementation was requested"
mark_task_completed(message)
console.print(Panel(Markdown(message), title="✅ Task Completed"))
log_work_event(f"Task completed:\n\n{message}")

View File

@ -0,0 +1,41 @@
"""Tests for the is_informational_query and is_stage_requested functions."""
from ra_aid.__main__ import is_informational_query, is_stage_requested
from ra_aid.tools.memory import _global_memory
def test_is_informational_query():
    """Verify is_informational_query() depends solely on the research_only config flag."""
    # Each case: (config dict to install, expected result).  None means
    # "leave _global_memory completely empty" (no "config" key at all).
    cases = [
        ({"research_only": True}, True),    # explicit True passes through
        ({"research_only": False}, False),  # explicit False passes through
        ({}, False),                        # empty config -> default False
        (None, False),                      # no config key at all -> default False
    ]
    for config, expected in cases:
        _global_memory.clear()  # clean state for every case
        if config is not None:
            _global_memory["config"] = config
        assert is_informational_query() is expected
def test_is_stage_requested():
    """Verify is_stage_requested() is now a stub that always returns False."""
    _global_memory.clear()  # clean state

    # False for any stage name, known or not.
    for stage in ("implementation", "anything_else"):
        assert is_stage_requested(stage) is False

    # The legacy implementation_requested flag must have no effect either.
    _global_memory["implementation_requested"] = True
    assert is_stage_requested("implementation") is False

View File

@ -10,12 +10,23 @@ from ra_aid.tools.memory import _global_memory
@pytest.fixture
def mock_dependencies(monkeypatch):
"""Mock all dependencies needed for main()."""
# Initialize global memory with necessary keys to prevent KeyError
_global_memory.clear()
_global_memory["related_files"] = {}
_global_memory["related_file_id_counter"] = 1
_global_memory["agent_depth"] = 0
_global_memory["work_log"] = []
_global_memory["config"] = {}
# Mock dependencies that interact with external systems
monkeypatch.setattr("ra_aid.__main__.check_dependencies", lambda: None)
monkeypatch.setattr(
"ra_aid.__main__.validate_environment", lambda args: (True, [], True, [])
)
monkeypatch.setattr("ra_aid.__main__.validate_environment", lambda args: (True, [], True, []))
monkeypatch.setattr("ra_aid.__main__.create_agent", lambda *args, **kwargs: None)
monkeypatch.setattr("ra_aid.__main__.run_agent_with_retry", lambda *args, **kwargs: None)
monkeypatch.setattr("ra_aid.__main__.run_research_agent", lambda *args, **kwargs: None)
monkeypatch.setattr("ra_aid.__main__.run_planning_agent", lambda *args, **kwargs: None)
# Mock LLM initialization
def mock_config_update(*args, **kwargs):
config = _global_memory.get("config", {})
if kwargs.get("temperature"):
@ -25,10 +36,6 @@ def mock_dependencies(monkeypatch):
monkeypatch.setattr("ra_aid.__main__.initialize_llm", mock_config_update)
monkeypatch.setattr(
"ra_aid.__main__.run_research_agent", lambda *args, **kwargs: None
)
def test_recursion_limit_in_global_config(mock_dependencies):
"""Test that recursion limit is correctly set in global config."""
@ -109,20 +116,31 @@ def test_config_settings(mock_dependencies):
def test_temperature_validation(mock_dependencies):
"""Test that temperature argument is correctly passed to initialize_llm."""
import sys
from unittest.mock import patch
from unittest.mock import patch, ANY
from ra_aid.__main__ import main
# Reset global memory for clean test
_global_memory.clear()
_global_memory["related_files"] = {}
_global_memory["related_file_id_counter"] = 1
_global_memory["agent_depth"] = 0
_global_memory["work_log"] = []
_global_memory["config"] = {}
with patch("ra_aid.__main__.initialize_llm") as mock_init_llm:
with patch.object(
sys, "argv", ["ra-aid", "-m", "test", "--temperature", "0.7"]
):
main()
mock_init_llm.assert_called_once()
assert mock_init_llm.call_args.kwargs["temperature"] == 0.7
# Test valid temperature (0.7)
with patch("ra_aid.__main__.initialize_llm", return_value=None) as mock_init_llm:
# Also patch any calls that would actually use the mocked initialize_llm function
with patch("ra_aid.__main__.run_research_agent", return_value=None):
with patch("ra_aid.__main__.run_planning_agent", return_value=None):
with patch.object(
sys, "argv", ["ra-aid", "-m", "test", "--temperature", "0.7"]
):
main()
# Check if temperature was stored in config correctly
assert _global_memory["config"]["temperature"] == 0.7
# Test invalid temperature (2.1)
with pytest.raises(SystemExit):
with patch.object(
sys, "argv", ["ra-aid", "-m", "test", "--temperature", "2.1"]