Fix unit test warnings.
parent bfe438379f
commit f569bb14b1
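The warnings addressed here appear to come from calling LangChain tools as plain functions: wrapping a function with @tool turns it into a BaseTool, and calling that object directly goes through a __call__ path that recent langchain-core releases flag as deprecated. The fix throughout the tests is to call .invoke() with a dict keyed by the tool's parameter names. A minimal sketch of the pattern (the stub body below is illustrative, not the real ra_aid.tools implementation):

    from langchain_core.tools import tool

    @tool
    def read_file_tool(filepath: str) -> dict:
        """Read a file and return its content."""
        # Stand-in body for illustration only.
        return {"content": f"contents of {filepath}"}

    # Old style, as removed in this commit (direct call, emits a deprecation warning):
    # result = read_file_tool("example.txt")

    # New style, as used in the updated tests:
    result = read_file_tool.invoke({"filepath": "example.txt"})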
@@ -28,6 +28,7 @@ dependencies = [
"langgraph-checkpoint>=2.0.9",
"langgraph-sdk>=0.1.48",
"langchain-core>=0.3.28",
"langchain>=0.3.13",
"rich>=13.0.0",
"GitPython>=3.1",
"fuzzywuzzy==0.18.0",
@@ -1,4 +1,2 @@
[pytest]
timeout = 30
markers =
    timeout: set timeout for tests
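For context on the pytest.ini hunk above: the timeout option is an ini setting provided by the pytest-timeout plugin and sets a default per-test timeout in seconds, while a markers block only registers custom mark names so pytest does not emit PytestUnknownMarkWarning for them. Assuming pytest-timeout is installed and registers its own timeout marker, a minimal config needs only:

    [pytest]
    timeout = 30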
@@ -90,13 +90,13 @@ def test_expert_context_management():
    expert_context['files'].clear()

    # Test adding context
-    result1 = emit_expert_context("Test context 1")
+    result1 = emit_expert_context.invoke("Test context 1")
    assert "Context added" in result1
    assert len(expert_context['text']) == 1
    assert expert_context['text'][0] == "Test context 1"

    # Test adding multiple contexts
-    result2 = emit_expert_context("Test context 2")
+    result2 = emit_expert_context.invoke("Test context 2")
    assert "Context added" in result2
    assert len(expert_context['text']) == 2
    assert expert_context['text'][1] == "Test context 2"
@@ -1,5 +1,6 @@
import pytest
from pytest import mark
from langchain.schema.runnable import Runnable
from ra_aid.tools import read_file_tool

def test_basic_file_reading(tmp_path):
@@ -10,7 +11,7 @@ def test_basic_file_reading(tmp_path):
    test_file.write_text(test_content)

    # Read the file
-    result = read_file_tool(str(test_file))
+    result = read_file_tool.invoke({"filepath": str(test_file)})

    # Verify return format and content
    assert isinstance(result, dict)
@@ -26,14 +27,13 @@ def test_no_truncation(tmp_path):
    test_file.write_text(test_content)

    # Read the file
-    result = read_file_tool(str(test_file))
+    result = read_file_tool.invoke({"filepath": str(test_file)})

    # Verify no truncation occurred
    assert isinstance(result, dict)
    assert '[lines of output truncated]' not in result['content']
    assert len(result['content'].splitlines()) == line_count

@pytest.mark.timeout(30)
def test_with_truncation(tmp_path):
    """Test that files over max_lines are properly truncated"""
    # Create a test file exceeding the limit
@@ -43,7 +43,7 @@ def test_with_truncation(tmp_path):
    test_file.write_text(test_content)

    # Read the file
-    result = read_file_tool(str(test_file))
+    result = read_file_tool.invoke({"filepath": str(test_file)})

    # Verify truncation occurred correctly
    assert isinstance(result, dict)
@@ -53,7 +53,7 @@ def test_with_truncation(tmp_path):
def test_nonexistent_file():
    """Test error handling for non-existent files"""
    with pytest.raises(FileNotFoundError):
-        read_file_tool("/nonexistent/file.txt")
+        read_file_tool.invoke({"filepath": "/nonexistent/file.txt"})

def test_empty_file(tmp_path):
    """Test reading an empty file"""
@@ -62,7 +62,7 @@ def test_empty_file(tmp_path):
    test_file.write_text("")

    # Read the file
-    result = read_file_tool(str(test_file))
+    result = read_file_tool.invoke({"filepath": str(test_file)})

    # Verify return format and empty content
    assert isinstance(result, dict)
@@ -23,7 +23,7 @@ def test_shell_command_cowboy_mode(mock_console, mock_prompt, mock_run_interactive):
    """Test shell command execution in cowboy mode (no approval)"""
    _global_memory['config'] = {'cowboy_mode': True}

-    result = run_shell_command("echo test")
+    result = run_shell_command.invoke({"command": "echo test"})

    assert result['success'] is True
    assert result['return_code'] == 0
@@ -36,7 +36,7 @@ def test_shell_command_cowboy_message(mock_console, mock_prompt, mock_run_interactive):

    with patch('ra_aid.tools.shell.get_cowboy_message') as mock_get_message:
        mock_get_message.return_value = '🤠 Test cowboy message!'
-        result = run_shell_command("echo test")
+        result = run_shell_command.invoke({"command": "echo test"})

        assert result['success'] is True
        mock_console.print.assert_any_call("")
@@ -49,7 +49,7 @@ def test_shell_command_interactive_approved(mock_console, mock_prompt, mock_run_interactive):
    _global_memory['config'] = {'cowboy_mode': False}
    mock_prompt.ask.return_value = 'y'

-    result = run_shell_command("echo test")
+    result = run_shell_command.invoke({"command": "echo test"})

    assert result['success'] is True
    assert result['return_code'] == 0
@@ -67,7 +67,7 @@ def test_shell_command_interactive_rejected(mock_console, mock_prompt, mock_run_interactive):
    _global_memory['config'] = {'cowboy_mode': False}
    mock_prompt.ask.return_value = 'n'

-    result = run_shell_command("echo test")
+    result = run_shell_command.invoke({"command": "echo test"})

    assert result['success'] is False
    assert result['return_code'] == 1
@@ -86,7 +86,7 @@ def test_shell_command_execution_error(mock_console, mock_prompt, mock_run_interactive):
    _global_memory['config'] = {'cowboy_mode': True}
    mock_run_interactive.side_effect = Exception("Command failed")

-    result = run_shell_command("invalid command")
+    result = run_shell_command.invoke({"command": "invalid command"})

    assert result['success'] is False
    assert result['return_code'] == 1