- Added ask_human tool to allow human operator to answer questions asked by the agent.

This commit is contained in:
AI Christianson 2024-12-19 13:34:43 -05:00
parent 2c6a8cd000
commit dad913b7fe
5 changed files with 92 additions and 31 deletions

View File

@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Make delete_tasks tool available to planning agent.
- Get rid of implementation args as they are not used.
- Improve ripgrep tool status output.
- Add ask_human tool to allow human operator to answer questions asked by the agent.
## [0.6.4] - 2024-12-19

View File

@ -7,7 +7,7 @@ from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from ra_aid.tools import (
ask_expert, run_shell_command, run_programming_task,
ask_expert, ask_human, run_shell_command, run_programming_task,
emit_research_notes, emit_plan, emit_related_files, emit_task,
emit_expert_context, get_memory_value, emit_key_facts, delete_key_facts,
emit_key_snippets, delete_key_snippets, delete_tasks,
@ -24,24 +24,35 @@ from ra_aid.prompts import (
EXPERT_PROMPT_SECTION_RESEARCH,
EXPERT_PROMPT_SECTION_PLANNING,
EXPERT_PROMPT_SECTION_IMPLEMENTATION,
HUMAN_PROMPT_SECTION_RESEARCH,
HUMAN_PROMPT_SECTION_PLANNING
)
import time
from anthropic import APIError, APITimeoutError, RateLimitError, InternalServerError
from ra_aid.llm import initialize_llm
# Read-only tools that don't modify system state
READ_ONLY_TOOLS = [
emit_related_files,
emit_key_facts,
delete_key_facts,
emit_key_snippets,
delete_key_snippets,
list_directory_tree,
read_file_tool,
fuzzy_find_project_files,
ripgrep_search,
run_shell_command # can modify files, but we still need it for read-only tasks.
]
def get_read_only_tools(human_interaction: bool = False) -> list:
    """Return the tools available to read-only agent stages.

    Args:
        human_interaction: When True, also include the ask_human tool so
            the agent can pose questions to the human operator.

    Returns:
        A new list of tool callables for read-only work.
    """
    # Core set of tools that inspect project state.
    base_tools = [
        emit_related_files,
        emit_key_facts,
        delete_key_facts,
        emit_key_snippets,
        delete_key_snippets,
        list_directory_tree,
        read_file_tool,
        fuzzy_find_project_files,
        ripgrep_search,
        run_shell_command  # can modify files, but we still need it for read-only tasks.
    ]
    # Build a fresh list rather than mutating, so callers always get an
    # independent copy regardless of the flag.
    if human_interaction:
        return base_tools + [ask_human]
    return base_tools
READ_ONLY_TOOLS = get_read_only_tools()
# Tools that can modify files or system state
MODIFICATION_TOOLS = [
@ -116,6 +127,11 @@ Examples:
type=str,
help='The model name to use for expert knowledge queries (required for non-OpenAI providers)'
)
parser.add_argument(
'--human-interaction',
action='store_true',
help='Enable human interaction'
)
args = parser.parse_args()
@ -140,10 +156,10 @@ research_memory = MemorySaver()
planning_memory = MemorySaver()
implementation_memory = MemorySaver()
def get_research_tools(research_only: bool = False, expert_enabled: bool = True) -> list:
def get_research_tools(research_only: bool = False, expert_enabled: bool = True, human_interaction: bool = False) -> list:
"""Get the list of research tools based on mode and whether expert is enabled."""
# Start with read-only tools
tools = READ_ONLY_TOOLS.copy()
tools = get_read_only_tools(human_interaction).copy()
tools.extend(RESEARCH_TOOLS)
@ -270,15 +286,13 @@ def run_implementation_stage(base_task, tasks, plan, related_files, model, exper
task_agent = create_react_agent(model, get_implementation_tools(expert_enabled=expert_enabled), checkpointer=task_memory)
# Construct task-specific prompt
expert_section = EXPERT_PROMPT_SECTION_IMPLEMENTATION if expert_enabled else ""
task_prompt = (IMPLEMENTATION_PROMPT + expert_section).format(
task_prompt = (IMPLEMENTATION_PROMPT).format(
plan=plan,
key_facts=get_memory_value('key_facts'),
key_snippets=get_memory_value('key_snippets'),
task=task,
related_files="\n".join(related_files),
base_task=base_task,
expert_section=expert_section
base_task=base_task
)
# Run agent for this task
@ -312,7 +326,7 @@ def run_research_subtasks(base_task: str, config: dict, model, expert_enabled: b
)
# Run the subtask agent
subtask_prompt = f"Base Task: {base_task}\nResearch Subtask: {subtask}\n\n{RESEARCH_PROMPT}"
subtask_prompt = f"Base Task: {base_task}\nResearch Subtask: {subtask}\n\n{RESEARCH_PROMPT.format(base_task=base_task, research_only_note='')}"
run_agent_with_retry(subtask_agent, subtask_prompt, config)
@ -362,13 +376,19 @@ def main():
# Create research agent
research_agent = create_react_agent(
model,
get_research_tools(research_only=_global_memory.get('config', {}).get('research_only', False), expert_enabled=expert_enabled),
get_research_tools(
research_only=_global_memory.get('config', {}).get('research_only', False),
expert_enabled=expert_enabled,
human_interaction=args.human_interaction
),
checkpointer=research_memory
)
expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_RESEARCH if args.human_interaction else ""
research_prompt = RESEARCH_PROMPT.format(
expert_section=expert_section,
human_section=human_section,
base_task=base_task,
research_only_note='' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'
)
@ -395,13 +415,16 @@ def main():
planning_agent = create_react_agent(model, get_planning_tools(expert_enabled=expert_enabled), checkpointer=planning_memory)
expert_section = EXPERT_PROMPT_SECTION_PLANNING if expert_enabled else ""
human_section = HUMAN_PROMPT_SECTION_PLANNING if args.human_interaction else ""
planning_prompt = PLANNING_PROMPT.format(
expert_section=expert_section,
human_section=human_section,
base_task=base_task,
research_notes=get_memory_value('research_notes'),
related_files="\n".join(get_related_files()),
key_facts=get_memory_value('key_facts'),
key_snippets=get_memory_value('key_snippets'),
base_task=base_task,
related_files="\n".join(get_related_files()),
expert_section=expert_section
research_only_note='' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'
)
# Run planning agent

View File

@ -33,7 +33,32 @@ Expert Consultation:
- Ask the expert to perform deep analysis
- Wait for expert guidance before proceeding with implementation
"""
# Human-specific prompt sections
# Each constant below is substituted into a stage prompt's {human_section}
# placeholder only when the --human-interaction CLI flag is enabled;
# otherwise an empty string is substituted in its place.

# Fills {human_section} in RESEARCH_PROMPT for the research stage.
HUMAN_PROMPT_SECTION_RESEARCH = """
Human Interaction:
If you need clarification from the human operator:
- Ask clear, specific questions
- Use the ask_human tool for queries
- Wait for human response before proceeding
"""

# Fills {human_section} in PLANNING_PROMPT for the planning stage.
HUMAN_PROMPT_SECTION_PLANNING = """
Human Interaction:
If you need requirements clarification:
- Use ask_human for specific planning questions
- Await human input before finalizing plans
- Keep questions focused and context-aware
"""

# Intended for the implementation prompt's {human_section} placeholder.
# NOTE(review): the implementation prompt template contains {human_section}
# (and {expert_section}), but the format() call in run_implementation_stage
# does not appear to supply those keys — likely a KeyError at runtime; verify.
HUMAN_PROMPT_SECTION_IMPLEMENTATION = """
Human Interaction:
If you need implementation guidance:
- Ask the human operator using ask_human
- Keep questions specific to the current task
- Wait for responses before proceeding
"""
# Research stage prompt - guides initial codebase analysis
RESEARCH_PROMPT = """User query: {base_task} --keep it simple
@ -142,6 +167,7 @@ Be thorough on locating all potential change sites/gauging blast radius.
If this is a top-level README.md or docs folder, start there. If relevant tests exist, run them upfront as part of the research phase to establish a baseline.
{expert_section}
{human_section}
"""
# Planning stage prompt - guides task breakdown and implementation planning
@ -208,6 +234,7 @@ Guidelines:
Do not implement anything yet.
{expert_section}
{human_section}
"""
@ -253,6 +280,7 @@ Testing:
- If you add or change any unit tests, run them using run_shell_command and ensure they pass (check docs or analyze directory structure/test files to infer how to run them.)
- Start with running very specific tests, then move to more general/complete test suites.
{expert_section}
{human_section}
- Only test UI components if there is already a UI testing system in place.
- Only test things that can be tested by an automated process.

View File

@ -1,5 +1,6 @@
from .shell import run_shell_command
from .research import monorepo_detected, existing_project_detected, ui_detected
from .human import ask_human
from .programmer import run_programming_task
from .expert import ask_expert, emit_expert_context
from .read_file import read_file_tool
@ -41,5 +42,6 @@ __all__ = [
'swap_task_order',
'monorepo_detected',
'existing_project_detected',
'ui_detected'
'ui_detected',
'ask_human'
]

View File

@ -1,21 +1,28 @@
"""Tool for asking questions to the human user."""
from langchain.tools import tool
from langchain_core.tools import tool
from rich.console import Console
from rich.prompt import Prompt
from rich.panel import Panel
from rich.markdown import Markdown
console = Console()
@tool
def ask_human(question: str) -> str:
"""Ask the human user a question and get their response.
"""Ask the human user a question with a nicely formatted display.
Args:
question: The question to ask the human user
question: The question to ask the human user (supports markdown)
Returns:
The user's response as a string
"""
console.print(f"\n[bold yellow]Human Query:[/] {question}")
response = Prompt.ask("Your response")
console.print(Panel(
Markdown(question),
title="💭 Question for Human",
border_style="yellow bold"
))
response = Prompt.ask("\nYour response")
print()
return response