From 2c6a8cd00048a7c0a8dc226cb6d6d667c7adb680 Mon Sep 17 00:00:00 2001
From: AI Christianson
Date: Thu, 19 Dec 2024 13:12:05 -0500
Subject: [PATCH] Fix prompt template consistency.

---
 ra_aid/__main__.py    | 19 ++++++++++---------
 ra_aid/prompts.py     |  9 ++++++++-
 ra_aid/tools/human.py | 21 +++++++++++++++++++++
 3 files changed, 39 insertions(+), 10 deletions(-)
 create mode 100644 ra_aid/tools/human.py

diff --git a/ra_aid/__main__.py b/ra_aid/__main__.py
index 68afccc..244bb5f 100644
--- a/ra_aid/__main__.py
+++ b/ra_aid/__main__.py
@@ -277,7 +277,8 @@ def run_implementation_stage(base_task, tasks, plan, related_files, model, exper
         key_snippets=get_memory_value('key_snippets'),
         task=task,
         related_files="\n".join(related_files),
-        base_task=base_task
+        base_task=base_task,
+        expert_section=expert_section
     )
 
     # Run agent for this task
@@ -366,12 +367,11 @@ def main():
     )
     expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
 
-    research_prompt = f"""User query: {base_task} --keep it simple
-
-{RESEARCH_PROMPT}
-{expert_section}
-
-Be very thorough in your research and emit lots of snippets, key facts. If you take more than a few steps, be eager to emit research subtasks.{'' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'}"""
+    research_prompt = RESEARCH_PROMPT.format(
+        expert_section=expert_section,
+        base_task=base_task,
+        research_only_note='' if args.research_only else ' Only request implementation if the user explicitly asked for changes to be made.'
+    )
 
     # Run research agent and check for one-shot completion
     output = run_agent_with_retry(research_agent, research_prompt, config)
@@ -395,12 +395,13 @@ Be very thorough in your research and emit lots of snippets, key facts. If you t
     planning_agent = create_react_agent(model, get_planning_tools(expert_enabled=expert_enabled), checkpointer=planning_memory)
     expert_section = EXPERT_PROMPT_SECTION_PLANNING if expert_enabled else ""
 
-    planning_prompt = (PLANNING_PROMPT + expert_section).format(
+    planning_prompt = PLANNING_PROMPT.format(
         research_notes=get_memory_value('research_notes'),
         key_facts=get_memory_value('key_facts'),
         key_snippets=get_memory_value('key_snippets'),
         base_task=base_task,
-        related_files="\n".join(get_related_files())
+        related_files="\n".join(get_related_files()),
+        expert_section=expert_section
     )
 
     # Run planning agent
diff --git a/ra_aid/prompts.py b/ra_aid/prompts.py
index bc3048a..a88a4bf 100644
--- a/ra_aid/prompts.py
+++ b/ra_aid/prompts.py
@@ -35,7 +35,10 @@ Expert Consultation:
 """
 
 # Research stage prompt - guides initial codebase analysis
-RESEARCH_PROMPT = """
+RESEARCH_PROMPT = """User query: {base_task} --keep it simple
+
+Be very thorough in your research and emit lots of snippets, key facts. If you take more than a few steps, be eager to emit research subtasks.{research_only_note}
+
 Objective
 
 Your only goal is to thoroughly research what currently exists in the codebase—nothing else.
@@ -137,6 +140,8 @@ Decision on Implementation
 
 Be thorough on locating all potential change sites/gauging blast radius.
 If this is a top-level README.md or docs folder, start there. If relevant tests exist, run them upfront as part of the research phase to establish a baseline.
+
+{expert_section}
 """
 
 # Planning stage prompt - guides task breakdown and implementation planning
@@ -201,6 +206,8 @@ Guidelines:
 
 The description should be only as detailed as warranted by the complexity of the request.
 
 Do not implement anything yet.
+
+{expert_section}
 """
 
diff --git a/ra_aid/tools/human.py b/ra_aid/tools/human.py
new file mode 100644
index 0000000..7416c1a
--- /dev/null
+++ b/ra_aid/tools/human.py
@@ -0,0 +1,21 @@
+"""Tool for asking questions to the human user."""
+
+from langchain.tools import tool
+from rich.console import Console
+from rich.prompt import Prompt
+
+console = Console()
+
+@tool
+def ask_human(question: str) -> str:
+    """Ask the human user a question and get their response.
+
+    Args:
+        question: The question to ask the human user
+
+    Returns:
+        The user's response as a string
+    """
+    console.print(f"\n[bold yellow]Human Query:[/] {question}")
+    response = Prompt.ask("Your response")
+    return response
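
Note on the prompt-template pattern: after this change each stage prompt is a single template whose placeholders, including the optional expert section, are filled by one str.format() call instead of concatenating the expert text onto the template. The sketch below illustrates the pattern only; the prompt text is trimmed, the expert section body is invented, and build_research_prompt is a hypothetical helper, so only the placeholder names and the format() call mirror the patch.

# Sketch of the single-template pattern (trimmed prompt text; the helper and the
# expert section body are illustrative, not taken from ra_aid).
RESEARCH_PROMPT = """User query: {base_task} --keep it simple

Be very thorough in your research and emit lots of snippets, key facts.{research_only_note}

{expert_section}
"""

EXPERT_PROMPT_SECTION_RESEARCH = "Expert Consultation: consult the expert when a question is hard to settle from the code alone."


def build_research_prompt(base_task: str, expert_enabled: bool, research_only: bool) -> str:
    # With the expert disabled the placeholder collapses to an empty string,
    # so the same template serves both configurations.
    expert_section = EXPERT_PROMPT_SECTION_RESEARCH if expert_enabled else ""
    research_only_note = "" if research_only else (
        " Only request implementation if the user explicitly asked for changes to be made."
    )
    return RESEARCH_PROMPT.format(
        base_task=base_task,
        expert_section=expert_section,
        research_only_note=research_only_note,
    )


if __name__ == "__main__":
    print(build_research_prompt("document the config loader", expert_enabled=True, research_only=False))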
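
Usage note for the new ask_human tool: because it is built with LangChain's @tool decorator it is a regular structured tool, so it can be passed in the tool list given to create_react_agent or invoked directly. The patch does not wire it into any agent yet; the call below is an assumed example of direct invocation (the dict-argument form of .invoke() is the standard interface for LangChain tools), not part of the change.

# Assumed usage example; not part of the patch. Requires the ra_aid package
# plus langchain and rich to be installed.
from ra_aid.tools.human import ask_human

if __name__ == "__main__":
    # LangChain tools accept their arguments as a dict via .invoke().
    answer = ask_human.invoke({"question": "Should the research stage also cover the docs folder?"})
    print(f"Recorded answer: {answer}")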