diff --git a/ra_aid/agent_utils.py b/ra_aid/agent_utils.py
index a05c6c4..18c4e13 100644
--- a/ra_aid/agent_utils.py
+++ b/ra_aid/agent_utils.py
@@ -161,7 +161,7 @@ def run_research_agent(
     return run_agent_with_retry(agent, prompt, run_config)
 
 def run_web_research_agent(
-    base_task_or_query: str,
+    query: str,
     model,
     *,
     expert_enabled: bool = False,
@@ -174,7 +174,7 @@ def run_web_research_agent(
     """Run a web research agent with the given configuration.
 
     Args:
-        base_task_or_query: The main task or query for web research
+        query: The main query for web research
         model: The LLM model to use
         expert_enabled: Whether expert mode is enabled
         hil: Whether human-in-the-loop mode is enabled
@@ -218,7 +218,7 @@ def run_web_research_agent(
 
     # Build prompt
     prompt = WEB_RESEARCH_PROMPT.format(
-        base_task=base_task_or_query,
+        web_research_query=query,
         expert_section=expert_section,
         human_section=human_section,
         key_facts=key_facts,
diff --git a/ra_aid/tool_configs.py b/ra_aid/tool_configs.py
index d4ce9da..bd25746 100644
--- a/ra_aid/tool_configs.py
+++ b/ra_aid/tool_configs.py
@@ -117,7 +117,8 @@ def get_web_research_tools(expert_enabled: bool = True) -> list:
         list: List of tools configured for web research
     """
     tools = [
-        web_search_tavily
+        web_search_tavily,
+        emit_research_notes
     ]
 
     if expert_enabled:
diff --git a/ra_aid/tools/agent.py b/ra_aid/tools/agent.py
index 372edbc..51dfa3e 100644
--- a/ra_aid/tools/agent.py
+++ b/ra_aid/tools/agent.py
@@ -111,10 +111,12 @@ def request_web_research(query: str) -> ResearchResult:
     success = True
     reason = None
 
+    web_research_notes = []
+    original_research_notes = _global_memory.get('research_notes', [])  # snapshot BEFORE try: finally restores it even if the import/call below raises
     try:
         # Run web research agent
         from ..agent_utils import run_web_research_agent
 
         result = run_web_research_agent(
             query,
             model,
@@ -123,6 +125,7 @@ def request_web_research(query: str) -> ResearchResult:
             console_message=query,
             config=config
         )
+        web_research_notes = _global_memory.get('research_notes', [])
     except AgentInterrupt:
         print()
         response = ask_human.invoke({"question": "Why did you interrupt me?"})
@@ -135,6 +138,7 @@ def request_web_research(query: str) -> ResearchResult:
         success = False
         reason = f"error: {str(e)}"
     finally:
+        _global_memory['research_notes'] = original_research_notes
         # Get completion message if available
         completion_message = _global_memory.get('completion_message', 'Task was completed successfully.' if success else None)
 
@@ -151,9 +155,7 @@ def request_web_research(query: str) -> ResearchResult:
     return {
         "work_log": work_log,
         "completion_message": completion_message,
-        "key_facts": get_memory_value("key_facts"),
-        "related_files": get_related_files(),
-        "research_notes": get_memory_value("research_notes"),
+        "web_research_notes": web_research_notes,
         "key_snippets": get_memory_value("key_snippets"),
         "success": success,
         "reason": reason
diff --git a/ra_aid/tools/read_file.py b/ra_aid/tools/read_file.py
index 383a2a9..edf72eb 100644
--- a/ra_aid/tools/read_file.py
+++ b/ra_aid/tools/read_file.py
@@ -5,6 +5,7 @@
 from typing import Dict, Optional, Tuple
 from langchain_core.tools import tool
 from rich.console import Console
 from rich.panel import Panel
+from rich.markdown import Markdown
 from ra_aid.text.processing import truncate_output
 console = Console()
diff --git a/ra_aid/tools/web_search_tavily.py b/ra_aid/tools/web_search_tavily.py
index 98a98b6..e7cc8b7 100644
--- a/ra_aid/tools/web_search_tavily.py
+++ b/ra_aid/tools/web_search_tavily.py
@@ -2,6 +2,11 @@ import os
 from typing import Dict
 from tavily import TavilyClient
 from langchain_core.tools import tool
+from rich.console import Console
+from rich.panel import Panel
+from rich.markdown import Markdown
+
+console = Console()
 
 @tool
 def web_search_tavily(query: str) -> Dict:
@@ -15,5 +20,6 @@ def web_search_tavily(query: str) -> Dict:
         Dict containing search results from Tavily
     """
     client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
+    console.print(Panel(Markdown(query), title="🔍 Searching Tavily", border_style="bright_blue"))
     search_result = client.search(query=query)
     return search_result