expert file context

This commit is contained in:
AI Christianson 2024-12-11 19:51:22 -05:00
parent 33f5f9bbbb
commit 56d067b2dc
3 changed files with 68 additions and 6 deletions

View File

@ -19,9 +19,8 @@ from ra_aid.tools import (
fuzzy_find_project_files, ripgrep_search, list_directory_tree
)
from ra_aid.tools.note_tech_debt import BORDER_STYLE, TECH_DEBT_NOTE_EMOJI
from ra_aid.tools.memory import _global_memory
from ra_aid.tools.memory import _global_memory, get_related_files
from ra_aid import print_agent_output, print_stage_header, print_task_header
from ra_aid.tools.programmer import related_files
from ra_aid.prompts import (
RESEARCH_PROMPT,
PLANNING_PROMPT,
@ -424,7 +423,7 @@ Be very thorough in your research and emit lots of snippets, key facts. If you t
key_facts=get_memory_value('key_facts'),
key_snippets=get_memory_value('key_snippets'),
base_task=base_task,
related_files="\n".join(related_files)
related_files="\n".join(get_related_files())
)
# Run planning agent
@ -445,7 +444,7 @@ Be very thorough in your research and emit lots of snippets, key facts. If you t
base_task,
get_memory_value('tasks'),
get_memory_value('plan'),
related_files
get_related_files()
)
except TaskCompletedException:
sys.exit(0)

View File

@ -26,6 +26,7 @@ You must:
Describe what exists in these files (file names, directory structures, documentation found, code patterns, dependencies).
Do so by incrementally and systematically exploring the filesystem with careful directory listing tool calls.
You can use fuzzy file search to quickly find relevant files matching a search pattern.
Use ripgrep_search extensively to perform *exhaustive* searches for all references to anything that might be changed as part of the base-level task.
You must not:

View File

@ -1,9 +1,11 @@
from typing import List
import os
from langchain_core.tools import tool
from rich.console import Console
from rich.panel import Panel
from rich.markdown import Markdown
from langchain_openai import ChatOpenAI
from .memory import get_memory_value
from .memory import get_memory_value, get_related_files
console = Console()
model = ChatOpenAI(model_name="o1-preview")
@ -39,6 +41,61 @@ def emit_expert_context(context: str) -> str:
return f"Context added."
def read_files_with_limit(file_paths: List[str], max_lines: int = 10000) -> str:
    """Read multiple files and concatenate contents, stopping at line limit.

    Args:
        file_paths: List of file paths to read
        max_lines: Maximum total lines to read (default: 10000)

    Returns:
        String containing concatenated file contents with headers

    Note:
        - Each file's contents will be prefaced with its path as a header
        - Stops reading files when max_lines limit is reached
        - Files that would exceed the line limit are truncated
    """
    total_lines = 0
    contents: List[str] = []

    for path in file_paths:
        # Budget exhausted: skip remaining files entirely instead of emitting
        # an empty "## File:" header plus a truncation note for each of them.
        if total_lines >= max_lines:
            break
        try:
            if not os.path.exists(path):
                console.print(f"Warning: File not found: {path}", style="yellow")
                continue

            file_content = []
            truncated = False
            with open(path, 'r', encoding='utf-8') as f:
                for line in f:
                    if total_lines >= max_lines:
                        truncated = True
                        break
                    file_content.append(line)
                    # Count only real content lines; the truncation marker is
                    # presentation text and must not consume the budget.
                    total_lines += 1

            if file_content:
                contents.append(f'\n## File: {path}\n')
                contents.append(''.join(file_content))
                if truncated:
                    contents.append(f"\n... truncated after {max_lines} lines ...")
        except Exception as e:
            # Best-effort: report and continue so one unreadable file does
            # not abort the whole batch.
            console.print(f"Error reading file {path}: {str(e)}", style="red")
            continue

    return ''.join(contents)
def read_related_files() -> str:
    """Read related files from memory.

    Returns:
        String containing concatenated file contents with headers
    """
    tracked = get_related_files()
    # Nothing tracked in memory means nothing to read.
    return read_files_with_limit(list(tracked), max_lines=10000) if tracked else ''
@tool("ask_expert")
def ask_expert(question: str) -> str:
"""Ask a question to an expert AI model.
@ -66,6 +123,11 @@ def ask_expert(question: str) -> str:
# Build query with context and key facts
query_parts = []
# Add related file contents if any exist first
related_contents = read_related_files()
if related_contents:
query_parts.extend(['# Related Files', related_contents])
# Add key facts if they exist
key_facts = get_memory_value('key_facts')
if key_facts and len(key_facts) > 0:
@ -98,7 +160,7 @@ def ask_expert(question: str) -> str:
border_style="yellow"
))
# Clear context after use
# Clear context after use (only after successful panel display)
expert_context.clear()
# Get response