create compatible class structure

AI Christianson 2024-12-28 14:02:52 -05:00
parent 377a670ac8
commit c704117444
1 changed file with 135 additions and 157 deletions

@@ -1,8 +1,11 @@
 import os
+import uuid
 from dotenv import load_dotenv
+from ra_aid.agent_utils import run_agent_with_retry
+from typing import Dict, Any, Generator, List, Optional
+from langchain_core.messages import AIMessage, HumanMessage
 from langchain_openai import ChatOpenAI
 from langchain_core.tools import tool
-from langchain_core.messages import HumanMessage, SystemMessage
 from ra_aid.tools.list_directory import list_directory_tree
 from ra_aid.tool_configs import get_read_only_tools
 import inspect
@@ -15,44 +18,14 @@ console = Console()
 # Load environment variables
 load_dotenv()
 
-def get_function_info(func):
-    """
-    Returns a well-formatted string containing the function signature and docstring,
-    designed to be easily readable by both humans and LLMs.
-    """
-    # Get signature
-    signature = inspect.signature(func)
-
-    # Get docstring - use getdoc to clean up indentation
-    docstring = inspect.getdoc(func)
-    if docstring is None:
-        docstring = "No docstring provided"
-
-    # Format full signature including return type
-    full_signature = f"{func.__name__}{signature}"
-
-    # Build the complete string
-    info = f"""{full_signature}
-\"\"\"
-{docstring}
-\"\"\" """
-
-    return info
-
 @tool
 def check_weather(location: str) -> str:
-    """
-    Gets the weather at the given location.
-    """
+    """Gets the weather at the given location."""
     return f"The weather in {location} is sunny!"
 
 @tool
 def output_message(message: str, prompt_user_input: bool = False) -> str:
-    """
-    Outputs a message to the user, optionally prompting for input.
-    """
-    print()
+    """Outputs a message to the user, optionally prompting for input."""
     console.print(Panel(Markdown(message.strip())))
     if prompt_user_input:
         user_input = input("\n> ").strip()
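The helper deleted in this hunk is not gone: the next hunk re-adds it as a method on the new CiaynAgent class. For a rough sense of the string it renders per tool, here is a minimal standalone sketch of the same formatting applied to the check_weather tool above; the expected output in the trailing comments is approximate.

```python
import inspect

def check_weather(location: str) -> str:
    """Gets the weather at the given location."""
    return f"The weather in {location} is sunny!"

# Same steps as get_function_info: signature via inspect.signature,
# docstring via inspect.getdoc, joined into one LLM-readable block.
signature = inspect.signature(check_weather)
docstring = inspect.getdoc(check_weather) or "No docstring provided"
info = f"""{check_weather.__name__}{signature}
\"\"\"
{docstring}
\"\"\" """
print(info)
# check_weather(location: str) -> str
# """
# Gets the weather at the given location.
# """
```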
@@ -60,137 +33,142 @@ def output_message(message: str, prompt_user_input: bool = False) -> str:
         return user_input
     return ""
 
-def evaluate_response(code: str, tools: list) -> any:
-    """
-    Evaluates a single function call and returns its result
-
-    Args:
-        code (str): The code to evaluate
-        tools (list): List of tool objects that have a .func property
-
-    Returns:
-        any: Result of the code evaluation
-    """
-    # Create globals dictionary from tool functions
-    globals_dict = {
-        tool.func.__name__: tool.func
-        for tool in tools
-    }
-    try:
-        # Using eval() instead of exec() since we're evaluating a single expression
-        result = eval(code, globals_dict)
-        return result
-    except Exception as e:
-        print(f"Code:\n\n{code}\n\n")
-        print(f"Error executing code: {str(e)}")
-        return f"Error executing code: {str(e)}"
-
-def create_chat_interface():
-    # Initialize the chat model
-    chat = ChatOpenAI(
-        # api_key=os.getenv("OPENROUTER_API_KEY"),
-        api_key=os.getenv("DEEPSEEK_API_KEY"),
-        temperature=0.7,
-        # base_url="https://openrouter.ai/api/v1",
-        base_url="https://api.deepseek.com/v1",
-        # model="deepseek/deepseek-chat"
-        model="deepseek-chat"
-        # model="openai/gpt-4o-mini"
-        # model="qwen/qwen-2.5-coder-32b-instruct"
-        # model="qwen/qwen-2.5-72b-instruct"
-    )
-
-    # Chat loop
-    print("Welcome to the Chat Interface! (Type 'quit' to exit)")
-    chat_history = []
-    last_result = None
-    first_iteration = True
-
-    tools = get_read_only_tools(True, True)
-    tools.extend([output_message])
-
-    available_functions = []
-    for t in tools:
-        available_functions.append(get_function_info(t.func))
-
-    while True:
+class CiaynAgent:
+    def get_function_info(self, func):
+        """
+        Returns a well-formatted string containing the function signature and docstring,
+        designed to be easily readable by both humans and LLMs.
+        """
+        signature = inspect.signature(func)
+        docstring = inspect.getdoc(func)
+        if docstring is None:
+            docstring = "No docstring provided"
+        full_signature = f"{func.__name__}{signature}"
+        info = f"""{full_signature}
+\"\"\"
+{docstring}
+\"\"\" """
+        return info
+
+    def __init__(self, model, tools: list):
+        """Initialize the agent with a model and list of tools."""
+        self.model = model
+        self.tools = tools
+        self.available_functions = []
+        for t in tools:
+            self.available_functions.append(self.get_function_info(t.func))
+
+    def _build_prompt(self, last_result: Optional[str] = None) -> str:
+        """Build the prompt for the agent including available tools and context."""
         base_prompt = ""
-        # Add the last result to the prompt if it's not the first iteration
-        if not first_iteration and last_result is not None:
+        if last_result is not None:
             base_prompt += f"\n<last result>{last_result}</last result>"
 
-        # Construct the tool documentation and context
         base_prompt += f"""
 <available functions>
-{"\n\n".join(available_functions)}
+{"\n\n".join(self.available_functions)}
 </available functions>
-"""
-
-        base_prompt += """
+
 <agent instructions>
 You are a ReAct agent. You run in a loop and use ONE of the available functions per iteration.
 If the current query does not require a function call, just use output_message to say what you would normally say.
 The result of that function call will be given to you in the next message.
 Call one function at a time. Function arguments can be complex objects, long strings, etc. if needed.
 The user cannot see the results of function calls, so you have to explicitly call output_message if you want them to see something.
 You must always respond with a single line of python that calls one of the available tools.
 Use as many steps as you need to in order to fully complete the task.
 Start by asking the user what they want.
 </agent instructions>
 
 <example response>
 check_weather("London")
 </example response>
 
 <example response>
-output_message(\"\"\"
-How can I help you today?
-\"\"\", True)
+output_message(\"\"\"How can I help you today?\"\"\", True)
 </example response>
-"""
 
-        base_prompt += "\nOutput **ONLY THE CODE** and **NO MARKDOWN BACKTICKS**"
-
-        # Add user message to history
-        # Remove the previous messages if they exist
-        # if len(chat_history) > 1:
-        #     chat_history.pop()  # Remove the last assistant message
-        #     chat_history.pop()  # Remove the last human message
-        chat_history.append(HumanMessage(content=base_prompt))
-
+Output **ONLY THE CODE** and **NO MARKDOWN BACKTICKS**"""
+        return base_prompt
+
+    def _execute_tool(self, code: str) -> str:
+        """Execute a tool call and return its result."""
+        globals_dict = {
+            tool.func.__name__: tool.func
+            for tool in self.tools
+        }
         try:
-            # Get response from model
-            # print("PRECHAT")
-            response = chat.invoke(chat_history)
-            # print("POSTCHAT")
-
-            # # Print the code response
-            # print("\nAssistant generated code:")
-            # print(response.content)
-
-            # Evaluate the code
-            # print("\nExecuting code:")
-            # print("PREEVAL")
-            last_result = evaluate_response(response.content.strip(), tools)
-            # print("POSTEVAL")
-            # if last_result is not None:
-            #     print(f"Result: {last_result}")
-
-            # Add assistant response to history
-            chat_history.append(response)
-
-            # Set first_iteration to False after the first loop
-            first_iteration = False
-            # print("LOOP")
+            result = eval(code.strip(), globals_dict)
+            return result
         except Exception as e:
-            print(f"\nError: {str(e)}")
+            error_msg = f"Error executing code: {str(e)}"
+            console.print(f"[red]Error:[/red] {error_msg}")
+            return error_msg
+
+    def _create_agent_chunk(self, content: str) -> Dict[str, Any]:
+        """Create an agent chunk in the format expected by print_agent_output."""
+        return {
+            "agent": {
+                "messages": [AIMessage(content=content)]
+            }
+        }
+
+    def _create_error_chunk(self, content: str) -> Dict[str, Any]:
+        """Create an error chunk in the format expected by print_agent_output."""
+        return {
+            "tools": {
+                "messages": [{"status": "error", "content": content}]
+            }
+        }
+
+    def stream(self, messages_dict: Dict[str, List[Any]], config: Dict[str, Any] = None) -> Generator[Dict[str, Any], None, None]:
+        """Stream agent responses in a format compatible with print_agent_output."""
+        initial_messages = messages_dict.get("messages", [])
+        chat_history = []
+        last_result = None
+        first_iteration = True
+
+        while True:
+            base_prompt = self._build_prompt(None if first_iteration else last_result)
+            chat_history.append(HumanMessage(content=base_prompt))
+
+            try:
+                full_history = initial_messages + chat_history
+                response = self.model.invoke(full_history)
+                last_result = self._execute_tool(response.content)
+                chat_history.append(response)
+                first_iteration = False
+                yield {}
+            except Exception as e:
+                error_msg = f"Error: {str(e)}"
+                yield self._create_error_chunk(error_msg)
+                break
 
 if __name__ == "__main__":
-    create_chat_interface()
+    # Initialize the chat model
+    chat = ChatOpenAI(
+        api_key=os.getenv("OPENROUTER_API_KEY"),
+        temperature=0.7,
+        base_url="https://openrouter.ai/api/v1",
+        model="qwen/qwen-2.5-coder-32b-instruct"
+    )
+
+    # Get tools
+    tools = get_read_only_tools(True, True)
+    tools.append(output_message)
+
+    # Initialize agent
+    agent = CiaynAgent(chat, tools)
+
+    # Test chat prompt
+    test_prompt = "Find the tests in this codebase."
+
+    # Run the agent using run_agent_with_retry
+    result = run_agent_with_retry(agent, test_prompt, {"configurable": {"thread_id": str(uuid.uuid4())}})
+
+    # Initial greeting
+    print("Welcome to the Chat Interface! (Type 'quit' to exit)")