feat(agent_utils.py): add agent_type retrieval to enhance fallback handling logic
feat(ciayn_agent.py): implement chat_history in CiaynAgent for improved context management during tool execution
refactor(ciayn_agent.py): streamline fallback response handling and logging for better clarity and maintainability
commit 646d509c22
parent e508e4d1f2
agent_utils.py
@@ -874,6 +874,7 @@ def run_agent_with_retry(
     _max_test_retries = config.get("max_test_cmd_retries", DEFAULT_MAX_TEST_CMD_RETRIES)
     auto_test = config.get("auto_test", False)
     original_prompt = prompt
+    agent_type = get_agent_type(agent)
     msg_list = [HumanMessage(content=prompt)]

     with InterruptibleSection():
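The new agent_type value drives the fallback routing in the next hunk. The helper's real body is not part of this diff; the sketch below is hypothetical and only reflects what is observable from the call site, namely that non-CiaynAgent agents are treated as "React".

def get_agent_type(agent) -> str:
    """Hypothetical sketch: classify the agent driving run_agent_with_retry.

    Only the "React" return value is visible from the call site in this diff;
    the real helper lives in agent_utils.py and may differ.
    """
    # CiaynAgent instances manage their own chat_history; everything else is
    # treated as a LangGraph-style React agent whose messages live in msg_list.
    if type(agent).__name__ == "CiaynAgent":
        return "CiaynAgent"
    return "React"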
@@ -898,11 +899,18 @@ def run_agent_with_retry(
                 return "Agent run completed successfully"
             except ToolExecutionError as e:
                 print("except ToolExecutionError in AGENT UTILS")
-                if not isinstance(agent, CiaynAgent):
                 logger.debug("AGENT UTILS ToolExecutionError called!")
                 fallback_response = fallback_handler.handle_failure(e, agent)
                 if fallback_response:
+                    if agent_type == "React":
                         msg_list.extend(fallback_response)
+                    else:
+                        agent.chat_history.extend(fallback_response)
+                        agent.chat_history.append(
+                            HumanMessage(
+                                content="Fallback tool handler successfully ran your tool call. See last message for result."
+                            )
+                        )
                 continue
             except (KeyboardInterrupt, AgentInterrupt):
                 raise
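This hunk replaces the old isinstance guard with a dispatch on agent_type: a React agent gets the fallback messages appended to msg_list, while a CiaynAgent receives them on its own chat_history plus a confirmation message. A standalone sketch of that routing, using the names from the diff (only the route_fallback wrapper itself is hypothetical):

from langchain_core.messages import HumanMessage

def route_fallback(agent, agent_type, msg_list, fallback_response):
    """Append fallback messages where the given agent actually reads them."""
    if agent_type == "React":
        # React agents consume the running message list passed to the graph.
        msg_list.extend(fallback_response)
    else:
        # CiaynAgent owns its history, so inject the result there and tell the
        # agent where to look for it.
        agent.chat_history.extend(fallback_response)
        agent.chat_history.append(
            HumanMessage(
                content="Fallback tool handler successfully ran your tool call. "
                        "See last message for result."
            )
        )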
ciayn_agent.py
@@ -98,6 +98,7 @@ class CiaynAgent:
         self.tools = tools
         self.max_history_messages = max_history_messages
         self.max_tokens = max_tokens
+        self.chat_history = []
         self.available_functions = []
         for t in tools:
             self.available_functions.append(get_function_info(t.func))
@@ -105,6 +106,9 @@ class CiaynAgent:
         self.tool_failure_current_provider = None
         self.tool_failure_current_model = None
         self.fallback_handler = FallbackHandler(config, tools)
+        self.sys_message = SystemMessage(
+            "Execute efficiently yet completely as a fully autonomous agent."
+        )

     def _build_prompt(self, last_result: Optional[str] = None) -> str:
         """Build the prompt for the agent including available tools and context."""
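Taken together, the two __init__ hunks give every CiaynAgent a persistent chat_history list and a prebuilt sys_message instead of constructing the SystemMessage inside stream. A minimal sketch of the resulting constructor state, assuming placeholder defaults for the elided parameters (the class name and defaults below are illustrative, not the real signature):

from langchain_core.messages import SystemMessage

class CiaynAgentSketch:
    def __init__(self, tools, max_history_messages=50, max_tokens=100_000):
        self.tools = tools
        self.max_history_messages = max_history_messages
        self.max_tokens = max_tokens
        # New: history now lives on the instance, so fallback handlers outside
        # stream() can append messages between iterations.
        self.chat_history = []
        # New: built once and reused on every model.invoke call.
        self.sys_message = SystemMessage(
            "Execute efficiently yet completely as a fully autonomous agent."
        )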
@@ -348,51 +352,47 @@ Output **ONLY THE CODE** and **NO MARKDOWN BACKTICKS**"""
     ) -> Generator[Dict[str, Any], None, None]:
         """Stream agent responses in a format compatible with print_agent_output."""
         initial_messages = messages_dict.get("messages", [])
-        chat_history = []
+        # self.chat_history = []
         last_result = None
         first_iteration = True

         while True:
             base_prompt = self._build_prompt(None if first_iteration else last_result)
-            chat_history.append(HumanMessage(content=base_prompt))
+            self.chat_history.append(HumanMessage(content=base_prompt))

-            full_history = self._trim_chat_history(initial_messages, chat_history)
-            response = self.model.invoke(
-                [
-                    SystemMessage(
-                        "Execute efficiently yet completely as a fully autonomous agent."
-                    )
-                ]
-                + full_history
-            )
+            full_history = self._trim_chat_history(initial_messages, self.chat_history)
+            response = self.model.invoke([self.sys_message] + full_history)

             try:
                 logger.debug(f"Code generated by agent: {response.content}")
                 last_result = self._execute_tool(response)
-                chat_history.append(response)
+                self.chat_history.append(response)
                 first_iteration = False
                 yield {}

             except ToolExecutionError as e:
-                fallback_response = self.fallback_handler.handle_failure(e, self)
-                print(f"fallback_response={fallback_response}")
-                if fallback_response:
-                    hm = HumanMessage(
-                        content="The fallback handler has fixed your tool call results are in the last System message."
-                    )
-                    chat_history.extend(fallback_response)
-                    chat_history.append(hm)
-                    logger.debug("Appended fallback response to chat history.")
-                    yield {}
-                else:
-                    yield self._create_error_chunk(str(e))
-                # yield {"messages": [fallback_response[-1]]}
-
-                # chat_history.append(
+                # self.chat_history.append(
                 #     HumanMessage(
                 #         content=f"Your tool call caused an error: {e}\n\nPlease correct your tool call and try again."
                 #     )
                 # )
+                raise e
+                # yield self._create_error_chunk(str(e))
+                yield {}
+
+                # fallback_response = self.fallback_handler.handle_failure(e, self)
+                # print(f"fallback_response={fallback_response}")
+                # if fallback_response:
+                #     hm = HumanMessage(
+                #         content="The fallback handler has fixed your tool call results are in the last System message."
+                #     )
+                #     self.chat_history.extend(fallback_response)
+                #     self.chat_history.append(hm)
+                #     logger.debug("Appended fallback response to chat history.")
+                #     yield {}
+                # else:
+                #     yield self._create_error_chunk(str(e))
+                # yield {"messages": [fallback_response[-1]]}


     def _extract_tool_call(code: str, functions_list: str) -> str:
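With those fields in place, each stream iteration builds its prompt onto self.chat_history and reuses self.sys_message in a single invoke call, so the fallback handler in agent_utils.py can inject messages into the same history between iterations. A heavily trimmed, hypothetical single-iteration version of that loop (the stream_once helper is not part of the codebase):

from langchain_core.messages import HumanMessage

def stream_once(agent, initial_messages):
    """Run one prompt/invoke/record cycle against an agent's persistent history."""
    base_prompt = agent._build_prompt(None)
    agent.chat_history.append(HumanMessage(content=base_prompt))
    # Trim against token/message limits before sending to the model.
    full_history = agent._trim_chat_history(initial_messages, agent.chat_history)
    response = agent.model.invoke([agent.sys_message] + full_history)
    # Record the model's reply so the next iteration (or a fallback handler)
    # sees it in the shared history.
    agent.chat_history.append(response)
    return response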