Handle completion messages.

user 2024-12-21 15:31:04 -05:00
parent 5344aa9e14
commit f0f7487485
2 changed files with 29 additions and 33 deletions
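The diffs below wire the same completion-message handling into each of the three request_* tools: read the message the agent left in _global_memory (falling back to a generic success string, or None on failure), clear the completion state, and surface the message in the returned dict. A minimal, self-contained sketch of that pattern follows; the helper name _collect_completion and the request_example wrapper are illustrative only, and the committed code repeats the logic inline in each function rather than factoring it out.

from typing import Any, Dict, Optional

# Stand-in for the module-level _global_memory dict shared with the agent,
# which is what records 'completion_message' and 'completion_state'.
_global_memory: Dict[str, Any] = {}

def _collect_completion(success: bool) -> Optional[str]:
    # Hypothetical helper: read the completion message left by the agent,
    # falling back to a generic success string (or None if the run failed).
    message = _global_memory.get(
        'completion_message',
        'Task was completed successfully.' if success else None,
    )
    # Clear completion state so a later run does not pick up stale data.
    _global_memory['completion_message'] = ''
    _global_memory['completion_state'] = False
    return message

def request_example(query: str) -> Dict[str, Any]:
    # Hypothetical wrapper mirroring the error handling in the diffs below.
    success, reason = True, None
    try:
        pass  # the real tools dispatch an agent run here
    except Exception as e:
        success = False
        reason = f"error: {str(e)}"
    return {
        "completion_message": _collect_completion(success),
        "success": success,
        "reason": reason,
    }

Clearing the state immediately after reading it keeps one run's message from leaking into the next, which is why the reset happens before the return in every function.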

View File

@@ -313,7 +313,7 @@ Behavior:
- Request that the user provide their initial instructions or the problem they want solved.
2. Iterative Work:
- After receiving the user's initial input, use the given tools (e.g., fuzzy_file_search, ripgrep_search, run_shell_command) to investigate and address their request.
- After receiving the user's initial input, use the given tools to fulfill their request.
- If you are uncertain about the user's requirements, run ask_human to clarify.
- Continue this pattern: research, propose a next step, and if needed, ask_human for confirmation or guidance.
@@ -325,11 +325,7 @@ Scope and Focus:
- Start from zero knowledge: always depend on user input and the discovered context from tools.
- Adapt complexity based on user requests. For simple tasks, keep actions minimal. For more complex tasks, provide deeper investigation and structured approaches.
- Do not assume what the user wants without asking. Always clarify if uncertain.
Testing and Validation:
- If the user's request involves code changes or technical implementation:
- Thoroughly investigate existing code and test suites using the provided tools.
- If appropriate, run tests and ensure they pass before concluding.
- If you have already called tools and can answer the user's query from the information you have, do so. You can always ask the user if they would like to dig deeper or implement something.
No Speculation:
- Do not speculate about the purpose of the user's request. Let the user's instructions and clarifications guide you.
@@ -343,4 +339,5 @@ Remember:
- Always begin by calling ask_human.
- Always ask_human before finalizing or exiting.
- Never announce that you are going to ask the human, just do it.
- Do communicate results/responses from the tools you call as they pertain to the user's request.
"""

View File

@@ -16,14 +16,6 @@ def request_research(query: str) -> Dict[str, Any]:
Args:
query: The research question or project description
Returns:
Dict containing:
- notes: Research notes from the agent
- facts: Current key facts
- files: Related files
- success: Whether completed or interrupted
- reason: Reason for failure, if any
"""
# Initialize model from config
config = _global_memory.get('config', {})
@@ -37,7 +29,7 @@ def request_research(query: str) -> Dict[str, Any]:
model,
expert_enabled=True,
research_only=True,
hil=_global_memory.get('config', {}).get('hil', False),
hil=config.get('hil', False),
console_message=query
)
@@ -52,8 +44,15 @@ def request_research(query: str) -> Dict[str, Any]:
success = False
reason = f"error: {str(e)}"
# Gather results
# Get completion message if available
completion_message = _global_memory.get('completion_message', 'Task was completed successfully.' if success else None)
# Clear completion state from global memory
_global_memory['completion_message'] = ''
_global_memory['completion_state'] = False
return {
"completion_message": completion_message,
"facts": get_memory_value("key_facts"),
"files": list(get_related_files()),
"notes": get_memory_value("research_notes"),
@@ -67,14 +66,6 @@ def request_research_and_implementation(query: str) -> Dict[str, Any]:
Args:
query: The research question or project description
Returns:
Dict containing:
- notes: Research notes from the agent
- facts: Current key facts
- files: Related files
- success: Whether completed or interrupted
- reason: Reason for failure, if any
"""
# Initialize model from config
config = _global_memory.get('config', {})
@@ -88,7 +79,7 @@ def request_research_and_implementation(query: str) -> Dict[str, Any]:
model,
expert_enabled=True,
research_only=False,
hil=_global_memory.get('config', {}).get('hil', False),
hil=config.get('hil', False),
console_message=query
)
@@ -103,8 +94,15 @@ def request_research_and_implementation(query: str) -> Dict[str, Any]:
success = False
reason = f"error: {str(e)}"
# Gather results
# Get completion message if available
completion_message = _global_memory.get('completion_message', 'Task was completed successfully.' if success else None)
# Clear completion state from global memory
_global_memory['completion_message'] = ''
_global_memory['completion_state'] = False
return {
"completion_message": completion_message,
"facts": get_memory_value("key_facts"),
"files": list(get_related_files()),
"notes": get_memory_value("research_notes"),
@@ -174,13 +172,6 @@ def request_implementation(task_spec: str) -> Dict[str, Any]:
Args:
task_spec: The task specification to plan implementation for
Returns:
Dict containing:
- facts: Current key facts
- files: Related files
- success: Whether completed or interrupted
- reason: Reason for failure, if any
"""
# Initialize model from config
config = _global_memory.get('config', {})
@@ -208,7 +199,15 @@ def request_implementation(task_spec: str) -> Dict[str, Any]:
success = False
reason = f"error: {str(e)}"
# Get completion message if available
completion_message = _global_memory.get('completion_message', 'Task was completed successfully.' if success else None)
# Clear completion state from global memory
_global_memory['completion_message'] = ''
_global_memory['completion_state'] = False
return {
"completion_message": completion_message,
"facts": get_memory_value("key_facts"),
"files": list(get_related_files()),
"success": success,