Added initial chat mode.
parent e8a619fcb1
commit 1a125a0139
@@ -14,6 +14,7 @@ from ra_aid.prompts import (
     RESEARCH_PROMPT,
     PLANNING_PROMPT,
     IMPLEMENTATION_PROMPT,
+    CHAT_PROMPT,
     EXPERT_PROMPT_SECTION_RESEARCH,
     EXPERT_PROMPT_SECTION_PLANNING,
     EXPERT_PROMPT_SECTION_IMPLEMENTATION,
@@ -29,7 +30,8 @@ from ra_aid.tool_configs import (
     get_read_only_tools,
     get_research_tools,
     get_planning_tools,
-    get_implementation_tools
+    get_implementation_tools,
+    get_chat_tools
 )
 
 def parse_arguments():
@@ -86,9 +88,18 @@ Examples:
         action='store_true',
         help='Enable human-in-the-loop mode, where the agent can prompt the user for additional information.'
     )
+    parser.add_argument(
+        '--chat',
+        action='store_true',
+        help='Enable chat mode with direct human interaction (implies --hil)'
+    )
 
     args = parser.parse_args()
 
+    # Set hil=True when chat mode is enabled
+    if args.chat:
+        args.hil = True
+
     # Set default model for Anthropic, require model for other providers
     if args.provider == 'anthropic':
         if not args.model:
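The new flag simply forces human-in-the-loop on after parsing. A tiny standalone illustration of that flag-implies-flag pattern follows; the parser and flags here are a sketch, not the project's full parse_arguments.

import argparse

# Sketch of the pattern above: --chat forces hil=True after parsing.
parser = argparse.ArgumentParser()
parser.add_argument('--hil', action='store_true')
parser.add_argument('--chat', action='store_true')

args = parser.parse_args(['--chat'])
if args.chat:
    args.hil = True
print(args.hil)  # True: chat mode always runs with human-in-the-loop enabled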
@@ -263,6 +274,35 @@ def main():
     # Create the base model after validation
     model = initialize_llm(args.provider, args.model)
 
+    # Handle chat mode
+    if args.chat:
+        print_stage_header("Chat Mode")
+
+        # Create chat agent with appropriate tools
+        chat_agent = create_react_agent(
+            model,
+            get_chat_tools(expert_enabled=expert_enabled),
+            checkpointer=MemorySaver()
+        )
+
+        # Run chat agent with CHAT_PROMPT
+        config = {
+            "configurable": {"thread_id": "abc123"},
+            "recursion_limit": 100,
+            "chat_mode": True,
+            "cowboy_mode": args.cowboy_mode,
+            "hil": True  # Always true in chat mode
+        }
+
+        # Store config in global memory
+        _global_memory['config'] = config
+        _global_memory['config']['expert_provider'] = args.expert_provider
+        _global_memory['config']['expert_model'] = args.expert_model
+
+        # Run chat agent and exit
+        run_agent_with_retry(chat_agent, CHAT_PROMPT, config)
+        return
+
     # Validate message is provided
     if not args.message:
         print_error("--message is required")
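The agent wiring above leans on LangGraph's prebuilt ReAct agent and in-memory checkpointer. A rough, self-contained sketch of that pattern follows; the echo tool, model choice, and prompt are illustrative assumptions, not part of this commit (the real code passes get_chat_tools(...) and CHAT_PROMPT, and builds the model via initialize_llm).

from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

@tool
def echo(text: str) -> str:
    """Echo the given text back to the caller."""
    return text

# Illustrative model; the diff obtains its model from initialize_llm(provider, model).
model = ChatAnthropic(model="claude-3-5-sonnet-20241022")
agent = create_react_agent(model, [echo], checkpointer=MemorySaver())

# thread_id keys the MemorySaver checkpointer so turns share one conversation;
# recursion_limit bounds the number of agent steps, as in the config above.
config = {"configurable": {"thread_id": "demo"}, "recursion_limit": 100}
result = agent.invoke({"messages": [("user", "Say hello.")]}, config)
print(result["messages"][-1].content)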
@@ -96,3 +96,25 @@ def get_implementation_tools(expert_enabled: bool = True) -> list:
         tools.extend(EXPERT_TOOLS)
 
     return tools
+
+def get_chat_tools(expert_enabled: bool = True) -> list:
+    """Get the list of tools available in chat mode.
+
+    Chat mode includes research and implementation capabilities but excludes
+    complex planning tools. Human interaction is always enabled.
+    """
+    # Start with read-only tools and always include human interaction
+    tools = get_read_only_tools(human_interaction=True).copy()
+
+    # Add implementation capability
+    tools.extend(MODIFICATION_TOOLS)
+
+    # Add research tools except for subtask management
+    research_tools = [t for t in RESEARCH_TOOLS if t.name != 'request_research_subtask']
+    tools.extend(research_tools)
+
+    # Add expert tools if enabled
+    if expert_enabled:
+        tools.extend(EXPERT_TOOLS)
+
+    return tools
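A short usage sketch for the new helper, assuming ra_aid.tool_configs exports it as shown in this diff; the expert_enabled=False call and the .name inventory are illustrative (LangChain tools expose a .name attribute).

from ra_aid.tool_configs import get_chat_tools

# Build the chat-mode toolset without expert tools and list what it contains.
tools = get_chat_tools(expert_enabled=False)
print(sorted(t.name for t in tools))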