Adjustments to get smaller agent models working better.
commit 684b076dbf · parent 19e203be7e
```diff
@@ -1,6 +1,7 @@
 import argparse
 import sys
 import uuid
+from datetime import datetime
 from rich.panel import Panel
 from rich.console import Console
 from langgraph.checkpoint.memory import MemorySaver
```
```diff
@@ -191,6 +192,10 @@ def main():
     # Get initial request from user
     initial_request = ask_human.invoke({"question": "What would you like help with?"})
 
+    # Get working directory and current date
+    working_directory = os.getcwd()
+    current_date = datetime.now().strftime("%Y-%m-%d")
+
     # Run chat agent with CHAT_PROMPT
     config = {
         "configurable": {"thread_id": uuid.uuid4()},
```
```diff
@@ -219,7 +224,9 @@ def main():
         # Run chat agent and exit
         run_agent_with_retry(chat_agent, CHAT_PROMPT.format(
             initial_request=initial_request,
-            web_research_section=WEB_RESEARCH_PROMPT_SECTION_CHAT if web_research_enabled else ""
+            web_research_section=WEB_RESEARCH_PROMPT_SECTION_CHAT if web_research_enabled else "",
+            working_directory=working_directory,
+            current_date=current_date
         ), config)
         return
 
```
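Together with the previous hunk, this threads the working directory and current date into the chat prompt at format time. A minimal, self-contained sketch of the pattern; the template body here is an abbreviated stand-in for the real `CHAT_PROMPT`, and `initial_request` is hard-coded in place of the `ask_human` call:

```python
import os
import uuid
from datetime import datetime

# Abbreviated stand-in for the real CHAT_PROMPT; only the placeholder
# names match the diff, the body is illustrative.
CHAT_PROMPT = """Working Directory: {working_directory}
Current Date: {current_date}

User request: {initial_request}
{web_research_section}"""

initial_request = "Summarize the repo layout"  # would come from ask_human
web_research_enabled = False

prompt = CHAT_PROMPT.format(
    initial_request=initial_request,
    web_research_section="(web research section)" if web_research_enabled else "",
    working_directory=os.getcwd(),
    current_date=datetime.now().strftime("%Y-%m-%d"),
)
config = {"configurable": {"thread_id": uuid.uuid4()}}
print(prompt)  # the agent sees its environment up front
```

Note the trailing comma added to the `web_research_section` line: without it, the two new keyword arguments could not be appended.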
```diff
@@ -569,7 +569,12 @@ def run_agent_with_retry(agent, prompt: str, config: dict) -> Optional[str]:
             logger.debug("Agent output: %s", chunk)
             check_interrupt()
             print_agent_output(chunk)
-            if _global_memory['task_completed']:
+            if _global_memory['plan_completed']:
+                _global_memory['plan_completed'] = False
+                _global_memory['task_completed'] = False
+                _global_memory['completion_message'] = ''
+                break
+            if _global_memory['task_completed'] or _global_memory['plan_completed']:
                 _global_memory['task_completed'] = False
                 _global_memory['completion_message'] = ''
                 break
```
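The replaced conditional splits completion handling in two: `plan_completed` now gets its own branch that clears both flags before breaking, and the general `task_completed` check runs afterwards. A minimal sketch of the new flow, assuming `_global_memory` is a plain module-level dict and stubbing the agent stream with a list:

```python
# Plain-dict stand-in for the shared _global_memory used in the diff.
_global_memory = {"task_completed": False, "plan_completed": False, "completion_message": ""}

def consume(chunks):
    """Stream stub: iterate chunks until a completion flag is raised."""
    for chunk in chunks:
        print(chunk)  # stands in for print_agent_output(chunk)
        # Plan completion is handled first and clears BOTH flags, so a
        # model that only signals plan completion still exits cleanly.
        if _global_memory["plan_completed"]:
            _global_memory["plan_completed"] = False
            _global_memory["task_completed"] = False
            _global_memory["completion_message"] = ""
            break
        # General completion check, with plan_completed as an extra condition.
        if _global_memory["task_completed"] or _global_memory["plan_completed"]:
            _global_memory["task_completed"] = False
            _global_memory["completion_message"] = ""
            break
```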
```diff
@@ -91,6 +91,7 @@ Start by asking the user what they want.
 </agent instructions>
 
+You must carefully review the conversation history, which functions were called so far, returned results, etc., and make sure the very next function call you make makes sense in order to achieve the original goal.
 You must achieve the goal in as few steps possible, but no fewer.
 
 You must ONLY use ONE of the following functions (these are the ONLY functions that exist):
 
```
```diff
@@ -39,6 +39,7 @@ def initialize_llm(provider: str, model_name: str) -> BaseChatModel:
         return ChatOpenAI(
             api_key=os.getenv("OPENAI_API_KEY"),
             base_url=os.getenv("OPENAI_API_BASE"),
+            temperature=0.3,
             model=model_name,
         )
     else:
```
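The only change here is pinning `temperature=0.3`, which makes sampling more deterministic and, presumably, keeps smaller models closer to the expected tool-call format. A sketch of the surrounding factory; the import and branch condition are hypothetical stand-ins, since the diff shows neither:

```python
import os
from langchain_openai import ChatOpenAI  # assumed import; not shown in the diff

def initialize_llm(provider: str, model_name: str):
    if provider == "openai":  # hypothetical branch condition
        return ChatOpenAI(
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url=os.getenv("OPENAI_API_BASE"),
            temperature=0.3,  # lower temperature: fewer off-script tool calls
            model=model_name,
        )
    else:
        raise ValueError(f"Unsupported provider: {provider}")
```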
```diff
@@ -557,7 +557,8 @@ NEVER ANNOUNCE WHAT YOU ARE DOING, JUST DO IT!
 """
 
 # New agentic chat prompt for interactive mode
-CHAT_PROMPT = """
+CHAT_PROMPT = """Working Directory: {working_directory}
+Current Date: {current_date}
 Agentic Chat Mode Instructions:
 
 Overview:
```
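One consequence of extending the template is that every new placeholder must also appear in the `.format()` call back in `main()` (hence the hunk at `-219` above). A small optional check, not part of the commit, that enumerates a template's required fields using the standard library:

```python
from string import Formatter

CHAT_PROMPT = """Working Directory: {working_directory}
Current Date: {current_date}
Agentic Chat Mode Instructions:
"""  # abbreviated

# Enumerate the placeholders the template expects, to keep the
# .format() call in main() in sync when new fields are added.
fields = {name for _, name, _, _ in Formatter().parse(CHAT_PROMPT) if name}
print(sorted(fields))  # ['current_date', 'working_directory']
```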