From 6d095eab4638f4e35a23c19ff07f9fc13ca88350 Mon Sep 17 00:00:00 2001
From: AI Christianson
Date: Mon, 17 Feb 2025 18:21:49 -0500
Subject: [PATCH] use base latency in programmer tool

---
 ra_aid/models_params.py    | 2 +-
 ra_aid/tools/programmer.py | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/ra_aid/models_params.py b/ra_aid/models_params.py
index 5c80e0c..ce8479e 100644
--- a/ra_aid/models_params.py
+++ b/ra_aid/models_params.py
@@ -4,7 +4,7 @@ List of model parameters
 
 DEFAULT_TOKEN_LIMIT = 100000
 DEFAULT_TEMPERATURE = 0.7
-DEFAULT_BASE_LATENCY = 180
+DEFAULT_BASE_LATENCY = 240
 
 models_params = {
     "openai": {
diff --git a/ra_aid/tools/programmer.py b/ra_aid/tools/programmer.py
index a607255..6ecce7c 100644
--- a/ra_aid/tools/programmer.py
+++ b/ra_aid/tools/programmer.py
@@ -11,6 +11,7 @@ from rich.panel import Panel
 from rich.text import Text
 
 from ra_aid.logging_config import get_logger
+from ra_aid.models_params import models_params, DEFAULT_BASE_LATENCY
 from ra_aid.proc.interactive import run_interactive_command
 from ra_aid.text.processing import truncate_output
 from ra_aid.tools.memory import _global_memory, log_work_event
@@ -135,7 +136,12 @@ def run_programming_task(
     try:
         # Run the command interactively
         print()
-        result = run_interactive_command(command)
+        # Get provider/model specific latency coefficient
+        provider = _global_memory.get("config", {}).get("provider", "")
+        model = _global_memory.get("config", {}).get("model", "")
+        latency = models_params.get(provider, {}).get(model, {}).get("latency_coefficient", DEFAULT_BASE_LATENCY)
+
+        result = run_interactive_command(command, expected_runtime_seconds=latency)
        print()
 
         # Log the programming task
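
Note: a minimal standalone sketch of the lookup chain this patch introduces. The provider/model entries below ("openai" / "gpt-4o" and its latency value) are hypothetical placeholders; the real per-model entries live in ra_aid/models_params.py. The point it illustrates is the chained .get() fallback: a missing provider, missing model, or missing "latency_coefficient" key all resolve to DEFAULT_BASE_LATENCY without raising.

    # Sketch only: placeholder data, not the real models_params table.
    DEFAULT_BASE_LATENCY = 240  # seconds; value set by this patch

    models_params = {
        "openai": {
            # Hypothetical model entry overriding the base latency.
            "gpt-4o": {"latency_coefficient": 120},
        },
    }

    def resolve_latency(config: dict) -> int:
        """Mirror the lookup in run_programming_task: each .get() falls
        back to an empty dict, so the final .get() supplies the default."""
        provider = config.get("provider", "")
        model = config.get("model", "")
        return (
            models_params
            .get(provider, {})
            .get(model, {})
            .get("latency_coefficient", DEFAULT_BASE_LATENCY)
        )

    print(resolve_latency({"provider": "openai", "model": "gpt-4o"}))   # 120 (per-model override)
    print(resolve_latency({"provider": "openai", "model": "other"}))    # 240 (model not listed)
    print(resolve_latency({}))                                          # 240 (no config at all)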