use base latency in programmer tool

This commit is contained in:
AI Christianson 2025-02-17 18:21:49 -05:00
parent c789edd8bd
commit 6d095eab46
2 changed files with 8 additions and 2 deletions

View File

@@ -4,7 +4,7 @@ List of model parameters
DEFAULT_TOKEN_LIMIT = 100000
DEFAULT_TEMPERATURE = 0.7
-DEFAULT_BASE_LATENCY = 180
+DEFAULT_BASE_LATENCY = 240
models_params = {
"openai": {

View File

@@ -11,6 +11,7 @@ from rich.panel import Panel
from rich.text import Text
from ra_aid.logging_config import get_logger
+from ra_aid.models_params import models_params, DEFAULT_BASE_LATENCY
from ra_aid.proc.interactive import run_interactive_command
from ra_aid.text.processing import truncate_output
from ra_aid.tools.memory import _global_memory, log_work_event
@@ -135,7 +136,12 @@ def run_programming_task(
try:
# Run the command interactively
print()
-        result = run_interactive_command(command)
+        # Get provider/model specific latency coefficient
+        provider = _global_memory.get("config", {}).get("provider", "")
+        model = _global_memory.get("config", {}).get("model", "")
+        latency = models_params.get(provider, {}).get(model, {}).get("latency_coefficient", DEFAULT_BASE_LATENCY)
+        result = run_interactive_command(command, expected_runtime_seconds=latency)
print()
# Log the programming task