feat: add `--test-cmd-timeout` option to specify timeout for test command execution
This change introduces a new command-line option, `--test-cmd-timeout`, that lets users set a timeout for test command execution; the default is 300 seconds. It gives users more control over how long test commands may run and helps prevent indefinite hangs during testing. The relevant parts of the codebase have been updated to use the new setting, keeping behavior consistent across the application.
parent e9d578c2dc
commit 581dc4b761
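To illustrate the new flag from the user's side, here is a minimal argparse sketch; the option definition mirrors the diff below, while the `prog` name and the sample invocation are assumptions rather than code from this commit.

```python
import argparse

# Mirrors the new constant added in ra_aid/config.py (60 * 5 = 300 seconds).
DEFAULT_TEST_CMD_TIMEOUT = 60 * 5

parser = argparse.ArgumentParser(prog="ra-aid")  # prog name assumed for the example
parser.add_argument("--test-cmd", type=str, help="Custom command to run tests")
parser.add_argument(
    "--test-cmd-timeout",
    type=int,
    default=DEFAULT_TEST_CMD_TIMEOUT,
    help=f"Timeout in seconds for test command execution (default: {DEFAULT_TEST_CMD_TIMEOUT})",
)

# Equivalent CLI usage: ra-aid --test-cmd "pytest -x" --test-cmd-timeout 120
args = parser.parse_args(["--test-cmd", "pytest -x", "--test-cmd-timeout", "120"])
print(args.test_cmd, args.test_cmd_timeout)  # pytest -x 120
```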
@@ -181,6 +181,7 @@ More information is available in our [Usage Examples](https://docs.ra-aid.ai/cat
 - `--test-cmd`: Custom command to run tests. If set user will be asked if they want to run the test command
 - `--auto-test`: Automatically run tests after each code change
 - `--max-test-cmd-retries`: Maximum number of test command retry attempts (default: 3)
+- `--test-cmd-timeout`: Timeout in seconds for test command execution (default: 300)
 - `--version`: Show program version number and exit
 - `--webui`: Launch the web interface (alpha feature)
 - `--webui-host`: Host to listen on for web interface (default: 0.0.0.0) (alpha feature)
@@ -18,7 +18,11 @@ from ra_aid.agent_utils import (
     run_planning_agent,
     run_research_agent,
 )
-from ra_aid.config import DEFAULT_MAX_TEST_CMD_RETRIES, DEFAULT_RECURSION_LIMIT
+from ra_aid.config import (
+    DEFAULT_MAX_TEST_CMD_RETRIES,
+    DEFAULT_RECURSION_LIMIT,
+    DEFAULT_TEST_CMD_TIMEOUT,
+)
 from ra_aid.dependencies import check_dependencies
 from ra_aid.env import validate_environment
 from ra_aid.llm import initialize_llm
@@ -81,9 +85,11 @@ Examples:
     parser.add_argument(
         "--provider",
         type=str,
-        default="openai"
-        if (os.getenv("OPENAI_API_KEY") and not os.getenv("ANTHROPIC_API_KEY"))
-        else "anthropic",
+        default=(
+            "openai"
+            if (os.getenv("OPENAI_API_KEY") and not os.getenv("ANTHROPIC_API_KEY"))
+            else "anthropic"
+        ),
         choices=VALID_PROVIDERS,
         help="The LLM provider to use",
     )
@@ -173,7 +179,13 @@ Examples:
         "--max-test-cmd-retries",
         type=int,
         default=DEFAULT_MAX_TEST_CMD_RETRIES,
-        help="Maximum number of retries for the test command (default: 10)",
+        help="Maximum number of retries for the test command (default: 3)",
+    )
+    parser.add_argument(
+        "--test-cmd-timeout",
+        type=int,
+        default=DEFAULT_TEST_CMD_TIMEOUT,
+        help=f"Timeout in seconds for test command execution (default: {DEFAULT_TEST_CMD_TIMEOUT})",
     )
     parser.add_argument(
         "--webui",
@@ -231,7 +243,6 @@ Examples:
         parsed_args.expert_provider = parsed_args.provider
         parsed_args.expert_model = parsed_args.model

-
     # Validate temperature range if provided
     if parsed_args.temperature is not None and not (
         0.0 <= parsed_args.temperature <= 2.0
@@ -294,15 +305,24 @@ def main():

     # Validate model configuration early
     from ra_aid.models_params import models_params

     model_config = models_params.get(args.provider, {}).get(args.model or "", {})
-    supports_temperature = model_config.get("supports_temperature", args.provider in ["anthropic", "openai", "openrouter", "openai-compatible", "deepseek"])
+    supports_temperature = model_config.get(
+        "supports_temperature",
+        args.provider
+        in ["anthropic", "openai", "openrouter", "openai-compatible", "deepseek"],
+    )

     if supports_temperature and args.temperature is None:
         args.temperature = model_config.get("default_temperature")
         if args.temperature is None:
-            print_error(f"Temperature must be provided for model {args.model} which supports temperature")
+            print_error(
+                f"Temperature must be provided for model {args.model} which supports temperature"
+            )
             sys.exit(1)
-        logger.debug(f"Using default temperature {args.temperature} for model {args.model}")
+        logger.debug(
+            f"Using default temperature {args.temperature} for model {args.model}"
+        )

     # Display status lines
     status = Text()
@@ -324,16 +344,13 @@ def main():

     # Search info
     status.append("🔍 Search: ")
-    status.append("Enabled" if web_research_enabled else "Disabled",
-                  style=None if web_research_enabled else "italic")
+    status.append(
+        "Enabled" if web_research_enabled else "Disabled",
+        style=None if web_research_enabled else "italic",
+    )

     console.print(
-        Panel(
-            status,
-            title="Config",
-            border_style="bright_blue",
-            padding=(0, 1)
-        )
+        Panel(status, title="Config", border_style="bright_blue", padding=(0, 1))
     )

     # Handle chat mode
@@ -400,9 +417,9 @@ def main():
             chat_agent,
             CHAT_PROMPT.format(
                 initial_request=initial_request,
-                web_research_section=WEB_RESEARCH_PROMPT_SECTION_CHAT
-                if web_research_enabled
-                else "",
+                web_research_section=(
+                    WEB_RESEARCH_PROMPT_SECTION_CHAT if web_research_enabled else ""
+                ),
                 working_directory=working_directory,
                 current_date=current_date,
                 project_info=formatted_project_info,
@@ -428,6 +445,7 @@ def main():
         "auto_test": args.auto_test,
         "test_cmd": args.test_cmd,
         "max_test_cmd_retries": args.max_test_cmd_retries,
+        "test_cmd_timeout": args.test_cmd_timeout,
     }

     # Store config in global memory for access by is_informational_query
@@ -2,3 +2,4 @@

 DEFAULT_RECURSION_LIMIT = 100
 DEFAULT_MAX_TEST_CMD_RETRIES = 3
+DEFAULT_TEST_CMD_TIMEOUT = 60 * 5  # 5 minutes in seconds
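The diff stores `test_cmd_timeout` in the runtime config and defines its default above, but the consumer of that value is not shown in this commit. As a hedged sketch (function and key names assumed, not taken from this diff), a test-command runner would typically pass the value straight to a subprocess timeout and treat expiry as a failed attempt:

```python
import shlex
import subprocess
from typing import Tuple


def run_test_command(config: dict) -> Tuple[bool, str]:
    """Hypothetical consumer of the config dict assembled in __main__.py."""
    cmd = config.get("test_cmd")
    if not cmd:
        return True, "no test command configured"
    timeout = config.get("test_cmd_timeout", 300)
    retries = config.get("max_test_cmd_retries", 3)
    for _attempt in range(retries):
        try:
            proc = subprocess.run(
                shlex.split(cmd), capture_output=True, text=True, timeout=timeout
            )
        except subprocess.TimeoutExpired:
            # A hang beyond the timeout counts as a failed attempt instead of blocking forever.
            continue
        if proc.returncode == 0:
            return True, proc.stdout
    return False, f"test command failed after {retries} attempts"
```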
@@ -1,11 +1,9 @@
 """Module for efficient file listing using git."""

-import subprocess
 import os
+import subprocess
 from pathlib import Path
 from typing import List, Optional, Tuple
-import tempfile
-import shutil


 class FileListerError(Exception):
@@ -133,12 +131,18 @@ def get_file_listing(

         # Combine and process the files
         all_files = []
-        for file in tracked_files_process.stdout.splitlines() + untracked_files_process.stdout.splitlines():
+        for file in (
+            tracked_files_process.stdout.splitlines()
+            + untracked_files_process.stdout.splitlines()
+        ):
             file = file.strip()
             if not file:
                 continue
             # Skip hidden files unless explicitly included
-            if not include_hidden and (file.startswith(".") or any(part.startswith(".") for part in file.split("/"))):
+            if not include_hidden and (
+                file.startswith(".")
+                or any(part.startswith(".") for part in file.split("/"))
+            ):
                 continue
             # Skip .aider files
             if ".aider" in file:
@@ -155,7 +159,7 @@ def get_file_listing(

         return all_files, total_count

-    except (DirectoryNotFoundError, DirectoryAccessError, GitCommandError) as e:
+    except (DirectoryNotFoundError, DirectoryAccessError, GitCommandError):
         # Re-raise known exceptions
         raise
     except PermissionError as e:
@@ -1,18 +1,18 @@
 import os
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional

-from openai import OpenAI
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_openai import ChatOpenAI
+from openai import OpenAI

 from ra_aid.chat_models.deepseek_chat import ChatDeepseekReasoner
 from ra_aid.logging_config import get_logger
-from typing import List

 from .models_params import models_params


 def get_available_openai_models() -> List[str]:
     """Fetch available OpenAI models using OpenAI client.
@@ -28,6 +28,7 @@ def get_available_openai_models() -> List[str]:
         # Return empty list if unable to fetch models
         return []

+
 def select_expert_model(provider: str, model: Optional[str] = None) -> Optional[str]:
     """Select appropriate expert model based on provider and availability.
@@ -54,6 +55,7 @@ def select_expert_model(provider: str, model: Optional[str] = None) -> Optional[

     return None

+
 known_temp_providers = {
     "openai",
     "anthropic",
@@ -220,7 +222,9 @@ def create_llm_client(
         temp_kwargs = {"temperature": 0} if supports_temperature else {}
     elif supports_temperature:
         if temperature is None:
-            raise ValueError(f"Temperature must be provided for model {model_name} which supports temperature")
+            raise ValueError(
+                f"Temperature must be provided for model {model_name} which supports temperature"
+            )
         temp_kwargs = {"temperature": temperature}
     else:
         temp_kwargs = {}
@@ -248,11 +252,13 @@ def create_llm_client(
         }
         if is_expert:
             openai_kwargs["reasoning_effort"] = "high"
-        return ChatOpenAI(**{
-            **openai_kwargs,
-            "timeout": LLM_REQUEST_TIMEOUT,
-            "max_retries": LLM_MAX_RETRIES,
-        })
+        return ChatOpenAI(
+            **{
+                **openai_kwargs,
+                "timeout": LLM_REQUEST_TIMEOUT,
+                "max_retries": LLM_MAX_RETRIES,
+            }
+        )
     elif provider == "anthropic":
         return ChatAnthropic(
             api_key=config["api_key"],
@@ -289,8 +295,6 @@ def initialize_llm(
     return create_llm_client(provider, model_name, temperature, is_expert=False)


-def initialize_expert_llm(
-    provider: str, model_name: str
-) -> BaseChatModel:
+def initialize_expert_llm(provider: str, model_name: str) -> BaseChatModel:
     """Initialize an expert language model client based on the specified provider and model."""
     return create_llm_client(provider, model_name, temperature=None, is_expert=True)
@@ -7,114 +7,465 @@ DEFAULT_TEMPERATURE = 0.7
[models_params.py — this hunk and the nine that follow it (through @@ -361,9 +846,21 @@) only reformat the provider model tables: each single-line entry such as
    "gpt-4o": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
becomes an equivalent multi-line dictionary literal. Token limits, temperature-support flags, and default temperatures are unchanged; the only non-formatting edit is the removal of a duplicated "o1-preview" entry from the "openai" section.]
@@ -9,28 +9,31 @@ The interface remains compatible with external callers expecting a tuple (output
 where output is a bytes object (UTF-8 encoded).
 """

-import os
-import shlex
-import shutil
 import errno
-import sys
 import io
-import subprocess
+import os
 import select
-import termios
-import tty
-import time
 import signal
+import shutil
+import subprocess
+import sys
+import termios
+import time
+import tty
 from typing import List, Tuple

 import pyte
 from pyte.screens import HistoryScreen


 def render_line(line, columns: int) -> str:
     """Render a single screen line from the pyte buffer (a mapping of column to Char)."""
     return "".join(line[x].data for x in range(columns))


-def run_interactive_command(cmd: List[str], expected_runtime_seconds: int = 30) -> Tuple[bytes, int]:
+def run_interactive_command(
+    cmd: List[str], expected_runtime_seconds: int = 30
+) -> Tuple[bytes, int]:
     """
     Runs an interactive command with a pseudo-tty, capturing final scrollback history.
@ -61,7 +64,9 @@ def run_interactive_command(cmd: List[str], expected_runtime_seconds: int = 30)
|
||||||
if shutil.which(cmd[0]) is None:
|
if shutil.which(cmd[0]) is None:
|
||||||
raise FileNotFoundError(f"Command '{cmd[0]}' not found in PATH.")
|
raise FileNotFoundError(f"Command '{cmd[0]}' not found in PATH.")
|
||||||
if expected_runtime_seconds <= 0 or expected_runtime_seconds > 1800:
|
if expected_runtime_seconds <= 0 or expected_runtime_seconds > 1800:
|
||||||
raise ValueError("expected_runtime_seconds must be between 1 and 1800 seconds (30 minutes)")
|
raise ValueError(
|
||||||
|
"expected_runtime_seconds must be between 1 and 1800 seconds (30 minutes)"
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
term_size = os.get_terminal_size()
|
term_size = os.get_terminal_size()
|
||||||
|
|
@ -85,20 +90,22 @@ def run_interactive_command(cmd: List[str], expected_runtime_seconds: int = 30)
|
||||||
|
|
||||||
# Set up environment variables for the subprocess using detected terminal size.
|
# Set up environment variables for the subprocess using detected terminal size.
|
||||||
env = os.environ.copy()
|
env = os.environ.copy()
|
||||||
env.update({
|
env.update(
|
||||||
'DEBIAN_FRONTEND': 'noninteractive',
|
{
|
||||||
'GIT_PAGER': '',
|
"DEBIAN_FRONTEND": "noninteractive",
|
||||||
'PYTHONUNBUFFERED': '1',
|
"GIT_PAGER": "",
|
||||||
'CI': 'true',
|
"PYTHONUNBUFFERED": "1",
|
||||||
'LANG': 'C.UTF-8',
|
"CI": "true",
|
||||||
'LC_ALL': 'C.UTF-8',
|
"LANG": "C.UTF-8",
|
||||||
'COLUMNS': str(cols),
|
"LC_ALL": "C.UTF-8",
|
||||||
'LINES': str(rows),
|
"COLUMNS": str(cols),
|
||||||
'FORCE_COLOR': '1',
|
"LINES": str(rows),
|
||||||
'GIT_TERMINAL_PROMPT': '0',
|
"FORCE_COLOR": "1",
|
||||||
'PYTHONDONTWRITEBYTECODE': '1',
|
"GIT_TERMINAL_PROMPT": "0",
|
||||||
'NODE_OPTIONS': '--unhandled-rejections=strict'
|
"PYTHONDONTWRITEBYTECODE": "1",
|
||||||
})
|
"NODE_OPTIONS": "--unhandled-rejections=strict",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
proc = subprocess.Popen(
|
proc = subprocess.Popen(
|
||||||
cmd,
|
cmd,
|
||||||
|
|
@ -108,7 +115,7 @@ def run_interactive_command(cmd: List[str], expected_runtime_seconds: int = 30)
|
||||||
bufsize=0,
|
bufsize=0,
|
||||||
close_fds=True,
|
close_fds=True,
|
||||||
env=env,
|
env=env,
|
||||||
preexec_fn=os.setsid # Create new process group for proper signal handling.
|
preexec_fn=os.setsid, # Create new process group for proper signal handling.
|
||||||
)
|
)
|
||||||
os.close(slave_fd) # Close slave end in the parent process.
|
os.close(slave_fd) # Close slave end in the parent process.
|
||||||
|
|
||||||
|
|
@ -211,8 +218,10 @@ def run_interactive_command(cmd: List[str], expected_runtime_seconds: int = 30)
|
||||||
|
|
||||||
return final_output.encode("utf-8"), proc.returncode
|
return final_output.encode("utf-8"), proc.returncode
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
if len(sys.argv) < 2:
|
if len(sys.argv) < 2:
|
||||||
print("Usage: interactive.py <command> [args...]")
|
print("Usage: interactive.py <command> [args...]")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
|
||||||
|
|
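For orientation, a hedged usage sketch of the reshaped run_interactive_command signature; the import path is assumed from the surrounding hunks and may differ in the repository:

```python
# Import path assumed, not confirmed by this diff.
from ra_aid.proc.interactive import run_interactive_command

# Returns the final scrollback as UTF-8 bytes plus the process return code.
output, return_code = run_interactive_command(
    ["/bin/bash", "-c", "echo hello"], expected_runtime_seconds=30
)
print(return_code, output.decode("utf-8", errors="replace"))
```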
@@ -50,6 +50,7 @@ class OpenAIStrategy(ProviderStrategy):
         # Handle expert model selection if none specified
         if hasattr(args, "expert_model") and not args.expert_model:
             from ra_aid.llm import select_expert_model
+
             model = select_expert_model("openai")
             if model:
                 args.expert_model = model
@@ -1,25 +1,19 @@
 from ra_aid.tools import (
     ask_expert,
     ask_human,
-    delete_key_facts,
-    delete_key_snippets,
-    deregister_related_files,
     emit_expert_context,
     emit_key_facts,
     emit_key_snippets,
-    emit_plan,
     emit_related_files,
     emit_research_notes,
     fuzzy_find_project_files,
     list_directory_tree,
-    monorepo_detected,
     plan_implementation_completed,
     read_file_tool,
     ripgrep_search,
     run_programming_task,
     run_shell_command,
     task_completed,
-    ui_detected,
     web_search_tavily,
 )
 from ra_aid.tools.agent import (
@@ -30,7 +24,6 @@ from ra_aid.tools.agent import (
     request_web_research,
 )
 from ra_aid.tools.memory import one_shot_completed
-from ra_aid.tools.write_file import put_complete_file_contents
 
 
 # Read-only tools that don't modify system state
@@ -73,7 +66,9 @@ def get_read_only_tools(
 # Define constant tool groups
 READ_ONLY_TOOLS = get_read_only_tools()
 # MODIFICATION_TOOLS = [run_programming_task, put_complete_file_contents]
-MODIFICATION_TOOLS = [run_programming_task]  # having put_complete_file_contents causes trouble :(
+MODIFICATION_TOOLS = [
+    run_programming_task
+]  # having put_complete_file_contents causes trouble :(
 COMMON_TOOLS = get_read_only_tools()
 EXPERT_TOOLS = [emit_expert_context, ask_expert]
 RESEARCH_TOOLS = [
@@ -275,6 +275,7 @@ def request_task_implementation(task_spec: str) -> Dict[str, Any]:
     print_task_header(task_spec)
     # Run implementation agent
     from ..agent_utils import run_task_implementation_agent
+
     _global_memory["completion_message"] = ""
 
     _result = run_task_implementation_agent(
@@ -345,6 +346,7 @@ def request_implementation(task_spec: str) -> Dict[str, Any]:
     try:
         # Run planning agent
         from ..agent_utils import run_planning_agent
+
         _global_memory["completion_message"] = ""
 
         _result = run_planning_agent(
@@ -185,7 +185,11 @@ def ask_expert(question: str) -> str:
 
     query_parts.extend(["# Question", question])
     query_parts.extend(
-        ["\n # Addidional Requirements", "**DO NOT OVERTHINK**", "**DO NOT OVERCOMPLICATE**"]
+        [
+            "\n # Addidional Requirements",
+            "**DO NOT OVERTHINK**",
+            "**DO NOT OVERCOMPLICATE**",
+        ]
     )
 
     # Join all parts
@@ -8,6 +8,7 @@ from rich.console import Console
 from rich.markdown import Markdown
 from rich.panel import Panel
 
+from ra_aid.config import DEFAULT_TEST_CMD_TIMEOUT
 from ra_aid.logging_config import get_logger
 from ra_aid.tools.human import ask_human
 from ra_aid.tools.shell import run_shell_command
@@ -85,7 +86,7 @@ class TestCommandExecutor:
             cmd: Test command to execute
             original_prompt: Original prompt text
         """
-        timeout = self.config.get("timeout", 30)
+        timeout = self.config.get("test_cmd_timeout", DEFAULT_TEST_CMD_TIMEOUT)
         try:
             logger.info(f"Executing test command: {cmd} with timeout {timeout}s")
             test_result = run_shell_command(cmd, timeout=timeout)
@@ -99,11 +100,11 @@ class TestCommandExecutor:
                 logger.info("Test command executed successfully")
 
         except subprocess.TimeoutExpired:
-            logger.warning(f"Test command timed out after {timeout}s: {cmd}")
-            self.state.test_attempts += 1
-            self.state.prompt = (
-                f"{original_prompt}. Previous attempt timed out after {timeout} seconds"
-            )
+            logger.warning(
+                f"Test command timed out after {DEFAULT_TEST_CMD_TIMEOUT}s: {cmd}"
+            )
+            self.state.test_attempts += 1
+            self.state.prompt = f"{original_prompt}. Previous attempt timed out after {DEFAULT_TEST_CMD_TIMEOUT} seconds"
             self.display_test_failure()
 
         except subprocess.CalledProcessError as e:
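This hunk is the core of the change: the executor now reads the test_cmd_timeout key and falls back to the 300-second default. A minimal sketch of the same lookup, using a hand-written config dict in place of the one built from the CLI flags:

```python
from ra_aid.config import DEFAULT_TEST_CMD_TIMEOUT
from ra_aid.tools.shell import run_shell_command

# Stand-in for the config assembled from the CLI flags.
config = {"test_cmd": "pytest -q", "test_cmd_timeout": 120}

# Same lookup the executor performs: explicit value wins, otherwise the
# 300-second default from ra_aid.config.
timeout = config.get("test_cmd_timeout", DEFAULT_TEST_CMD_TIMEOUT)
result = run_shell_command(config["test_cmd"], timeout=timeout)
```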
@@ -1,5 +1,5 @@
 import os
-from typing import Any, Dict, List, Optional, Set, Union
+from typing import Dict, List, Optional, Set, Union
 
 from langchain_core.tools import tool
 from rich.console import Console
@@ -1,5 +1,4 @@
 import os
-import os
 import sys
 from pathlib import Path
 from typing import Dict, List, Union
@@ -90,10 +89,14 @@ def run_programming_task(
 
     # Get combined list of files (explicit + related) with normalized paths
     # and deduplicated using set operations
-    files_to_use = list({os.path.abspath(f) for f in (files or [])} | {
-        os.path.abspath(f) for f in _global_memory["related_files"].values()
-        if "related_files" in _global_memory
-    })
+    files_to_use = list(
+        {os.path.abspath(f) for f in (files or [])}
+        | {
+            os.path.abspath(f)
+            for f in _global_memory["related_files"].values()
+            if "related_files" in _global_memory
+        }
+    )
 
     # Add config file if specified
     if "config" in _global_memory and _global_memory["config"].get("aider_config"):
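The rewritten expression keeps the original behavior: explicit file arguments and related files are converted to absolute paths and unioned, so duplicates collapse. A self-contained illustration with made-up paths:

```python
import os

# Made-up values; in the tool they come from the `files` argument and from
# _global_memory["related_files"].
files = ["src/app.py", "./src/app.py"]
related_files = {"1": "src/app.py", "2": "src/util.py"}

files_to_use = list(
    {os.path.abspath(f) for f in (files or [])}
    | {os.path.abspath(f) for f in related_files.values()}
)
# Both spellings of src/app.py normalize to one absolute path, so the result
# holds two entries, not three.
print(files_to_use)
```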
@@ -21,12 +21,14 @@ def _truncate_for_log(text: str, max_length: int = 300) -> str:
 
 
 @tool
-def run_shell_command(command: str, expected_runtime_seconds: int = 30) -> Dict[str, Union[str, int, bool]]:
+def run_shell_command(
+    command: str, timeout: int = 30
+) -> Dict[str, Union[str, int, bool]]:
     """Execute a shell command and return its output.
 
     Args:
         command: The shell command to execute
-        expected_runtime_seconds: Expected runtime in seconds, defaults to 30.
+        timeout: Expected runtime in seconds, defaults to 30.
             If process exceeds 2x this value, it will be terminated gracefully.
             If process exceeds 3x this value, it will be killed forcefully.
@@ -79,7 +81,9 @@ def run_shell_command(command: str, expected_runtime_seconds: int = 30) -> Dict[
 
     try:
         print()
-        output, return_code = run_interactive_command(["/bin/bash", "-c", command], expected_runtime_seconds=expected_runtime_seconds)
+        output, return_code = run_interactive_command(
+            ["/bin/bash", "-c", command], expected_runtime_seconds=timeout
+        )
         print()
         result = {
             "output": truncate_output(output.decode()) if output else "",
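A hedged usage sketch of the renamed parameter; because run_shell_command is a LangChain tool, external callers would normally go through .invoke() with a keyword dict (the real tool may also prompt for confirmation, which this sketch ignores):

```python
from ra_aid.tools.shell import run_shell_command

# Keys mirror the new signature: `command` plus the renamed `timeout`.
result = run_shell_command.invoke({"command": "echo done", "timeout": 30})
print(result["output"])
```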
@@ -12,7 +12,10 @@ console = Console()
 
 
 @tool
 def put_complete_file_contents(
-    filepath: str, complete_file_contents: str = "", encoding: str = "utf-8", verbose: bool = True
+    filepath: str,
+    complete_file_contents: str = "",
+    encoding: str = "utf-8",
+    verbose: bool = True,
 ) -> Dict[str, any]:
     """Write the complete contents of a file, creating it if it doesn't exist.
     This tool is specifically for writing the entire contents of a file at once,
@@ -1,7 +1,6 @@
 """Tests for the interactive subprocess module."""
 
 import os
-import sys
 import tempfile
 
 import pytest
@@ -70,7 +69,9 @@ def test_large_output():
     # Clean up any leading artifacts
     output_cleaned = output.lstrip(b"^D")
     # Verify the output size is limited to 8000 bytes
-    assert len(output_cleaned) <= 8000, f"Output exceeded 8000 bytes: {len(output_cleaned)} bytes"
+    assert (
+        len(output_cleaned) <= 8000
+    ), f"Output exceeded 8000 bytes: {len(output_cleaned)} bytes"
     # Verify we have the last lines (should contain the highest numbers)
     assert b"Line 1000" in output_cleaned, "Missing last line of output"
     assert retcode == 0
@@ -85,11 +86,13 @@ def test_byte_limit():
     output_cleaned = output.lstrip(b"^D")
 
     # Verify exact 8000 byte limit
-    assert len(output_cleaned) <= 8000, f"Output exceeded 8000 bytes: {len(output_cleaned)} bytes"
+    assert (
+        len(output_cleaned) <= 8000
+    ), f"Output exceeded 8000 bytes: {len(output_cleaned)} bytes"
 
     # Get the last line number from the output
     last_line = output_cleaned.splitlines()[-1]
-    last_num = int(last_line.split(b':')[0])
+    last_num = int(last_line.split(b":")[0])
 
     # Verify we have a high number in the last line (should be near 200)
     assert last_num > 150, f"Expected last line number to be near 200, got {last_num}"
@@ -138,11 +141,15 @@ def test_cat_medium_file():
         # With 8000 byte limit, we expect to see the last portion of lines
         # The exact number may vary due to terminal settings, but we should
         # at least have the last lines of the file
-        assert len(lines) >= 90, f"Expected at least 90 lines due to 8000 byte limit, got {len(lines)}"
+        assert (
+            len(lines) >= 90
+        ), f"Expected at least 90 lines due to 8000 byte limit, got {len(lines)}"
 
         # Most importantly, verify we have the last lines
-        last_line = lines[-1].decode('utf-8')
-        assert "This is test line 499" in last_line, f"Expected last line to be 499, got: {last_line}"
+        last_line = lines[-1].decode("utf-8")
+        assert (
+            "This is test line 499" in last_line
+        ), f"Expected last line to be 499, got: {last_line}"
 
         assert retcode == 0
     finally:
@@ -155,9 +162,7 @@ def test_realtime_output():
     cmd = "echo 'first'; sleep 0.1; echo 'second'; sleep 0.1; echo 'third'"
     output, retcode = run_interactive_command(["/bin/bash", "-c", cmd])
     lines = [
-        line
-        for line in output.splitlines()
-        if b"Script" not in line and line.strip()
+        line for line in output.splitlines() if b"Script" not in line and line.strip()
     ]
     assert b"first" in lines[0]
     assert b"second" in lines[1]
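These tests pin down an 8000-byte cap on captured scrollback, keeping the newest lines. A toy illustration of just the size-limiting idea; the real implementation goes through a pyte HistoryScreen rather than simple slicing:

```python
def tail_bytes(data: bytes, limit: int = 8000) -> bytes:
    """Keep only the trailing `limit` bytes, mirroring the cap asserted above."""
    return data if len(data) <= limit else data[-limit:]


sample = b"\n".join(f"Line {i}".encode() for i in range(1, 1001))
tail = tail_bytes(sample)
assert len(tail) <= 8000
assert b"Line 1000" in tail  # the newest output survives, as the tests expect
```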
@@ -57,7 +57,13 @@ def test_initialize_expert_defaults(clean_env, mock_openai, monkeypatch):
     monkeypatch.setenv("EXPERT_OPENAI_API_KEY", "test-key")
     _llm = initialize_expert_llm("openai", "o1")
 
-    mock_openai.assert_called_once_with(api_key="test-key", model="o1", reasoning_effort="high", timeout=180, max_retries=5)
+    mock_openai.assert_called_once_with(
+        api_key="test-key",
+        model="o1",
+        reasoning_effort="high",
+        timeout=180,
+        max_retries=5,
+    )
 
 
 def test_initialize_expert_openai_custom(clean_env, mock_openai, monkeypatch):
@@ -66,8 +72,12 @@ def test_initialize_expert_openai_custom(clean_env, mock_openai, monkeypatch):
     _llm = initialize_expert_llm("openai", "gpt-4-preview")
 
     mock_openai.assert_called_once_with(
-        api_key="test-key", model="gpt-4-preview", temperature=0, reasoning_effort="high",
-        timeout=180, max_retries=5
+        api_key="test-key",
+        model="gpt-4-preview",
+        temperature=0,
+        reasoning_effort="high",
+        timeout=180,
+        max_retries=5,
     )
@@ -77,8 +87,11 @@ def test_initialize_expert_gemini(clean_env, mock_gemini, monkeypatch):
     _llm = initialize_expert_llm("gemini", "gemini-2.0-flash-thinking-exp-1219")
 
     mock_gemini.assert_called_once_with(
-        api_key="test-key", model="gemini-2.0-flash-thinking-exp-1219", temperature=0,
-        timeout=180, max_retries=5
+        api_key="test-key",
+        model="gemini-2.0-flash-thinking-exp-1219",
+        temperature=0,
+        timeout=180,
+        max_retries=5,
     )
@@ -88,8 +101,11 @@ def test_initialize_expert_anthropic(clean_env, mock_anthropic, monkeypatch):
     _llm = initialize_expert_llm("anthropic", "claude-3")
 
     mock_anthropic.assert_called_once_with(
-        api_key="test-key", model_name="claude-3", temperature=0,
-        timeout=180, max_retries=5
+        api_key="test-key",
+        model_name="claude-3",
+        temperature=0,
+        timeout=180,
+        max_retries=5,
     )
@@ -104,7 +120,7 @@ def test_initialize_expert_openrouter(clean_env, mock_openai, monkeypatch):
         model="models/mistral-large",
         temperature=0,
         timeout=180,
-        max_retries=5
+        max_retries=5,
     )
@@ -120,7 +136,7 @@ def test_initialize_expert_openai_compatible(clean_env, mock_openai, monkeypatch
         model="local-model",
         temperature=0,
         timeout=180,
-        max_retries=5
+        max_retries=5,
     )
@@ -151,16 +167,24 @@ def test_initialize_openai(clean_env, mock_openai):
     os.environ["OPENAI_API_KEY"] = "test-key"
     _model = initialize_llm("openai", "gpt-4", temperature=0.7)
 
-    mock_openai.assert_called_once_with(api_key="test-key", model="gpt-4", temperature=0.7, timeout=180, max_retries=5)
+    mock_openai.assert_called_once_with(
+        api_key="test-key", model="gpt-4", temperature=0.7, timeout=180, max_retries=5
+    )
 
 
 def test_initialize_gemini(clean_env, mock_gemini):
     """Test Gemini provider initialization"""
     os.environ["GEMINI_API_KEY"] = "test-key"
-    _model = initialize_llm("gemini", "gemini-2.0-flash-thinking-exp-1219", temperature=0.7)
+    _model = initialize_llm(
+        "gemini", "gemini-2.0-flash-thinking-exp-1219", temperature=0.7
+    )
 
     mock_gemini.assert_called_with(
-        api_key="test-key", model="gemini-2.0-flash-thinking-exp-1219", temperature=0.7, timeout=180, max_retries=5
+        api_key="test-key",
+        model="gemini-2.0-flash-thinking-exp-1219",
+        temperature=0.7,
+        timeout=180,
+        max_retries=5,
     )
@@ -169,7 +193,13 @@ def test_initialize_anthropic(clean_env, mock_anthropic):
     os.environ["ANTHROPIC_API_KEY"] = "test-key"
     _model = initialize_llm("anthropic", "claude-3", temperature=0.7)
 
-    mock_anthropic.assert_called_with(api_key="test-key", model_name="claude-3", temperature=0.7, timeout=180, max_retries=5)
+    mock_anthropic.assert_called_with(
+        api_key="test-key",
+        model_name="claude-3",
+        temperature=0.7,
+        timeout=180,
+        max_retries=5,
+    )
 
 
 def test_initialize_openrouter(clean_env, mock_openai):
@@ -239,10 +269,22 @@ def test_temperature_defaults(clean_env, mock_openai, mock_anthropic, mock_gemin
 
     # Test expert models don't require temperature
     initialize_expert_llm("openai", "o1")
-    mock_openai.assert_called_with(api_key="test-key", model="o1", reasoning_effort="high", timeout=180, max_retries=5)
+    mock_openai.assert_called_with(
+        api_key="test-key",
+        model="o1",
+        reasoning_effort="high",
+        timeout=180,
+        max_retries=5,
+    )
 
     initialize_expert_llm("openai", "o1-mini")
-    mock_openai.assert_called_with(api_key="test-key", model="o1-mini", reasoning_effort="high", timeout=180, max_retries=5)
+    mock_openai.assert_called_with(
+        api_key="test-key",
+        model="o1-mini",
+        reasoning_effort="high",
+        timeout=180,
+        max_retries=5,
+    )
 
 
 def test_explicit_temperature(clean_env, mock_openai, mock_anthropic, mock_gemini):
@@ -257,19 +299,31 @@ def test_explicit_temperature(clean_env, mock_openai, mock_anthropic, mock_gemin
     # Test OpenAI
     initialize_llm("openai", "test-model", temperature=test_temp)
     mock_openai.assert_called_with(
-        api_key="test-key", model="test-model", temperature=test_temp, timeout=180, max_retries=5
+        api_key="test-key",
+        model="test-model",
+        temperature=test_temp,
+        timeout=180,
+        max_retries=5,
     )
 
     # Test Gemini
     initialize_llm("gemini", "test-model", temperature=test_temp)
     mock_gemini.assert_called_with(
-        api_key="test-key", model="test-model", temperature=test_temp, timeout=180, max_retries=5
+        api_key="test-key",
+        model="test-model",
+        temperature=test_temp,
+        timeout=180,
+        max_retries=5,
    )
 
     # Test Anthropic
     initialize_llm("anthropic", "test-model", temperature=test_temp)
     mock_anthropic.assert_called_with(
-        api_key="test-key", model_name="test-model", temperature=test_temp, timeout=180, max_retries=5
+        api_key="test-key",
+        model_name="test-model",
+        temperature=test_temp,
+        timeout=180,
+        max_retries=5,
     )
 
     # Test OpenRouter
@@ -297,6 +351,7 @@ def test_get_available_openai_models_success():
     assert models == ["gpt-4"]
     mock_client.return_value.models.list.assert_called_once()
 
+
 def test_get_available_openai_models_failure():
     """Test graceful handling of model retrieval failure."""
     with mock.patch("ra_aid.llm.OpenAI") as mock_client:
@@ -305,32 +360,41 @@ def test_get_available_openai_models_failure():
         assert models == []
         mock_client.return_value.models.list.assert_called_once()
 
 
 def test_select_expert_model_explicit():
     """Test model selection with explicitly specified model."""
     model = select_expert_model("openai", "gpt-4")
     assert model == "gpt-4"
 
 
 def test_select_expert_model_non_openai():
     """Test model selection for non-OpenAI provider."""
     model = select_expert_model("anthropic", None)
     assert model is None
 
 
 def test_select_expert_model_priority():
     """Test model selection follows priority order."""
     available_models = ["gpt-4", "o1", "o3-mini"]
 
-    with mock.patch("ra_aid.llm.get_available_openai_models", return_value=available_models):
+    with mock.patch(
+        "ra_aid.llm.get_available_openai_models", return_value=available_models
+    ):
         model = select_expert_model("openai")
         assert model == "o3-mini"
 
 
 def test_select_expert_model_no_match():
     """Test model selection when no priority models available."""
     available_models = ["gpt-4", "gpt-3.5"]
 
-    with mock.patch("ra_aid.llm.get_available_openai_models", return_value=available_models):
+    with mock.patch(
+        "ra_aid.llm.get_available_openai_models", return_value=available_models
+    ):
         model = select_expert_model("openai")
         assert model is None
 
 
 def test_temperature_validation(clean_env, mock_openai):
     """Test temperature validation in command line arguments."""
     from ra_aid.__main__ import parse_arguments
@@ -358,34 +422,49 @@ def test_provider_name_validation():
             initialize_llm(provider, "test-model", temperature=0.7)
         except ValueError as e:
             if "Temperature must be provided" not in str(e):
-                pytest.fail(f"Valid provider {provider} raised unexpected ValueError: {e}")
+                pytest.fail(
+                    f"Valid provider {provider} raised unexpected ValueError: {e}"
+                )
 
 
-def test_initialize_llm_cross_provider(clean_env, mock_openai, mock_anthropic, mock_gemini, monkeypatch):
+def test_initialize_llm_cross_provider(
+    clean_env, mock_openai, mock_anthropic, mock_gemini, monkeypatch
+):
     """Test initializing different providers in sequence."""
     # Initialize OpenAI
     monkeypatch.setenv("OPENAI_API_KEY", "openai-key")
     _llm1 = initialize_llm("openai", "gpt-4", temperature=0.7)
-    mock_openai.assert_called_with(api_key="openai-key", model="gpt-4", temperature=0.7, timeout=180, max_retries=5)
+    mock_openai.assert_called_with(
+        api_key="openai-key", model="gpt-4", temperature=0.7, timeout=180, max_retries=5
+    )
 
     # Initialize Anthropic
     monkeypatch.setenv("ANTHROPIC_API_KEY", "anthropic-key")
     _llm2 = initialize_llm("anthropic", "claude-3", temperature=0.7)
     mock_anthropic.assert_called_with(
-        api_key="anthropic-key", model_name="claude-3", temperature=0.7, timeout=180, max_retries=5
+        api_key="anthropic-key",
+        model_name="claude-3",
+        temperature=0.7,
+        timeout=180,
+        max_retries=5,
     )
 
     # Initialize Gemini
     monkeypatch.setenv("GEMINI_API_KEY", "gemini-key")
     _llm3 = initialize_llm("gemini", "gemini-pro", temperature=0.7)
     mock_gemini.assert_called_with(
-        api_key="gemini-key", model="gemini-pro", temperature=0.7, timeout=180, max_retries=5
+        api_key="gemini-key",
+        model="gemini-pro",
+        temperature=0.7,
+        timeout=180,
+        max_retries=5,
    )
 
 
 @dataclass
 class Args:
     """Test arguments class."""
 
     provider: str
     expert_provider: str
     model: str = None
@@ -412,7 +491,13 @@ def test_environment_variable_precedence(clean_env, mock_openai, monkeypatch):
 
     # Test LLM client creation with expert mode
     _llm = create_llm_client("openai", "o1", is_expert=True)
-    mock_openai.assert_called_with(api_key="expert-key", model="o1", reasoning_effort="high", timeout=180, max_retries=5)
+    mock_openai.assert_called_with(
+        api_key="expert-key",
+        model="o1",
+        reasoning_effort="high",
+        timeout=180,
+        max_retries=5,
+    )
 
     # Test environment validation
     monkeypatch.setenv("EXPERT_OPENAI_API_KEY", "")
@@ -459,7 +544,9 @@ def mock_deepseek_reasoner():
     yield mock
 
 
-def test_initialize_deepseek(clean_env, mock_openai, mock_deepseek_reasoner, monkeypatch):
+def test_initialize_deepseek(
+    clean_env, mock_openai, mock_deepseek_reasoner, monkeypatch
+):
     """Test DeepSeek provider initialization with different models."""
     monkeypatch.setenv("DEEPSEEK_API_KEY", "test-key")
@@ -486,7 +573,9 @@ def test_initialize_deepseek(clean_env, mock_openai, mock_deepseek_reasoner, mon
     )
 
 
-def test_initialize_openrouter_deepseek(clean_env, mock_openai, mock_deepseek_reasoner, monkeypatch):
+def test_initialize_openrouter_deepseek(
+    clean_env, mock_openai, mock_deepseek_reasoner, monkeypatch
+):
     """Test OpenRouter DeepSeek model initialization."""
     monkeypatch.setenv("OPENROUTER_API_KEY", "test-key")
@@ -1,8 +1,10 @@
 import pytest
-from pathlib import Path
-from langchain_core.tools import Tool
 
-from ra_aid.tools.programmer import parse_aider_flags, run_programming_task, get_aider_executable
+from ra_aid.tools.programmer import (
+    get_aider_executable,
+    parse_aider_flags,
+    run_programming_task,
+)
 
 # Test cases for parse_aider_flags function
 test_cases = [
@@ -106,14 +108,20 @@ def test_path_normalization_and_deduplication(mocker, tmp_path):
 
     # Mock dependencies
     mocker.patch("ra_aid.tools.programmer._global_memory", {"related_files": {}})
-    mocker.patch("ra_aid.tools.programmer.get_aider_executable", return_value="/path/to/aider")
-    mock_run = mocker.patch("ra_aid.tools.programmer.run_interactive_command", return_value=(b"", 0))
+    mocker.patch(
+        "ra_aid.tools.programmer.get_aider_executable", return_value="/path/to/aider"
+    )
+    mock_run = mocker.patch(
+        "ra_aid.tools.programmer.run_interactive_command", return_value=(b"", 0)
+    )
 
     # Test duplicate paths
-    run_programming_task.invoke({
-        "instructions": "test instruction",
-        "files": [str(test_file), str(test_file)]  # Same path twice
-    })
+    run_programming_task.invoke(
+        {
+            "instructions": "test instruction",
+            "files": [str(test_file), str(test_file)],  # Same path twice
+        }
+    )
 
     # Get the command list passed to run_interactive_command
     cmd_args = mock_run.call_args[0][0]
@@ -122,16 +130,22 @@ def test_path_normalization_and_deduplication(mocker, tmp_path):
     assert test_file_count == 1, "Expected exactly one instance of test_file path"
 
     # Test mixed paths
-    run_programming_task.invoke({
-        "instructions": "test instruction",
-        "files": [str(test_file), str(new_file)]  # Two different paths
-    })
+    run_programming_task.invoke(
+        {
+            "instructions": "test instruction",
+            "files": [str(test_file), str(new_file)],  # Two different paths
+        }
+    )
 
     # Get the command list from the second call
     cmd_args = mock_run.call_args[0][0]
     # Verify both paths are present exactly once
-    assert sum(1 for arg in cmd_args if arg == str(test_file)) == 1, "Expected one instance of test_file"
-    assert sum(1 for arg in cmd_args if arg == str(new_file)) == 1, "Expected one instance of new_file"
+    assert (
+        sum(1 for arg in cmd_args if arg == str(test_file)) == 1
+    ), "Expected one instance of test_file"
+    assert (
+        sum(1 for arg in cmd_args if arg == str(new_file)) == 1
+    ), "Expected one instance of new_file"
 
 
 def test_get_aider_executable(mocker):
@@ -4,6 +4,7 @@ from unittest.mock import patch
 
 import pytest
 
+from ra_aid.config import DEFAULT_TEST_CMD_TIMEOUT
 from ra_aid.tools.handle_user_defined_test_cmd_execution import execute_test_command
 
 # Test cases for execute_test_command
@@ -195,9 +196,10 @@ def test_execute_test_command(
 
     if auto_test and test_attempts < config.get("max_test_cmd_retries", 5):
         if config.get("test_cmd"):
-            # Verify run_shell_command called with command and default timeout
+            # Verify run_shell_command called with command and configured timeout
             mock_run_cmd.assert_called_once_with(
-                config["test_cmd"], timeout=config.get("timeout", 30)
+                config["test_cmd"],
+                timeout=config.get("test_cmd_timeout", DEFAULT_TEST_CMD_TIMEOUT),
             )
 
     # Verify logging for max retries
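The test above expects run_shell_command to receive the timeout from the test_cmd_timeout config key. A small sketch of the assumed wiring from the new --test-cmd-timeout flag into that key; the exact argparse destination and config assembly are not shown in this hunk:

```python
from ra_aid.config import DEFAULT_TEST_CMD_TIMEOUT

parsed_test_cmd_timeout = 300  # hypothetical value parsed from --test-cmd-timeout

config = {
    "test_cmd": "pytest -q",
    "test_cmd_timeout": parsed_test_cmd_timeout,
}
# The executor reads it back with the same default used by the CLI option.
assert config.get("test_cmd_timeout", DEFAULT_TEST_CMD_TIMEOUT) == 300
```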
@@ -5,6 +5,7 @@ from unittest.mock import Mock, patch
 
 import pytest
 
+from ra_aid.config import DEFAULT_TEST_CMD_TIMEOUT
 from ra_aid.tools.handle_user_defined_test_cmd_execution import (
     TestCommandExecutor,
     TestState,
@@ -92,8 +93,9 @@ def test_run_test_command_timeout(test_executor):
             "ra_aid.tools.handle_user_defined_test_cmd_execution.logger.warning"
         ) as mock_logger,
     ):
-        # Create a TimeoutExpired exception
-        timeout_exc = subprocess.TimeoutExpired(cmd="test", timeout=30)
+        # Create a TimeoutExpired exception with configured timeout
+        timeout = test_executor.config.get("test_cmd_timeout", DEFAULT_TEST_CMD_TIMEOUT)
+        timeout_exc = subprocess.TimeoutExpired(cmd="test", timeout=timeout)
         mock_run.side_effect = timeout_exc
 
         test_executor.run_test_command("test", "original")
@@ -101,7 +103,7 @@ def test_run_test_command_timeout(test_executor):
         # Verify state updates
         assert not test_executor.state.should_break
         assert test_executor.state.test_attempts == 1
-        assert "timed out after 30 seconds" in test_executor.state.prompt
+        assert f"timed out after {timeout} seconds" in test_executor.state.prompt
 
         # Verify logging
         mock_logger.assert_called_once()
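For context, the exception these tests simulate is the standard library's own: subprocess raises TimeoutExpired when a command outlives its timeout, and the exception carries the limit that was exceeded. A minimal reproduction (POSIX, relies on the sleep binary):

```python
import subprocess

try:
    subprocess.run(["sleep", "5"], timeout=1)
except subprocess.TimeoutExpired as exc:
    # exc.timeout carries the limit that was exceeded, which is what the
    # executor interpolates into its retry prompt.
    print(f"timed out after {exc.timeout} seconds")
```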
@@ -472,6 +472,7 @@ def test_emit_related_files_path_normalization(reset_memory, tmp_path):
 
     # Change to the temp directory so relative paths work
     import os
+
     original_dir = os.getcwd()
     os.chdir(tmp_path)
|
||||||
|
|
@ -19,7 +19,9 @@ def test_basic_write_functionality(temp_test_dir):
|
||||||
test_file = temp_test_dir / "test.txt"
|
test_file = temp_test_dir / "test.txt"
|
||||||
content = "Hello, World!\nTest content"
|
content = "Hello, World!\nTest content"
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "complete_file_contents": content})
|
result = put_complete_file_contents(
|
||||||
|
{"filepath": str(test_file), "complete_file_contents": content}
|
||||||
|
)
|
||||||
|
|
||||||
# Verify file contents
|
# Verify file contents
|
||||||
assert test_file.read_text() == content
|
assert test_file.read_text() == content
|
||||||
|
|
@ -39,7 +41,9 @@ def test_directory_creation(temp_test_dir):
|
||||||
test_file = nested_dir / "test.txt"
|
test_file = nested_dir / "test.txt"
|
||||||
content = "Test content"
|
content = "Test content"
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "complete_file_contents": content})
|
result = put_complete_file_contents(
|
||||||
|
{"filepath": str(test_file), "complete_file_contents": content}
|
||||||
|
)
|
||||||
|
|
||||||
assert test_file.exists()
|
assert test_file.exists()
|
||||||
assert test_file.read_text() == content
|
assert test_file.read_text() == content
|
||||||
|
|
@ -53,14 +57,22 @@ def test_different_encodings(temp_test_dir):
|
||||||
|
|
||||||
# Test UTF-8
|
# Test UTF-8
|
||||||
result_utf8 = put_complete_file_contents(
|
result_utf8 = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "complete_file_contents": content, "encoding": "utf-8"}
|
{
|
||||||
|
"filepath": str(test_file),
|
||||||
|
"complete_file_contents": content,
|
||||||
|
"encoding": "utf-8",
|
||||||
|
}
|
||||||
)
|
)
|
||||||
assert result_utf8["success"] is True
|
assert result_utf8["success"] is True
|
||||||
assert test_file.read_text(encoding="utf-8") == content
|
assert test_file.read_text(encoding="utf-8") == content
|
||||||
|
|
||||||
# Test UTF-16
|
# Test UTF-16
|
||||||
result_utf16 = put_complete_file_contents(
|
result_utf16 = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "complete_file_contents": content, "encoding": "utf-16"}
|
{
|
||||||
|
"filepath": str(test_file),
|
||||||
|
"complete_file_contents": content,
|
||||||
|
"encoding": "utf-16",
|
||||||
|
}
|
||||||
)
|
)
|
||||||
assert result_utf16["success"] is True
|
assert result_utf16["success"] is True
|
||||||
assert test_file.read_text(encoding="utf-16") == content
|
assert test_file.read_text(encoding="utf-16") == content
|
||||||
|
|
@ -145,7 +157,9 @@ def test_large_file_write(temp_test_dir):
|
||||||
test_file = temp_test_dir / "large.txt"
|
test_file = temp_test_dir / "large.txt"
|
||||||
content = "Large content\n" * 1000 # Create substantial content
|
content = "Large content\n" * 1000 # Create substantial content
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "complete_file_contents": content})
|
result = put_complete_file_contents(
|
||||||
|
{"filepath": str(test_file), "complete_file_contents": content}
|
||||||
|
)
|
||||||
|
|
||||||
assert test_file.exists()
|
assert test_file.exists()
|
||||||
assert test_file.read_text() == content
|
assert test_file.read_text() == content
|
||||||
|
|
|
||||||
|
|
@@ -60,11 +60,7 @@ def sample_git_repo(empty_git_repo):
 def git_repo_with_untracked(sample_git_repo):
     """Create a git repository with both tracked and untracked files."""
     # Create untracked files
-    untracked_files = [
-        "untracked.txt",
-        "src/untracked.py",
-        "docs/draft.md"
-    ]
+    untracked_files = ["untracked.txt", "src/untracked.py", "docs/draft.md"]
 
     for file_path in untracked_files:
         full_path = sample_git_repo / file_path
@@ -109,7 +105,7 @@ docs/draft.md
     ignored_files = [
         "ignored.txt",
         "temp/temp.txt",
-        "src/__pycache__/main.cpython-39.pyc"
+        "src/__pycache__/main.cpython-39.pyc",
     ]
 
     for file_path in ignored_files:
@@ -128,14 +124,11 @@ def git_repo_with_aider_files(sample_git_repo):
         ".aider.chat.history.md",
         ".aider.input.history",
         ".aider.tags.cache.v3/some_file",
-        "src/.aider.local.settings"
+        "src/.aider.local.settings",
     ]
 
     # Create regular files
-    regular_files = [
-        "main.cpp",
-        "src/helper.cpp"
-    ]
+    regular_files = ["main.cpp", "src/helper.cpp"]
 
     # Create all files
     for file_path in aider_files + regular_files:
@@ -354,14 +347,15 @@ def mock_is_git_repo():
 @pytest.fixture
 def mock_os_path(monkeypatch):
     """Mock os.path functions."""
+
     def mock_exists(path):
         return True
 
     def mock_isdir(path):
         return True
 
-    monkeypatch.setattr(os.path, 'exists', mock_exists)
-    monkeypatch.setattr(os.path, 'isdir', mock_isdir)
+    monkeypatch.setattr(os.path, "exists", mock_exists)
+    monkeypatch.setattr(os.path, "isdir", mock_isdir)
     return monkeypatch
@@ -390,14 +384,18 @@ def test_get_file_listing_git_error(mock_subprocess, mock_is_git_repo, mock_os_p
     get_file_listing(DUMMY_PATH)
 
 
-def test_get_file_listing_permission_error(mock_subprocess, mock_is_git_repo, mock_os_path):
+def test_get_file_listing_permission_error(
+    mock_subprocess, mock_is_git_repo, mock_os_path
+):
     """Test get_file_listing with permission error."""
     mock_subprocess.side_effect = PermissionError("Permission denied")
     with pytest.raises(DirectoryAccessError):
         get_file_listing(DUMMY_PATH)
 
 
-def test_get_file_listing_unexpected_error(mock_subprocess, mock_is_git_repo, mock_os_path):
+def test_get_file_listing_unexpected_error(
+    mock_subprocess, mock_is_git_repo, mock_os_path
+):
     """Test get_file_listing with unexpected error."""
     mock_subprocess.side_effect = Exception("Unexpected error")
     with pytest.raises(FileListerError):
@@ -420,6 +418,7 @@ def test_get_file_listing_with_untracked(git_repo_with_untracked):
     expected_count = 8  # 5 tracked + 3 untracked (excluding .gitignore)
     assert count == expected_count
 
+
 def test_get_file_listing_with_untracked_and_limit(git_repo_with_untracked):
     """Test that file listing with limit works correctly with untracked files."""
     limit = 3
@@ -434,6 +433,7 @@ def test_get_file_listing_with_untracked_and_limit(git_repo_with_untracked):
     # Files should be sorted, so we can check first 3
     assert files == sorted(files)
 
+
 def test_get_file_listing_respects_gitignore(git_repo_with_ignores):
     """Test that file listing respects .gitignore rules."""
     # First test with hidden files excluded (default)
@@ -468,6 +468,7 @@ def test_get_file_listing_respects_gitignore(git_repo_with_ignores):
     expected_count = 8  # 5 tracked + 2 untracked + .gitignore
     assert count == expected_count
 
+
 def test_aider_files_excluded(git_repo_with_aider_files):
     """Test that .aider files are excluded from the file listing."""
     files, count = get_file_listing(str(git_repo_with_aider_files))
@@ -487,21 +488,14 @@ def test_aider_files_excluded(git_repo_with_aider_files):
     assert count == expected_count
     assert len(files) == expected_count
 
 
 def test_hidden_files_excluded_by_default(git_repo_with_aider_files):
     """Test that hidden files are excluded by default."""
     # Create some hidden files
-    hidden_files = [
-        ".config",
-        ".env",
-        "src/.local",
-        ".gitattributes"
-    ]
+    hidden_files = [".config", ".env", "src/.local", ".gitattributes"]
 
     # Create regular files
-    regular_files = [
-        "main.cpp",
-        "src/helper.cpp"
-    ]
+    regular_files = ["main.cpp", "src/helper.cpp"]
 
     # Create all files
     for file_path in hidden_files + regular_files: