support default temp on a per-model basis; show status panel
This commit is contained in:
parent
264f5025ed
commit
a1371fc7e0
|
|
@ -7,6 +7,7 @@ from datetime import datetime
|
||||||
from langgraph.checkpoint.memory import MemorySaver
|
from langgraph.checkpoint.memory import MemorySaver
|
||||||
from rich.console import Console
|
from rich.console import Console
|
||||||
from rich.panel import Panel
|
from rich.panel import Panel
|
||||||
|
from rich.text import Text
|
||||||
|
|
||||||
from ra_aid import print_error, print_stage_header
|
from ra_aid import print_error, print_stage_header
|
||||||
from ra_aid.__version__ import __version__
|
from ra_aid.__version__ import __version__
|
||||||
|
|
@ -282,27 +283,43 @@ def main():
|
||||||
) # Will exit if main env vars missing
|
) # Will exit if main env vars missing
|
||||||
logger.debug("Environment validation successful")
|
logger.debug("Environment validation successful")
|
||||||
|
|
||||||
if expert_missing:
|
# Validate model configuration early
|
||||||
console.print(
|
from ra_aid.models_params import models_params
|
||||||
Panel(
|
model_config = models_params.get(args.provider, {}).get(args.model or "", {})
|
||||||
"[yellow]Expert tools disabled due to missing configuration:[/yellow]\n"
|
supports_temperature = model_config.get("supports_temperature", args.provider in ["anthropic", "openai", "openrouter", "openai-compatible", "deepseek"])
|
||||||
+ "\n".join(f"- {m}" for m in expert_missing)
|
|
||||||
+ "\nSet the required environment variables or args to enable expert mode.",
|
if supports_temperature and args.temperature is None:
|
||||||
title="Expert Tools Disabled",
|
args.temperature = model_config.get("default_temperature")
|
||||||
style="yellow",
|
if args.temperature is None:
|
||||||
)
|
print_error(f"Temperature must be provided for model {args.model} which supports temperature")
|
||||||
)
|
sys.exit(1)
|
||||||
|
logger.debug(f"Using default temperature {args.temperature} for model {args.model}")
|
||||||
|
|
||||||
if web_research_missing:
|
# Display status line
|
||||||
console.print(
|
status = Text()
|
||||||
Panel(
|
status.append("🤖 ")
|
||||||
"[yellow]Web research disabled due to missing configuration:[/yellow]\n"
|
status.append(f"{args.provider}/{args.model}")
|
||||||
+ "\n".join(f"- {m}" for m in web_research_missing)
|
status.append(f" @ T{args.temperature or 'N/A'}")
|
||||||
+ "\nSet the required environment variables to enable web research.",
|
|
||||||
title="Web Research Disabled",
|
if expert_enabled:
|
||||||
style="yellow",
|
status.append(" | 🤔 ")
|
||||||
)
|
status.append(f"{args.expert_provider}/{args.expert_model}")
|
||||||
|
else:
|
||||||
|
status.append(" | 🤔 Expert: ")
|
||||||
|
status.append("Disabled", style="italic")
|
||||||
|
|
||||||
|
status.append(" | 🔍 Search: ")
|
||||||
|
status.append("Enabled" if web_research_enabled else "Disabled",
|
||||||
|
style=None if web_research_enabled else "italic")
|
||||||
|
|
||||||
|
console.print(
|
||||||
|
Panel(
|
||||||
|
status,
|
||||||
|
title="Config",
|
||||||
|
style="bold blue",
|
||||||
|
padding=(0, 1)
|
||||||
)
|
)
|
||||||
|
)
|
||||||
|
|
||||||
# Handle chat mode
|
# Handle chat mode
|
||||||
if args.chat:
|
if args.chat:
|
||||||
|
|
|
||||||
|
|
@ -1,8 +1,11 @@
|
||||||
"""Module for efficient file listing using git."""
|
"""Module for efficient file listing using git."""
|
||||||
|
|
||||||
import subprocess
|
import subprocess
|
||||||
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List, Optional, Tuple
|
from typing import List, Optional, Tuple
|
||||||
|
import tempfile
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
|
||||||
class FileListerError(Exception):
|
class FileListerError(Exception):
|
||||||
|
|
@ -70,7 +73,7 @@ def is_git_repo(directory: str) -> bool:
|
||||||
|
|
||||||
|
|
||||||
def get_file_listing(
|
def get_file_listing(
|
||||||
directory: str, limit: Optional[int] = None
|
directory: str, limit: Optional[int] = None, include_hidden: bool = False
|
||||||
) -> Tuple[List[str], int]:
|
) -> Tuple[List[str], int]:
|
||||||
"""
|
"""
|
||||||
Get a list of tracked files in a git repository.
|
Get a list of tracked files in a git repository.
|
||||||
|
|
@ -82,6 +85,7 @@ def get_file_listing(
|
||||||
Args:
|
Args:
|
||||||
directory: Path to the git repository
|
directory: Path to the git repository
|
||||||
limit: Optional maximum number of files to return
|
limit: Optional maximum number of files to return
|
||||||
|
include_hidden: Whether to include hidden files (starting with .) in the results
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple[List[str], int]: Tuple containing:
|
Tuple[List[str], int]: Tuple containing:
|
||||||
|
|
@ -95,42 +99,66 @@ def get_file_listing(
|
||||||
FileListerError: For other unexpected errors
|
FileListerError: For other unexpected errors
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
# Check if directory is a git repo first
|
# Check if directory exists and is accessible
|
||||||
|
if not os.path.exists(directory):
|
||||||
|
raise DirectoryNotFoundError(f"Directory not found: {directory}")
|
||||||
|
if not os.path.isdir(directory):
|
||||||
|
raise DirectoryNotFoundError(f"Not a directory: {directory}")
|
||||||
|
|
||||||
|
# Check if it's a git repository
|
||||||
if not is_git_repo(directory):
|
if not is_git_repo(directory):
|
||||||
return [], 0
|
return [], 0
|
||||||
|
|
||||||
# Run git ls-files
|
# Get list of files from git ls-files
|
||||||
result = subprocess.run(
|
try:
|
||||||
["git", "ls-files"],
|
# Get both tracked and untracked files
|
||||||
cwd=directory,
|
tracked_files_process = subprocess.run(
|
||||||
capture_output=True,
|
["git", "ls-files"],
|
||||||
text=True,
|
cwd=directory,
|
||||||
check=True,
|
capture_output=True,
|
||||||
)
|
text=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
untracked_files_process = subprocess.run(
|
||||||
|
["git", "ls-files", "--others", "--exclude-standard"],
|
||||||
|
cwd=directory,
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
raise GitCommandError(f"Git command failed: {e}")
|
||||||
|
except PermissionError as e:
|
||||||
|
raise DirectoryAccessError(f"Permission denied: {e}")
|
||||||
|
|
||||||
# Process the output
|
# Combine and process the files
|
||||||
files = [line.strip() for line in result.stdout.splitlines() if line.strip()]
|
all_files = []
|
||||||
|
for file in tracked_files_process.stdout.splitlines() + untracked_files_process.stdout.splitlines():
|
||||||
|
file = file.strip()
|
||||||
|
if not file:
|
||||||
|
continue
|
||||||
|
# Skip hidden files unless explicitly included
|
||||||
|
if not include_hidden and (file.startswith(".") or any(part.startswith(".") for part in file.split("/"))):
|
||||||
|
continue
|
||||||
|
# Skip .aider files
|
||||||
|
if ".aider" in file:
|
||||||
|
continue
|
||||||
|
all_files.append(file)
|
||||||
|
|
||||||
# Deduplicate and sort for consistency
|
# Remove duplicates and sort
|
||||||
files = list(dict.fromkeys(files)) # Remove duplicates while preserving order
|
all_files = sorted(set(all_files))
|
||||||
|
total_count = len(all_files)
|
||||||
|
|
||||||
# Sort for consistency
|
# Apply limit if specified
|
||||||
files.sort()
|
|
||||||
|
|
||||||
# Get total count before truncation
|
|
||||||
total_count = len(files)
|
|
||||||
|
|
||||||
# Truncate if limit specified
|
|
||||||
if limit is not None:
|
if limit is not None:
|
||||||
files = files[:limit]
|
all_files = all_files[:limit]
|
||||||
|
|
||||||
return files, total_count
|
return all_files, total_count
|
||||||
|
|
||||||
except subprocess.CalledProcessError as e:
|
except (DirectoryNotFoundError, DirectoryAccessError, GitCommandError) as e:
|
||||||
raise GitCommandError(f"Git command failed: {e}")
|
# Re-raise known exceptions
|
||||||
|
raise
|
||||||
except PermissionError as e:
|
except PermissionError as e:
|
||||||
raise DirectoryAccessError(f"Cannot access directory {directory}: {e}")
|
raise DirectoryAccessError(f"Permission denied: {e}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if isinstance(e, FileListerError):
|
raise FileListerError(f"Unexpected error: {e}")
|
||||||
raise
|
|
||||||
raise FileListerError(f"Error listing files: {e}")
|
|
||||||
|
|
|
||||||
|
|
@ -170,10 +170,10 @@ def create_llm_client(
|
||||||
# Handle temperature settings
|
# Handle temperature settings
|
||||||
if is_expert:
|
if is_expert:
|
||||||
temp_kwargs = {"temperature": 0} if supports_temperature else {}
|
temp_kwargs = {"temperature": 0} if supports_temperature else {}
|
||||||
elif temperature is not None and supports_temperature:
|
elif supports_temperature:
|
||||||
|
if temperature is None:
|
||||||
|
raise ValueError(f"Temperature must be provided for model {model_name} which supports temperature")
|
||||||
temp_kwargs = {"temperature": temperature}
|
temp_kwargs = {"temperature": temperature}
|
||||||
elif provider == "openai-compatible" and supports_temperature:
|
|
||||||
temp_kwargs = {"temperature": 0.3}
|
|
||||||
else:
|
else:
|
||||||
temp_kwargs = {}
|
temp_kwargs = {}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -3,28 +3,29 @@ List of model parameters
|
||||||
"""
|
"""
|
||||||
|
|
||||||
DEFAULT_TOKEN_LIMIT = 100000
|
DEFAULT_TOKEN_LIMIT = 100000
|
||||||
|
DEFAULT_TEMPERATURE = 0.7
|
||||||
|
|
||||||
models_params = {
|
models_params = {
|
||||||
"openai": {
|
"openai": {
|
||||||
"gpt-3.5-turbo-0125": {"token_limit": 16385, "supports_temperature": True},
|
"gpt-3.5-turbo-0125": {"token_limit": 16385, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5": {"token_limit": 4096, "supports_temperature": True},
|
"gpt-3.5": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5-turbo": {"token_limit": 16385, "supports_temperature": True},
|
"gpt-3.5-turbo": {"token_limit": 16385, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5-turbo-1106": {"token_limit": 16385, "supports_temperature": True},
|
"gpt-3.5-turbo-1106": {"token_limit": 16385, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5-turbo-instruct": {"token_limit": 4096, "supports_temperature": True},
|
"gpt-3.5-turbo-instruct": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-0125-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-0125-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-turbo-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-turbo-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-turbo": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-turbo": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-turbo-2024-04-09": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-turbo-2024-04-09": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-1106-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-1106-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-vision-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-vision-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4": {"token_limit": 8192, "supports_temperature": True},
|
"gpt-4": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-0613": {"token_limit": 8192, "supports_temperature": True},
|
"gpt-4-0613": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-32k": {"token_limit": 32768, "supports_temperature": True},
|
"gpt-4-32k": {"token_limit": 32768, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-32k-0613": {"token_limit": 32768, "supports_temperature": True},
|
"gpt-4-32k-0613": {"token_limit": 32768, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4o": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4o": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4o-2024-08-06": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4o-2024-08-06": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4o-2024-05-13": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4o-2024-05-13": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4o-mini": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4o-mini": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"o1-preview": {"token_limit": 128000, "supports_temperature": False},
|
"o1-preview": {"token_limit": 128000, "supports_temperature": False},
|
||||||
"o1-mini": {"token_limit": 128000, "supports_temperature": False},
|
"o1-mini": {"token_limit": 128000, "supports_temperature": False},
|
||||||
"o1-preview": {"token_limit": 128000, "supports_temperature": False},
|
"o1-preview": {"token_limit": 128000, "supports_temperature": False},
|
||||||
|
|
@ -32,301 +33,343 @@ models_params = {
|
||||||
"o3-mini": {"token_limit": 200000, "supports_temperature": False},
|
"o3-mini": {"token_limit": 200000, "supports_temperature": False},
|
||||||
},
|
},
|
||||||
"azure_openai": {
|
"azure_openai": {
|
||||||
"gpt-3.5-turbo-0125": {"token_limit": 16385, "supports_temperature": True},
|
"gpt-3.5-turbo-0125": {"token_limit": 16385, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5": {"token_limit": 4096, "supports_temperature": True},
|
"gpt-3.5": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5-turbo": {"token_limit": 16385, "supports_temperature": True},
|
"gpt-3.5-turbo": {"token_limit": 16385, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5-turbo-1106": {"token_limit": 16385, "supports_temperature": True},
|
"gpt-3.5-turbo-1106": {"token_limit": 16385, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-3.5-turbo-instruct": {"token_limit": 4096, "supports_temperature": True},
|
"gpt-3.5-turbo-instruct": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-0125-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-0125-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-turbo-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-turbo-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-turbo": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-turbo": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-turbo-2024-04-09": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-turbo-2024-04-09": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-1106-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-1106-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-vision-preview": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4-vision-preview": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4": {"token_limit": 8192, "supports_temperature": True},
|
"gpt-4": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-0613": {"token_limit": 8192, "supports_temperature": True},
|
"gpt-4-0613": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-32k": {"token_limit": 32768, "supports_temperature": True},
|
"gpt-4-32k": {"token_limit": 32768, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4-32k-0613": {"token_limit": 32768, "supports_temperature": True},
|
"gpt-4-32k-0613": {"token_limit": 32768, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4o": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4o": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gpt-4o-mini": {"token_limit": 128000, "supports_temperature": True},
|
"gpt-4o-mini": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"chatgpt-4o-latest": {"token_limit": 128000, "supports_temperature": True},
|
"chatgpt-4o-latest": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"o1-preview": {"token_limit": 128000, "supports_temperature": False},
|
"o1-preview": {"token_limit": 128000, "supports_temperature": False},
|
||||||
"o1-mini": {"token_limit": 128000, "supports_temperature": False},
|
"o1-mini": {"token_limit": 128000, "supports_temperature": False},
|
||||||
},
|
},
|
||||||
"google_genai": {
|
"google_genai": {
|
||||||
"gemini-pro": {"token_limit": 128000, "supports_temperature": True},
|
"gemini-pro": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemini-1.5-flash-latest": {
|
"gemini-1.5-flash-latest": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"gemini-1.5-pro-latest": {"token_limit": 128000, "supports_temperature": True},
|
"gemini-1.5-pro-latest": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"models/embedding-001": {"token_limit": 2048, "supports_temperature": True},
|
"models/embedding-001": {"token_limit": 2048, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"google_vertexai": {
|
"google_vertexai": {
|
||||||
"gemini-1.5-flash": {"token_limit": 128000, "supports_temperature": True},
|
"gemini-1.5-flash": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemini-1.5-pro": {"token_limit": 128000, "supports_temperature": True},
|
"gemini-1.5-pro": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemini-1.0-pro": {"token_limit": 128000, "supports_temperature": True},
|
"gemini-1.0-pro": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"ollama": {
|
"ollama": {
|
||||||
"command-r": {"token_limit": 12800, "supports_temperature": True},
|
"command-r": {"token_limit": 12800, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"codellama": {"token_limit": 16000, "supports_temperature": True},
|
"codellama": {"token_limit": 16000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"dbrx": {"token_limit": 32768, "supports_temperature": True},
|
"dbrx": {"token_limit": 32768, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"deepseek-coder:33b": {"token_limit": 16000, "supports_temperature": True},
|
"deepseek-coder:33b": {"token_limit": 16000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"falcon": {"token_limit": 2048, "supports_temperature": True},
|
"falcon": {"token_limit": 2048, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama2": {"token_limit": 4096, "supports_temperature": True},
|
"llama2": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama2:7b": {"token_limit": 4096, "supports_temperature": True},
|
"llama2:7b": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama2:13b": {"token_limit": 4096, "supports_temperature": True},
|
"llama2:13b": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama2:70b": {"token_limit": 4096, "supports_temperature": True},
|
"llama2:70b": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3": {"token_limit": 8192, "supports_temperature": True},
|
"llama3": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3:8b": {"token_limit": 8192, "supports_temperature": True},
|
"llama3:8b": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3:70b": {"token_limit": 8192, "supports_temperature": True},
|
"llama3:70b": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.1": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.1": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.1:8b": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.1:8b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.1:70b": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.1:70b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"lama3.1:405b": {"token_limit": 128000, "supports_temperature": True},
|
"lama3.1:405b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.2": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.2": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.2:1b": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.2:1b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.2:3b": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.2:3b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3.3:70b": {"token_limit": 128000, "supports_temperature": True},
|
"llama3.3:70b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"scrapegraph": {"token_limit": 8192, "supports_temperature": True},
|
"scrapegraph": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mistral-small": {"token_limit": 128000, "supports_temperature": True},
|
"mistral-small": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mistral-openorca": {"token_limit": 32000, "supports_temperature": True},
|
"mistral-openorca": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mistral-large": {"token_limit": 128000, "supports_temperature": True},
|
"mistral-large": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"grok-1": {"token_limit": 8192, "supports_temperature": True},
|
"grok-1": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llava": {"token_limit": 4096, "supports_temperature": True},
|
"llava": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mixtral:8x22b-instruct": {"token_limit": 65536, "supports_temperature": True},
|
"mixtral:8x22b-instruct": {"token_limit": 65536, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"nomic-embed-text": {"token_limit": 8192, "supports_temperature": True},
|
"nomic-embed-text": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"nous-hermes2:34b": {"token_limit": 4096, "supports_temperature": True},
|
"nous-hermes2:34b": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"orca-mini": {"token_limit": 2048, "supports_temperature": True},
|
"orca-mini": {"token_limit": 2048, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"phi3:3.8b": {"token_limit": 12800, "supports_temperature": True},
|
"phi3:3.8b": {"token_limit": 12800, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"phi3:14b": {"token_limit": 128000, "supports_temperature": True},
|
"phi3:14b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:0.5b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:0.5b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:1.8b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:1.8b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:4b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:4b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:14b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:14b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:32b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:32b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:72b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:72b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"qwen:110b": {"token_limit": 32000, "supports_temperature": True},
|
"qwen:110b": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"stablelm-zephyr": {"token_limit": 8192, "supports_temperature": True},
|
"stablelm-zephyr": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"wizardlm2:8x22b": {"token_limit": 65536, "supports_temperature": True},
|
"wizardlm2:8x22b": {"token_limit": 65536, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mistral": {"token_limit": 128000, "supports_temperature": True},
|
"mistral": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemma2": {"token_limit": 128000, "supports_temperature": True},
|
"gemma2": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemma2:9b": {"token_limit": 128000, "supports_temperature": True},
|
"gemma2:9b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemma2:27b": {"token_limit": 128000, "supports_temperature": True},
|
"gemma2:27b": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
# embedding models
|
# embedding models
|
||||||
"shaw/dmeta-embedding-zh-small-q4": {
|
"shaw/dmeta-embedding-zh-small-q4": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"shaw/dmeta-embedding-zh-q4": {
|
"shaw/dmeta-embedding-zh-q4": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"chevalblanc/acge_text_embedding": {
|
"chevalblanc/acge_text_embedding": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"martcreation/dmeta-embedding-zh": {
|
"martcreation/dmeta-embedding-zh": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"snowflake-arctic-embed": {"token_limit": 8192, "supports_temperature": True},
|
"snowflake-arctic-embed": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mxbai-embed-large": {"token_limit": 512, "supports_temperature": True},
|
"mxbai-embed-large": {"token_limit": 512, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"oneapi": {"qwen-turbo": {"token_limit": 6000, "supports_temperature": True}},
|
"oneapi": {"qwen-turbo": {"token_limit": 6000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE}},
|
||||||
"nvidia": {
|
"nvidia": {
|
||||||
"meta/llama3-70b-instruct": {"token_limit": 419, "supports_temperature": True},
|
"meta/llama3-70b-instruct": {"token_limit": 419, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"meta/llama3-8b-instruct": {"token_limit": 419, "supports_temperature": True},
|
"meta/llama3-8b-instruct": {"token_limit": 419, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"nemotron-4-340b-instruct": {"token_limit": 1024, "supports_temperature": True},
|
"nemotron-4-340b-instruct": {"token_limit": 1024, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"databricks/dbrx-instruct": {"token_limit": 4096, "supports_temperature": True},
|
"databricks/dbrx-instruct": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"google/codegemma-7b": {"token_limit": 8192, "supports_temperature": True},
|
"google/codegemma-7b": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"google/gemma-2b": {"token_limit": 2048, "supports_temperature": True},
|
"google/gemma-2b": {"token_limit": 2048, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"google/gemma-7b": {"token_limit": 8192, "supports_temperature": True},
|
"google/gemma-7b": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"google/recurrentgemma-2b": {"token_limit": 2048, "supports_temperature": True},
|
"google/recurrentgemma-2b": {"token_limit": 2048, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"meta/codellama-70b": {"token_limit": 16384, "supports_temperature": True},
|
"meta/codellama-70b": {"token_limit": 16384, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"meta/llama2-70b": {"token_limit": 4096, "supports_temperature": True},
|
"meta/llama2-70b": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"microsoft/phi-3-mini-128k-instruct": {
|
"microsoft/phi-3-mini-128k-instruct": {
|
||||||
"token_limit": 122880,
|
"token_limit": 122880,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistralai/mistral-7b-instruct-v0.2": {
|
"mistralai/mistral-7b-instruct-v0.2": {
|
||||||
"token_limit": 4096,
|
"token_limit": 4096,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistralai/mistral-large": {"token_limit": 8192, "supports_temperature": True},
|
"mistralai/mistral-large": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mistralai/mixtral-8x22b-instruct-v0.1": {
|
"mistralai/mixtral-8x22b-instruct-v0.1": {
|
||||||
"token_limit": 32768,
|
"token_limit": 32768,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistralai/mixtral-8x7b-instruct-v0.1": {
|
"mistralai/mixtral-8x7b-instruct-v0.1": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"snowflake/arctic": {"token_limit": 16384, "supports_temperature": True},
|
"snowflake/arctic": {"token_limit": 16384, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"groq": {
|
"groq": {
|
||||||
"llama3-8b-8192": {"token_limit": 8192, "supports_temperature": True},
|
"llama3-8b-8192": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"llama3-70b-8192": {"token_limit": 8192, "supports_temperature": True},
|
"llama3-70b-8192": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mixtral-8x7b-32768": {"token_limit": 32768, "supports_temperature": True},
|
"mixtral-8x7b-32768": {"token_limit": 32768, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"gemma-7b-it": {"token_limit": 8192, "supports_temperature": True},
|
"gemma-7b-it": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude-3-haiku-20240307'": {"token_limit": 8192, "supports_temperature": True},
|
"claude-3-haiku-20240307'": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"toghetherai": {
|
"toghetherai": {
|
||||||
"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo": {
|
"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": {
|
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistralai/Mixtral-8x22B-Instruct-v0.1": {
|
"mistralai/Mixtral-8x22B-Instruct-v0.1": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"stabilityai/stable-diffusion-xl-base-1.0": {
|
"stabilityai/stable-diffusion-xl-base-1.0": {
|
||||||
"token_limit": 2048,
|
"token_limit": 2048,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo": {
|
"meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"NousResearch/Hermes-3-Llama-3.1-405B-Turbo": {
|
"NousResearch/Hermes-3-Llama-3.1-405B-Turbo": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"Gryphe/MythoMax-L2-13b-Lite": {
|
"Gryphe/MythoMax-L2-13b-Lite": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"Salesforce/Llama-Rank-V1": {"token_limit": 8192, "supports_temperature": True},
|
"Salesforce/Llama-Rank-V1": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"meta-llama/Meta-Llama-Guard-3-8B": {
|
"meta-llama/Meta-Llama-Guard-3-8B": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta-llama/Meta-Llama-3-70B-Instruct-Turbo": {
|
"meta-llama/Meta-Llama-3-70B-Instruct-Turbo": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta-llama/Llama-3-8b-chat-hf": {
|
"meta-llama/Llama-3-8b-chat-hf": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta-llama/Llama-3-70b-chat-hf": {
|
"meta-llama/Llama-3-70b-chat-hf": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"Qwen/Qwen2-72B-Instruct": {
|
"Qwen/Qwen2-72B-Instruct": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"google/gemma-2-27b-it": {"token_limit": 8192, "supports_temperature": True},
|
"google/gemma-2-27b-it": {"token_limit": 8192, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"anthropic": {
|
"anthropic": {
|
||||||
"claude_instant": {"token_limit": 100000, "supports_temperature": True},
|
"claude_instant": {"token_limit": 100000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude2": {"token_limit": 9000, "supports_temperature": True},
|
"claude2": {"token_limit": 9000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude2.1": {"token_limit": 200000, "supports_temperature": True},
|
"claude2.1": {"token_limit": 200000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude3": {"token_limit": 200000, "supports_temperature": True},
|
"claude3": {"token_limit": 200000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude3.5": {"token_limit": 200000, "supports_temperature": True},
|
"claude3.5": {"token_limit": 200000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude-3-opus-20240229": {"token_limit": 200000, "supports_temperature": True},
|
"claude-3-opus-20240229": {"token_limit": 200000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"claude-3-sonnet-20240229": {
|
"claude-3-sonnet-20240229": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"claude-3-haiku-20240307": {
|
"claude-3-haiku-20240307": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"claude-3-5-sonnet-20240620": {
|
"claude-3-5-sonnet-20240620": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"claude-3-5-sonnet-20241022": {
|
"claude-3-5-sonnet-20241022": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"claude-3-5-haiku-latest": {
|
"claude-3-5-haiku-latest": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"bedrock": {
|
"bedrock": {
|
||||||
"anthropic.claude-3-haiku-20240307-v1:0": {
|
"anthropic.claude-3-haiku-20240307-v1:0": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"anthropic.claude-3-sonnet-20240229-v1:0": {
|
"anthropic.claude-3-sonnet-20240229-v1:0": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"anthropic.claude-3-opus-20240229-v1:0": {
|
"anthropic.claude-3-opus-20240229-v1:0": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"anthropic.claude-3-5-sonnet-20240620-v1:0": {
|
"anthropic.claude-3-5-sonnet-20240620-v1:0": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"claude-3-5-haiku-latest": {
|
"claude-3-5-haiku-latest": {
|
||||||
"token_limit": 200000,
|
"token_limit": 200000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"anthropic.claude-v2:1": {"token_limit": 200000, "supports_temperature": True},
|
"anthropic.claude-v2:1": {"token_limit": 200000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"anthropic.claude-v2": {"token_limit": 100000, "supports_temperature": True},
|
"anthropic.claude-v2": {"token_limit": 100000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"anthropic.claude-instant-v1": {
|
"anthropic.claude-instant-v1": {
|
||||||
"token_limit": 100000,
|
"token_limit": 100000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta.llama3-8b-instruct-v1:0": {
|
"meta.llama3-8b-instruct-v1:0": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta.llama3-70b-instruct-v1:0": {
|
"meta.llama3-70b-instruct-v1:0": {
|
||||||
"token_limit": 8192,
|
"token_limit": 8192,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"meta.llama2-13b-chat-v1": {"token_limit": 4096, "supports_temperature": True},
|
"meta.llama2-13b-chat-v1": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"meta.llama2-70b-chat-v1": {"token_limit": 4096, "supports_temperature": True},
|
"meta.llama2-70b-chat-v1": {"token_limit": 4096, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"mistral.mistral-7b-instruct-v0:2": {
|
"mistral.mistral-7b-instruct-v0:2": {
|
||||||
"token_limit": 32768,
|
"token_limit": 32768,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistral.mixtral-8x7b-instruct-v0:1": {
|
"mistral.mixtral-8x7b-instruct-v0:1": {
|
||||||
"token_limit": 32768,
|
"token_limit": 32768,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistral.mistral-large-2402-v1:0": {
|
"mistral.mistral-large-2402-v1:0": {
|
||||||
"token_limit": 32768,
|
"token_limit": 32768,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"mistral.mistral-small-2402-v1:0": {
|
"mistral.mistral-small-2402-v1:0": {
|
||||||
"token_limit": 32768,
|
"token_limit": 32768,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"amazon.titan-embed-text-v1": {
|
"amazon.titan-embed-text-v1": {
|
||||||
"token_limit": 8000,
|
"token_limit": 8000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"amazon.titan-embed-text-v2:0": {
|
"amazon.titan-embed-text-v2:0": {
|
||||||
"token_limit": 8000,
|
"token_limit": 8000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
"cohere.embed-english-v3": {"token_limit": 512, "supports_temperature": True},
|
"cohere.embed-english-v3": {"token_limit": 512, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"cohere.embed-multilingual-v3": {
|
"cohere.embed-multilingual-v3": {
|
||||||
"token_limit": 512,
|
"token_limit": 512,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"mistralai": {
|
"mistralai": {
|
||||||
"mistral-large-latest": {"token_limit": 128000, "supports_temperature": True},
|
"mistral-large-latest": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"open-mistral-nemo": {"token_limit": 128000, "supports_temperature": True},
|
"open-mistral-nemo": {"token_limit": 128000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
"codestral-latest": {"token_limit": 32000, "supports_temperature": True},
|
"codestral-latest": {"token_limit": 32000, "supports_temperature": True, "default_temperature": DEFAULT_TEMPERATURE},
|
||||||
},
|
},
|
||||||
"togetherai": {
|
"togetherai": {
|
||||||
"Meta-Llama-3.1-70B-Instruct-Turbo": {
|
"Meta-Llama-3.1-70B-Instruct-Turbo": {
|
||||||
"token_limit": 128000,
|
"token_limit": 128000,
|
||||||
"supports_temperature": True,
|
"supports_temperature": True,
|
||||||
|
"default_temperature": DEFAULT_TEMPERATURE,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -72,7 +72,7 @@ def get_read_only_tools(
|
||||||
|
|
||||||
# Define constant tool groups
|
# Define constant tool groups
|
||||||
READ_ONLY_TOOLS = get_read_only_tools()
|
READ_ONLY_TOOLS = get_read_only_tools()
|
||||||
MODIFICATION_TOOLS = [run_programming_task, put_complete_file_contents]
|
MODIFICATION_TOOLS = [run_programming_task]
|
||||||
COMMON_TOOLS = get_read_only_tools()
|
COMMON_TOOLS = get_read_only_tools()
|
||||||
EXPERT_TOOLS = [emit_expert_context, ask_expert]
|
EXPERT_TOOLS = [emit_expert_context, ask_expert]
|
||||||
RESEARCH_TOOLS = [
|
RESEARCH_TOOLS = [
|
||||||
|
|
|
||||||
|
|
@ -117,6 +117,8 @@ def run_programming_task(
|
||||||
|
|
||||||
# Log the programming task
|
# Log the programming task
|
||||||
log_work_event(f"Executed programming task: {_truncate_for_log(instructions)}")
|
log_work_event(f"Executed programming task: {_truncate_for_log(instructions)}")
|
||||||
|
|
||||||
|
print(repr(result))
|
||||||
|
|
||||||
# Return structured output
|
# Return structured output
|
||||||
return {
|
return {
|
||||||
|
|
|
||||||
|
|
@ -12,17 +12,16 @@ console = Console()
|
||||||
|
|
||||||
@tool
|
@tool
|
||||||
def put_complete_file_contents(
|
def put_complete_file_contents(
|
||||||
filepath: str, content: str, encoding: str = "utf-8", verbose: bool = True
|
filepath: str, complete_file_contents: str = "", encoding: str = "utf-8", verbose: bool = True
|
||||||
) -> Dict[str, any]:
|
) -> Dict[str, any]:
|
||||||
"""Write the complete contents of a file, creating it if it doesn't exist.
|
"""Write the complete contents of a file, creating it if it doesn't exist.
|
||||||
This tool is specifically for writing the entire contents of a file at once,
|
This tool is specifically for writing the entire contents of a file at once,
|
||||||
not for appending or partial writes.
|
not for appending or partial writes.
|
||||||
|
|
||||||
`filepath` and `content` must ALWAYS be provided.
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
filepath: Path to the file to write
|
filepath: (Required) Path to the file to write. Must be provided.
|
||||||
content: Complete string content to write to the file
|
complete_file_contents: Complete string content to write to the file. Defaults to
|
||||||
|
an empty string, which will create an empty file.
|
||||||
encoding: File encoding to use (default: utf-8)
|
encoding: File encoding to use (default: utf-8)
|
||||||
verbose: Whether to display a Rich panel with write statistics (default: True)
|
verbose: Whether to display a Rich panel with write statistics (default: True)
|
||||||
|
|
||||||
|
|
@ -55,14 +54,18 @@ def put_complete_file_contents(
|
||||||
logging.debug(f"Starting to write file: {filepath}")
|
logging.debug(f"Starting to write file: {filepath}")
|
||||||
|
|
||||||
with open(filepath, "w", encoding=encoding) as f:
|
with open(filepath, "w", encoding=encoding) as f:
|
||||||
f.write(content)
|
logging.debug(f"Writing {len(complete_file_contents)} bytes to {filepath}")
|
||||||
result["bytes_written"] = len(content.encode(encoding))
|
f.write(complete_file_contents)
|
||||||
|
result["bytes_written"] = len(complete_file_contents.encode(encoding))
|
||||||
|
|
||||||
elapsed = time.time() - start_time
|
elapsed = time.time() - start_time
|
||||||
result["elapsed_time"] = elapsed
|
result["elapsed_time"] = elapsed
|
||||||
result["success"] = True
|
result["success"] = True
|
||||||
result["filepath"] = filepath
|
result["filepath"] = filepath
|
||||||
result["message"] = "Operation completed successfully"
|
result["message"] = (
|
||||||
|
f"Successfully {'initialized empty file' if not complete_file_contents else f'wrote {result['bytes_written']} bytes'} "
|
||||||
|
f"at {filepath} in {result['elapsed_time']:.3f}s"
|
||||||
|
)
|
||||||
|
|
||||||
logging.debug(
|
logging.debug(
|
||||||
f"File write complete: {result['bytes_written']} bytes in {elapsed:.2f}s"
|
f"File write complete: {result['bytes_written']} bytes in {elapsed:.2f}s"
|
||||||
|
|
@ -71,7 +74,7 @@ def put_complete_file_contents(
|
||||||
if verbose:
|
if verbose:
|
||||||
console.print(
|
console.print(
|
||||||
Panel(
|
Panel(
|
||||||
f"Wrote {result['bytes_written']} bytes to {filepath} in {elapsed:.2f}s",
|
f"{'Initialized empty file' if not complete_file_contents else f'Wrote {result['bytes_written']} bytes'} at {filepath} in {elapsed:.2f}s",
|
||||||
title="💾 File Write",
|
title="💾 File Write",
|
||||||
border_style="bright_green",
|
border_style="bright_green",
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -19,7 +19,7 @@ def test_basic_write_functionality(temp_test_dir):
|
||||||
test_file = temp_test_dir / "test.txt"
|
test_file = temp_test_dir / "test.txt"
|
||||||
content = "Hello, World!\nTest content"
|
content = "Hello, World!\nTest content"
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "content": content})
|
result = put_complete_file_contents({"filepath": str(test_file), "complete_file_contents": content})
|
||||||
|
|
||||||
# Verify file contents
|
# Verify file contents
|
||||||
assert test_file.read_text() == content
|
assert test_file.read_text() == content
|
||||||
|
|
@ -38,7 +38,7 @@ def test_directory_creation(temp_test_dir):
|
||||||
test_file = nested_dir / "test.txt"
|
test_file = nested_dir / "test.txt"
|
||||||
content = "Test content"
|
content = "Test content"
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "content": content})
|
result = put_complete_file_contents({"filepath": str(test_file), "complete_file_contents": content})
|
||||||
|
|
||||||
assert test_file.exists()
|
assert test_file.exists()
|
||||||
assert test_file.read_text() == content
|
assert test_file.read_text() == content
|
||||||
|
|
@ -52,14 +52,14 @@ def test_different_encodings(temp_test_dir):
|
||||||
|
|
||||||
# Test UTF-8
|
# Test UTF-8
|
||||||
result_utf8 = put_complete_file_contents(
|
result_utf8 = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "content": content, "encoding": "utf-8"}
|
{"filepath": str(test_file), "complete_file_contents": content, "encoding": "utf-8"}
|
||||||
)
|
)
|
||||||
assert result_utf8["success"] is True
|
assert result_utf8["success"] is True
|
||||||
assert test_file.read_text(encoding="utf-8") == content
|
assert test_file.read_text(encoding="utf-8") == content
|
||||||
|
|
||||||
# Test UTF-16
|
# Test UTF-16
|
||||||
result_utf16 = put_complete_file_contents(
|
result_utf16 = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "content": content, "encoding": "utf-16"}
|
{"filepath": str(test_file), "complete_file_contents": content, "encoding": "utf-16"}
|
||||||
)
|
)
|
||||||
assert result_utf16["success"] is True
|
assert result_utf16["success"] is True
|
||||||
assert test_file.read_text(encoding="utf-16") == content
|
assert test_file.read_text(encoding="utf-16") == content
|
||||||
|
|
@ -72,7 +72,7 @@ def test_permission_error(mock_open_func, temp_test_dir):
|
||||||
test_file = temp_test_dir / "noperm.txt"
|
test_file = temp_test_dir / "noperm.txt"
|
||||||
|
|
||||||
result = put_complete_file_contents(
|
result = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "content": "test content"}
|
{"filepath": str(test_file), "complete_file_contents": "test content"}
|
||||||
)
|
)
|
||||||
|
|
||||||
assert result["success"] is False
|
assert result["success"] is False
|
||||||
|
|
@ -87,7 +87,7 @@ def test_io_error(mock_open_func, temp_test_dir):
|
||||||
test_file = temp_test_dir / "ioerror.txt"
|
test_file = temp_test_dir / "ioerror.txt"
|
||||||
|
|
||||||
result = put_complete_file_contents(
|
result = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "content": "test content"}
|
{"filepath": str(test_file), "complete_file_contents": "test content"}
|
||||||
)
|
)
|
||||||
|
|
||||||
assert result["success"] is False
|
assert result["success"] is False
|
||||||
|
|
@ -99,12 +99,26 @@ def test_empty_content(temp_test_dir):
|
||||||
"""Test writing empty content to a file."""
|
"""Test writing empty content to a file."""
|
||||||
test_file = temp_test_dir / "empty.txt"
|
test_file = temp_test_dir / "empty.txt"
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "content": ""})
|
result = put_complete_file_contents({"filepath": str(test_file)})
|
||||||
|
|
||||||
assert test_file.exists()
|
assert test_file.exists()
|
||||||
assert test_file.read_text() == ""
|
assert test_file.read_text() == ""
|
||||||
assert result["success"] is True
|
assert result["success"] is True
|
||||||
assert result["bytes_written"] == 0
|
assert result["bytes_written"] == 0
|
||||||
|
assert "initialized empty file" in result["message"].lower()
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_empty_file_default(temp_test_dir):
|
||||||
|
"""Test creating an empty file using default parameter."""
|
||||||
|
test_file = temp_test_dir / "empty_default.txt"
|
||||||
|
|
||||||
|
result = put_complete_file_contents({"filepath": str(test_file)})
|
||||||
|
|
||||||
|
assert test_file.exists()
|
||||||
|
assert test_file.read_text() == ""
|
||||||
|
assert result["success"] is True
|
||||||
|
assert result["bytes_written"] == 0
|
||||||
|
assert "initialized empty file" in result["message"].lower()
|
||||||
|
|
||||||
|
|
||||||
def test_overwrite_existing_file(temp_test_dir):
|
def test_overwrite_existing_file(temp_test_dir):
|
||||||
|
|
@ -117,7 +131,7 @@ def test_overwrite_existing_file(temp_test_dir):
|
||||||
# Overwrite with new content
|
# Overwrite with new content
|
||||||
new_content = "New content"
|
new_content = "New content"
|
||||||
result = put_complete_file_contents(
|
result = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "content": new_content}
|
{"filepath": str(test_file), "complete_file_contents": new_content}
|
||||||
)
|
)
|
||||||
|
|
||||||
assert test_file.read_text() == new_content
|
assert test_file.read_text() == new_content
|
||||||
|
|
@ -130,7 +144,7 @@ def test_large_file_write(temp_test_dir):
|
||||||
test_file = temp_test_dir / "large.txt"
|
test_file = temp_test_dir / "large.txt"
|
||||||
content = "Large content\n" * 1000 # Create substantial content
|
content = "Large content\n" * 1000 # Create substantial content
|
||||||
|
|
||||||
result = put_complete_file_contents({"filepath": str(test_file), "content": content})
|
result = put_complete_file_contents({"filepath": str(test_file), "complete_file_contents": content})
|
||||||
|
|
||||||
assert test_file.exists()
|
assert test_file.exists()
|
||||||
assert test_file.read_text() == content
|
assert test_file.read_text() == content
|
||||||
|
|
@ -144,7 +158,7 @@ def test_invalid_path_characters(temp_test_dir):
|
||||||
invalid_path = temp_test_dir / "invalid\0file.txt"
|
invalid_path = temp_test_dir / "invalid\0file.txt"
|
||||||
|
|
||||||
result = put_complete_file_contents(
|
result = put_complete_file_contents(
|
||||||
{"filepath": str(invalid_path), "content": "test content"}
|
{"filepath": str(invalid_path), "complete_file_contents": "test content"}
|
||||||
)
|
)
|
||||||
|
|
||||||
assert result["success"] is False
|
assert result["success"] is False
|
||||||
|
|
@ -162,7 +176,7 @@ def test_write_to_readonly_directory(temp_test_dir):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
result = put_complete_file_contents(
|
result = put_complete_file_contents(
|
||||||
{"filepath": str(test_file), "content": "test content"}
|
{"filepath": str(test_file), "complete_file_contents": "test content"}
|
||||||
)
|
)
|
||||||
assert result["success"] is False
|
assert result["success"] is False
|
||||||
assert "Permission" in result["message"]
|
assert "Permission" in result["message"]
|
||||||
|
|
|
||||||
|
|
@ -56,6 +56,109 @@ def sample_git_repo(empty_git_repo):
|
||||||
return empty_git_repo
|
return empty_git_repo
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def git_repo_with_untracked(sample_git_repo):
|
||||||
|
"""Create a git repository with both tracked and untracked files."""
|
||||||
|
# Create untracked files
|
||||||
|
untracked_files = [
|
||||||
|
"untracked.txt",
|
||||||
|
"src/untracked.py",
|
||||||
|
"docs/draft.md"
|
||||||
|
]
|
||||||
|
|
||||||
|
for file_path in untracked_files:
|
||||||
|
full_path = sample_git_repo / file_path
|
||||||
|
full_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
full_path.write_text(f"Untracked content of {file_path}")
|
||||||
|
|
||||||
|
return sample_git_repo
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def git_repo_with_ignores(git_repo_with_untracked):
|
||||||
|
"""Create a git repository with .gitignore rules."""
|
||||||
|
# Create .gitignore file
|
||||||
|
gitignore_content = """
|
||||||
|
# Python
|
||||||
|
__pycache__/
|
||||||
|
*.pyc
|
||||||
|
|
||||||
|
# Project specific
|
||||||
|
*.log
|
||||||
|
temp/
|
||||||
|
ignored.txt
|
||||||
|
docs/draft.md
|
||||||
|
"""
|
||||||
|
gitignore_path = git_repo_with_untracked / ".gitignore"
|
||||||
|
gitignore_path.write_text(gitignore_content)
|
||||||
|
|
||||||
|
# Add and commit .gitignore first
|
||||||
|
subprocess.run(["git", "add", ".gitignore"], cwd=git_repo_with_untracked)
|
||||||
|
subprocess.run(
|
||||||
|
["git", "commit", "-m", "Add .gitignore"],
|
||||||
|
cwd=git_repo_with_untracked,
|
||||||
|
env={
|
||||||
|
"GIT_AUTHOR_NAME": "Test",
|
||||||
|
"GIT_AUTHOR_EMAIL": "test@example.com",
|
||||||
|
"GIT_COMMITTER_NAME": "Test",
|
||||||
|
"GIT_COMMITTER_EMAIL": "test@example.com",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create some ignored files
|
||||||
|
ignored_files = [
|
||||||
|
"ignored.txt",
|
||||||
|
"temp/temp.txt",
|
||||||
|
"src/__pycache__/main.cpython-39.pyc"
|
||||||
|
]
|
||||||
|
|
||||||
|
for file_path in ignored_files:
|
||||||
|
full_path = git_repo_with_untracked / file_path
|
||||||
|
full_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
full_path.write_text(f"Ignored content of {file_path}")
|
||||||
|
|
||||||
|
return git_repo_with_untracked
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def git_repo_with_aider_files(sample_git_repo):
|
||||||
|
"""Create a git repository with .aider files that should be ignored."""
|
||||||
|
# Create .aider files
|
||||||
|
aider_files = [
|
||||||
|
".aider.chat.history.md",
|
||||||
|
".aider.input.history",
|
||||||
|
".aider.tags.cache.v3/some_file",
|
||||||
|
"src/.aider.local.settings"
|
||||||
|
]
|
||||||
|
|
||||||
|
# Create regular files
|
||||||
|
regular_files = [
|
||||||
|
"main.cpp",
|
||||||
|
"src/helper.cpp"
|
||||||
|
]
|
||||||
|
|
||||||
|
# Create all files
|
||||||
|
for file_path in aider_files + regular_files:
|
||||||
|
full_path = sample_git_repo / file_path
|
||||||
|
full_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
full_path.write_text(f"Content of {file_path}")
|
||||||
|
|
||||||
|
# Add all files (both .aider and regular) to git
|
||||||
|
subprocess.run(["git", "add", "."], cwd=sample_git_repo)
|
||||||
|
subprocess.run(
|
||||||
|
["git", "commit", "-m", "Add files including .aider"],
|
||||||
|
cwd=sample_git_repo,
|
||||||
|
env={
|
||||||
|
"GIT_AUTHOR_NAME": "Test",
|
||||||
|
"GIT_AUTHOR_EMAIL": "test@example.com",
|
||||||
|
"GIT_COMMITTER_NAME": "Test",
|
||||||
|
"GIT_COMMITTER_EMAIL": "test@example.com",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
return sample_git_repo
|
||||||
|
|
||||||
|
|
||||||
def test_is_git_repo(sample_git_repo, tmp_path_factory):
|
def test_is_git_repo(sample_git_repo, tmp_path_factory):
|
||||||
"""Test git repository detection."""
|
"""Test git repository detection."""
|
||||||
# Create a new directory that is not a git repository
|
# Create a new directory that is not a git repository
|
||||||
|
|
@ -248,39 +351,199 @@ def mock_is_git_repo():
|
||||||
yield mock
|
yield mock
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def mock_os_path(monkeypatch):
    """Patch os.path.exists and os.path.isdir to always report success."""
    # Both predicates unconditionally return True so path checks never fail.
    monkeypatch.setattr(os.path, 'exists', lambda path: True)
    monkeypatch.setattr(os.path, 'isdir', lambda path: True)
    return monkeypatch
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("test_case", FILE_LISTING_TEST_CASES, ids=lambda x: x["name"])
def test_get_file_listing(test_case, mock_subprocess, mock_is_git_repo, mock_os_path):
    """Exercise get_file_listing across the parametrized fixture cases."""
    # Feed the canned git output for this case through the mocked subprocess.
    mock_subprocess.return_value = create_mock_process(test_case["git_output"])

    files, total = get_file_listing(DUMMY_PATH, limit=test_case["limit"])

    # Both the returned files and the total must match the expectation.
    assert (files, total) == (test_case["expected_files"], test_case["expected_total"])
|
||||||
|
|
||||||
|
|
||||||
def test_get_file_listing_non_git_repo(mock_is_git_repo, mock_os_path):
    """A path that is not a git repository yields an empty listing."""
    mock_is_git_repo.return_value = False

    result = get_file_listing(DUMMY_PATH)

    # No files and a zero total when the directory is not a repo.
    assert result == ([], 0)
|
||||||
|
|
||||||
|
|
||||||
def test_get_file_listing_git_error(mock_subprocess, mock_is_git_repo, mock_os_path):
    """A failing git command surfaces as GitCommandError."""
    # Make every subprocess invocation raise the git-specific error.
    mock_subprocess.side_effect = GitCommandError("Git command failed")

    with pytest.raises(GitCommandError):
        get_file_listing(DUMMY_PATH)
|
||||||
|
|
||||||
|
|
||||||
def test_get_file_listing_permission_error(mock_subprocess, mock_is_git_repo, mock_os_path):
    """An OS permission failure is translated into DirectoryAccessError."""
    # Simulate the OS denying access to the directory.
    mock_subprocess.side_effect = PermissionError("Permission denied")

    with pytest.raises(DirectoryAccessError):
        get_file_listing(DUMMY_PATH)
|
||||||
|
|
||||||
|
|
||||||
def test_get_file_listing_unexpected_error(mock_subprocess, mock_is_git_repo, mock_os_path):
    """Any unanticipated exception is wrapped in FileListerError."""
    # An arbitrary Exception stands in for any unforeseen failure mode.
    mock_subprocess.side_effect = Exception("Unexpected error")

    with pytest.raises(FileListerError):
        get_file_listing(DUMMY_PATH)
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_file_listing_with_untracked(git_repo_with_untracked):
    """The listing contains tracked and untracked files, and counts both."""
    files, count = get_file_listing(str(git_repo_with_untracked))

    # Tracked files from the base repo are present.
    for tracked in ("README.md", "src/main.py"):
        assert tracked in files

    # Untracked additions show up as well.
    for untracked in ("untracked.txt", "src/untracked.py"):
        assert untracked in files

    # Total spans both groups: 5 tracked + 3 untracked (excluding .gitignore).
    assert count == 8
|
||||||
|
|
||||||
|
def test_get_file_listing_with_untracked_and_limit(git_repo_with_untracked):
    """A limit caps the returned files while the total count stays complete."""
    limit = 3
    files, count = get_file_listing(str(git_repo_with_untracked), limit=limit)

    # The reported total ignores the limit: 5 tracked + 3 untracked
    # (excluding .gitignore).
    assert count == 8

    # Only `limit` entries come back, and since results are sorted these
    # are exactly the first three.
    assert len(files) == limit
    assert files == sorted(files)
|
||||||
|
|
||||||
|
def test_get_file_listing_respects_gitignore(git_repo_with_ignores):
    """The listing honours .gitignore rules and the include_hidden flag."""
    # Default behaviour: hidden files are excluded.
    files, count = get_file_listing(str(git_repo_with_ignores))

    # Tracked or untracked (but not ignored) files are present.
    for visible in ("README.md", "src/main.py", "untracked.txt", "src/untracked.py"):
        assert visible in files

    # .gitignore itself is a hidden file, so it is excluded by default.
    assert ".gitignore" not in files

    # Anything matched by a .gitignore rule stays out of the listing.
    for ignored in (
        "ignored.txt",
        "temp/temp.txt",
        "src/__pycache__/main.cpython-39.pyc",
        "docs/draft.md",  # explicitly listed in .gitignore
    ):
        assert ignored not in files

    # 4 tracked + 2 untracked non-ignored, non-hidden files.
    assert count == 7

    # With include_hidden=True the .gitignore file is reported as well.
    files, count = get_file_listing(str(git_repo_with_ignores), include_hidden=True)
    assert ".gitignore" in files
    # 5 tracked + 2 untracked + .gitignore.
    assert count == 8
|
||||||
|
|
||||||
|
def test_aider_files_excluded(git_repo_with_aider_files):
    """Files created by aider (.aider*) never appear in the listing."""
    files, count = get_file_listing(str(git_repo_with_aider_files))

    # Ordinary source files stay visible.
    for kept in ("main.cpp", "src/helper.cpp"):
        assert kept in files

    # Every .aider artifact is filtered out.
    for excluded in (
        ".aider.chat.history.md",
        ".aider.input.history",
        ".aider.tags.cache.v3/some_file",
        "src/.aider.local.settings",
    ):
        assert excluded not in files

    # Only the regular files count: 5 from sample_git_repo + 2 added here.
    expected = 7
    assert count == expected
    assert len(files) == expected
|
||||||
|
|
||||||
|
def test_hidden_files_excluded_by_default(git_repo_with_aider_files):
    """Hidden (dot-prefixed) files are excluded unless include_hidden=True.

    Commits a mix of hidden and regular files, then checks the listing both
    with the default flags and with ``include_hidden=True``.
    """
    # Hidden (dot-prefixed) files that the default listing must skip.
    hidden_files = [
        ".config",
        ".env",
        "src/.local",
        ".gitattributes",
    ]

    # Regular files that must always be visible.
    regular_files = [
        "main.cpp",
        "src/helper.cpp",
    ]

    # Create every file (parents included) with deterministic content.
    for file_path in hidden_files + regular_files:
        full_path = git_repo_with_aider_files / file_path
        full_path.parent.mkdir(parents=True, exist_ok=True)
        full_path.write_text(f"Content of {file_path}")

    # Commit everything. check=True makes a failing git invocation fail the
    # test setup instead of silently producing a half-built repository.
    subprocess.run(["git", "add", "."], cwd=git_repo_with_aider_files, check=True)
    subprocess.run(
        ["git", "commit", "-m", "Add files including hidden files"],
        cwd=git_repo_with_aider_files,
        check=True,
        # Inherit the parent environment (PATH, HOME, ...) and only override
        # the identity variables git requires for a commit; passing a bare
        # dict would wipe the environment and can break git lookup/config.
        env={
            **os.environ,
            "GIT_AUTHOR_NAME": "Test",
            "GIT_AUTHOR_EMAIL": "test@example.com",
            "GIT_COMMITTER_NAME": "Test",
            "GIT_COMMITTER_EMAIL": "test@example.com",
        },
    )

    # Default behaviour: hidden files are excluded.
    files, count = get_file_listing(str(git_repo_with_aider_files))

    assert "main.cpp" in files
    assert "src/helper.cpp" in files
    for hidden_file in hidden_files:
        assert hidden_file not in files

    # Only regular files are counted: 5 original + 2 added here.
    assert count == 7

    # With include_hidden=True the hidden files are reported as well.
    files, count = get_file_listing(str(git_repo_with_aider_files), include_hidden=True)

    assert "main.cpp" in files
    assert "src/helper.cpp" in files
    for hidden_file in hidden_files:
        assert hidden_file in files

    # Everything is counted: 5 original + 2 regular + 4 hidden.
    assert count == 11
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue