diff --git a/README.md b/README.md index 4fb923e..4c23e53 100644 --- a/README.md +++ b/README.md @@ -324,7 +324,7 @@ ra-aid -m "Update all deprecated API calls" --cowboy-mode ### Model Configuration -RA.Aid supports multiple AI providers and models. The default model is Anthropic's Claude 3 Sonnet (`claude-3-5-sonnet-20241022`). +RA.Aid supports multiple AI providers and models. The default model is Anthropic's Claude 3.7 Sonnet (`claude-3-7-sonnet-20250219`). The programmer tool (aider) automatically selects its model based on your available API keys. It will use Claude models if ANTHROPIC_API_KEY is set, or fall back to OpenAI models if only OPENAI_API_KEY is available. @@ -372,7 +372,7 @@ export GEMINI_API_KEY=your_api_key_here 1. **Using Anthropic (Default)** ```bash - # Uses default model (claude-3-5-sonnet-20241022) + # Uses default model (claude-3-7-sonnet-20250219) ra-aid -m "Your task" # Or explicitly specify: @@ -438,7 +438,7 @@ Note: For `AIDER_FLAGS`, you can specify flags with or without the leading `--`. **Important Notes:** - Performance varies between models. The default Claude 3 Sonnet model currently provides the best and most reliable results. - Model configuration is done via command line arguments: `--provider` and `--model` -- The `--model` argument is required for all providers except Anthropic (which defaults to `claude-3-5-sonnet-20241022`) +- The `--model` argument is required for all providers except Anthropic (which defaults to `claude-3-7-sonnet-20250219`) More information is available in our [Open Models Setup](https://docs.ra-aid.ai/quickstart/open-models) guide. 
diff --git a/ra_aid/tools/agent.py b/ra_aid/tools/agent.py index d6896a8..0a75aee 100644 --- a/ra_aid/tools/agent.py +++ b/ra_aid/tools/agent.py @@ -37,7 +37,7 @@ def request_research(query: str) -> ResearchResult: config = _global_memory.get("config", {}) model = initialize_llm( config.get("provider", "anthropic"), - config.get("model", "claude-3-5-sonnet-20241022"), + config.get("model", "claude-3-7-sonnet-20250219"), temperature=config.get("temperature"), ) @@ -120,7 +120,7 @@ def request_web_research(query: str) -> ResearchResult: config = _global_memory.get("config", {}) model = initialize_llm( config.get("provider", "anthropic"), - config.get("model", "claude-3-5-sonnet-20241022"), + config.get("model", "claude-3-7-sonnet-20250219"), temperature=config.get("temperature"), ) @@ -189,7 +189,7 @@ def request_research_and_implementation(query: str) -> Dict[str, Any]: config = _global_memory.get("config", {}) model = initialize_llm( config.get("provider", "anthropic"), - config.get("model", "claude-3-5-sonnet-20241022"), + config.get("model", "claude-3-7-sonnet-20250219"), temperature=config.get("temperature"), ) diff --git a/tests/ra_aid/test_main.py b/tests/ra_aid/test_main.py index 8e769a1..10f0025 100644 --- a/tests/ra_aid/test_main.py +++ b/tests/ra_aid/test_main.py @@ -85,7 +85,7 @@ def test_config_settings(mock_dependencies): "--provider", "anthropic", "--model", - "claude-3-5-sonnet-20241022", + "claude-3-7-sonnet-20250219", "--expert-provider", "openai", "--expert-model", @@ -100,7 +100,7 @@ def test_config_settings(mock_dependencies): assert config["cowboy_mode"] is True assert config["research_only"] is True assert config["provider"] == "anthropic" - assert config["model"] == "claude-3-5-sonnet-20241022" + assert config["model"] == "claude-3-7-sonnet-20250219" assert config["expert_provider"] == "openai" assert config["expert_model"] == "gpt-4" assert config["limit_tokens"] is False