FIX do not default to o1 model (#82)

This commit is contained in:
Jose M Leon 2025-02-08 20:28:10 -05:00 committed by GitHub
parent 0c86900ce4
commit 00a455d586
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 13 additions and 15 deletions

View File

@ -206,7 +206,7 @@ Examples:
if parsed_args.provider == "openai":
parsed_args.model = parsed_args.model or OPENAI_DEFAULT_MODEL
if parsed_args.provider == "anthropic":
elif parsed_args.provider == "anthropic":
# Always use default model for Anthropic
parsed_args.model = ANTHROPIC_DEFAULT_MODEL
elif not parsed_args.model and not parsed_args.research_only:
@ -215,15 +215,12 @@ Examples:
f"--model is required when using provider '{parsed_args.provider}'"
)
# Validate expert model requirement
if (
parsed_args.expert_provider != "openai"
and not parsed_args.expert_model
and not parsed_args.research_only
):
parser.error(
f"--expert-model is required when using expert provider '{parsed_args.expert_provider}'"
)
# Handle expert provider/model defaults
if not parsed_args.expert_provider:
# If no expert provider specified, use the main provider instead of defaulting
# to any particular model, since we do not know if we have access to any other model.
parsed_args.expert_provider = parsed_args.provider
parsed_args.expert_model = parsed_args.model
# Validate temperature range if provided
if parsed_args.temperature is not None and not (

View File

@ -220,7 +220,7 @@ def initialize_llm(
def initialize_expert_llm(
    provider: str, model_name: str
) -> BaseChatModel:
    """Initialize an expert language model client for the given provider and model.

    Args:
        provider: Name of the LLM provider (e.g. "openai", "anthropic").
        model_name: Provider-specific model identifier.

    Returns:
        BaseChatModel: Client configured for expert use (temperature disabled).
    """
    # Intentionally no default provider/model: per this commit ("do not default
    # to o1 model"), callers must pass both explicitly so we never silently fall
    # back to a model the user may not have access to.
    return create_llm_client(provider, model_name, temperature=None, is_expert=True)

View File

@ -17,8 +17,9 @@ def get_model():
global _model
try:
if _model is None:
provider = _global_memory["config"]["expert_provider"] or "openai"
model = _global_memory["config"]["expert_model"] or "o1"
config = _global_memory["config"]
provider = config.get("expert_provider") or config.get("provider")
model = config.get("expert_model") or config.get("model")
_model = initialize_expert_llm(provider, model)
except Exception as e:
_model = None

View File

@ -50,9 +50,9 @@ def mock_openai():
def test_initialize_expert_defaults(clean_env, mock_openai, monkeypatch):
    """Test expert LLM initialization with explicit parameters.

    initialize_expert_llm no longer has default arguments, so provider and
    model are passed explicitly and the underlying OpenAI client must be
    constructed with exactly those values.
    """
    monkeypatch.setenv("EXPERT_OPENAI_API_KEY", "test-key")
    _llm = initialize_expert_llm("openai", "o1")
    # Expert o1 calls pin reasoning_effort="high"; verify the client was built once
    # with the expert-scoped API key and the requested model.
    mock_openai.assert_called_once_with(api_key="test-key", model="o1", reasoning_effort="high")