From 2d4615e6558d1ad01c1267224e7478acddc53e04 Mon Sep 17 00:00:00 2001
From: Ariel Frischer
Date: Fri, 24 Jan 2025 17:09:01 -0800
Subject: [PATCH] refactor(llm.py): simplify temperature handling logic for
 better readability and maintainability

fix(llm.py): ensure default temperature is set correctly for different
providers
---
 ra_aid/llm.py | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/ra_aid/llm.py b/ra_aid/llm.py
index 41d4055..8009a23 100644
--- a/ra_aid/llm.py
+++ b/ra_aid/llm.py
@@ -132,27 +132,29 @@ def create_llm_client(
         is_expert
     )
 
-    # Handle temperature for expert mode
+    # Handle temperature settings
     if is_expert:
-        temperature = 0
-
-    temp_kwargs = {}
-    if not is_expert and temperature is not None:
+        temp_kwargs = {"temperature": 0}
+    elif temperature is not None:
         temp_kwargs = {"temperature": temperature}
+    elif provider == "openai-compatible":
+        temp_kwargs = {"temperature": 0.3}
+    else:
+        temp_kwargs = {}
 
     if provider == "deepseek":
         return create_deepseek_client(
             model_name=model_name,
             api_key=config["api_key"],
             base_url=config["base_url"],
-            temperature=temperature,
+            temperature=temperature if not is_expert else 0,
             is_expert=is_expert,
         )
     elif provider == "openrouter":
         return create_openrouter_client(
             model_name=model_name,
             api_key=config["api_key"],
-            temperature=temperature,
+            temperature=temperature if not is_expert else 0,
             is_expert=is_expert,
         )
     elif provider == "openai":
@@ -171,8 +173,8 @@ def create_llm_client(
         return ChatOpenAI(
             api_key=config["api_key"],
             base_url=config["base_url"],
-            **temp_kwargs if temp_kwargs else {"temperature": 0.3},
             model=model_name,
+            **temp_kwargs,
         )
     elif provider == "gemini":
         return ChatGoogleGenerativeAI(
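
Reviewer note, not part of the patch: the new temp_kwargs resolution can be
read as a small standalone helper. The sketch below restates the precedence
the diff introduces (expert mode, then an explicit temperature, then the 0.3
default for openai-compatible providers, then nothing); the helper name
resolve_temp_kwargs and its signature are illustrative assumptions, not code
from ra_aid/llm.py.

    from typing import Optional

    def resolve_temp_kwargs(
        is_expert: bool,
        temperature: Optional[float],
        provider: str,
    ) -> dict:
        # Hypothetical helper mirroring the patched logic in create_llm_client.
        # Expert mode always pins temperature to 0.
        if is_expert:
            return {"temperature": 0}
        # An explicitly requested temperature wins next.
        if temperature is not None:
            return {"temperature": temperature}
        # openai-compatible providers fall back to the 0.3 default that the
        # old code applied inline at the ChatOpenAI call site.
        if provider == "openai-compatible":
            return {"temperature": 0.3}
        # All other providers send no temperature kwarg at all.
        return {}

    # Example: with no explicit temperature, an openai-compatible provider
    # now gets 0.3, and expert mode overrides any requested value.
    assert resolve_temp_kwargs(False, None, "openai-compatible") == {"temperature": 0.3}
    assert resolve_temp_kwargs(True, 0.7, "openai") == {"temperature": 0}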