diff --git a/pr_agent/algo/__init__.py b/pr_agent/algo/__init__.py
index 10b53cb3..dd6febfd 100644
--- a/pr_agent/algo/__init__.py
+++ b/pr_agent/algo/__init__.py
@@ -85,11 +85,19 @@ MAX_TOKENS = {
 }
 
 USER_MESSAGE_ONLY_MODELS = [
+    "deepseek/deepseek-reasoner",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "o1-preview"
+]
+
+NO_SUPPORT_TEMPERATURE_MODELS = [
     "deepseek/deepseek-reasoner",
     "o1-mini",
     "o1-mini-2024-09-12",
     "o1",
     "o1-2024-12-17",
     "o3-mini",
-    "o3-mini-2025-01-31"
+    "o3-mini-2025-01-31",
+    "o1-preview"
 ]
diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index c936ebd5..0ef29b51 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -6,7 +6,7 @@
 import requests
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
-from pr_agent.algo import USER_MESSAGE_ONLY_MODELS
+from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.utils import get_version
 from pr_agent.config_loader import get_settings
@@ -98,6 +98,9 @@ class LiteLLMAIHandler(BaseAiHandler):
         # Models that only use user meessage
         self.user_message_only_models = USER_MESSAGE_ONLY_MODELS
 
+        # Models that don't support the temperature argument
+        self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS
+
     def prepare_logs(self, response, system, user, resp, finish_reason):
         response_log = response.dict().copy()
         response_log['system'] = system
@@ -202,7 +205,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                     {"type": "image_url", "image_url": {"url": img_path}}]
 
         # Currently, some models do not support a separate system and user prompts
-        if self.user_message_only_models and any(entry.lower() in model.lower() for entry in self.user_message_only_models):
+        if model in self.user_message_only_models:
             user = f"{system}\n\n\n{user}"
             system = ""
             get_logger().info(f"Using model {model}, combining system and user prompts")
@@ -219,11 +222,14 @@
                 "model": model,
                 "deployment_id": deployment_id,
                 "messages": messages,
-                "temperature": temperature,
                 "timeout": get_settings().config.ai_timeout,
                 "api_base": self.api_base,
             }
 
+            # Add temperature only if model supports it
+            if model not in self.no_support_temperature_models:
+                kwargs["temperature"] = temperature
+
             if get_settings().litellm.get("enable_callbacks", False):
                 kwargs = self.add_litellm_callbacks(kwargs)
 