From 9abb212e83126187d1ee8e61eb3ca8c8f6f825e2 Mon Sep 17 00:00:00 2001
From: Trung Dinh
Date: Fri, 21 Feb 2025 22:16:18 +0700
Subject: [PATCH] Add reasoning_effort argument to chat completion request

---
 pr_agent/algo/ai_handlers/litellm_ai_handler.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index b22b834e..512ea52d 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -6,9 +6,9 @@ import requests
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
 
-from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
+from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, SUPPORT_REASONING_EFFORT_MODELS, USER_MESSAGE_ONLY_MODELS
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
-from pr_agent.algo.utils import get_version
+from pr_agent.algo.utils import ReasoningEffort, get_version
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
@@ -101,6 +101,9 @@ class LiteLLMAIHandler(BaseAiHandler):
         # Model that doesn't support temperature argument
         self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS
 
+        # Models that support reasoning effort
+        self.support_reasoning_models = SUPPORT_REASONING_EFFORT_MODELS
+
     def prepare_logs(self, response, system, user, resp, finish_reason):
         response_log = response.dict().copy()
         response_log['system'] = system
@@ -230,6 +233,13 @@ class LiteLLMAIHandler(BaseAiHandler):
             if model not in self.no_support_temperature_models and not get_settings().config.custom_reasoning_model:
                 kwargs["temperature"] = temperature
 
+            # Add reasoning_effort if model supports it
+            if (model in self.support_reasoning_models):
+                supported_reasoning_efforts = [ReasoningEffort.HIGH.value, ReasoningEffort.MEDIUM.value, ReasoningEffort.LOW.value]
+                reasoning_effort = get_settings().config.reasoning_effort if (get_settings().config.reasoning_effort in supported_reasoning_efforts) else ReasoningEffort.MEDIUM.value
+                get_logger().info(f"Add reasoning_effort with value {reasoning_effort} to model {model}.")
+                kwargs["reasoning_effort"] = reasoning_effort
+
             if get_settings().litellm.get("enable_callbacks", False):
                 kwargs = self.add_litellm_callbacks(kwargs)
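
For context, here is a minimal, self-contained sketch of the two symbols the patch imports (SUPPORT_REASONING_EFFORT_MODELS from pr_agent.algo and ReasoningEffort from pr_agent.algo.utils) together with the fallback rule the new block applies before setting kwargs["reasoning_effort"]. The member names HIGH/MEDIUM/LOW, the MEDIUM default, and the config key config.reasoning_effort come from the diff above; the lowercase string values and the model names in the list are assumptions, not the project's actual definitions:

# Sketch only: approximates what the imported symbols could look like.
# The member names and the MEDIUM fallback mirror the diff; the string
# values and the model names below are assumptions.
from enum import Enum
from typing import Optional


class ReasoningEffort(str, Enum):
    HIGH = "high"      # assumed lowercase values, in line with the OpenAI-style parameter
    MEDIUM = "medium"
    LOW = "low"


# Hypothetical contents; the real list lives in pr_agent/algo/__init__.py.
SUPPORT_REASONING_EFFORT_MODELS = [
    "o1",       # placeholder
    "o3-mini",  # placeholder
]


def select_reasoning_effort(configured: Optional[str]) -> str:
    """Same fallback rule as the new block: keep the configured value when it
    is one of the supported efforts, otherwise default to MEDIUM."""
    supported = {ReasoningEffort.HIGH.value, ReasoningEffort.MEDIUM.value, ReasoningEffort.LOW.value}
    return configured if configured in supported else ReasoningEffort.MEDIUM.value


if __name__ == "__main__":
    # The selected string is what ends up in kwargs["reasoning_effort"] and is
    # forwarded through litellm's acompletion() to the provider.
    print(select_reasoning_effort("high"))     # -> high
    print(select_reasoning_effort("maximal"))  # -> medium (fallback)
    print(select_reasoning_effort(None))       # -> medium (unset config)

Design note: an unsupported or unset config.reasoning_effort value is silently mapped to MEDIUM rather than raising, so a misconfiguration degrades to the provider's usual default behaviour instead of failing the request.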