Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-04 04:40:38 +08:00
Merge pull request #1561 from KennyDizi/main
Support reasoning effort via configuration
@ -204,3 +204,12 @@ custom_model_max_tokens= ...
4. Most reasoning models do not support chat-style inputs (`system` and `user` messages) or temperature settings.
To bypass chat templates and temperature controls, set `config.custom_reasoning_model = true` in your configuration file.
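For illustration, here is a minimal sketch (not pr-agent's actual handler code; the helper name is hypothetical) of what that flag changes: the system and user prompts are merged into a single user message, and no temperature is sent later in the request.

```python
# Hypothetical helper illustrating the effect of config.custom_reasoning_model = true:
# chat templating is bypassed and only a single user message is produced.
def build_messages(system: str, user: str, custom_reasoning_model: bool) -> list[dict]:
    if custom_reasoning_model:
        # Reasoning-style models: one user message, no system role.
        return [{"role": "user", "content": f"{system}\n{user}"}]
    return [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]

print(build_messages("You are a PR reviewer.", "Review this diff.", custom_reasoning_model=True))
```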
## Dedicated parameters

### OpenAI models

[config]
reasoning_effort = "medium" # "low", "medium", "high"

With the OpenAI models that support reasoning effort (e.g., o3-mini), you can specify the reasoning effort via the `config` section. The default value is `medium`; you can change it to `high` or `low` based on your usage.
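The configured value is forwarded to LiteLLM along with the rest of the completion kwargs. As a rough standalone sketch (not taken from the PR; it assumes an `OPENAI_API_KEY` in the environment and uses an invented prompt), the same parameter can be passed to LiteLLM directly:

```python
import asyncio

from litellm import acompletion  # the client library pr-agent uses under the hood

async def main():
    response = await acompletion(
        model="o3-mini",
        messages=[{"role": "user", "content": "Summarize the changes in this pull request ..."}],
        reasoning_effort="medium",  # "low" | "medium" | "high"
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```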
@ -104,3 +104,8 @@ NO_SUPPORT_TEMPERATURE_MODELS = [
    "o3-mini-2025-01-31",
    "o1-preview"
]

SUPPORT_REASONING_EFFORT_MODELS = [
    "o3-mini",
    "o3-mini-2025-01-31"
]
@ -6,9 +6,9 @@ import requests
from litellm import acompletion
from tenacity import retry, retry_if_exception_type, stop_after_attempt

-from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
+from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, SUPPORT_REASONING_EFFORT_MODELS, USER_MESSAGE_ONLY_MODELS
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
-from pr_agent.algo.utils import get_version
+from pr_agent.algo.utils import ReasoningEffort, get_version
from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger
@ -101,6 +101,9 @@ class LiteLLMAIHandler(BaseAiHandler):
        # Model that doesn't support temperature argument
        self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS

        # Models that support reasoning effort
        self.support_reasoning_models = SUPPORT_REASONING_EFFORT_MODELS

    def prepare_logs(self, response, system, user, resp, finish_reason):
        response_log = response.dict().copy()
        response_log['system'] = system
@ -228,8 +231,16 @@ class LiteLLMAIHandler(BaseAiHandler):
            # Add temperature only if model supports it
            if model not in self.no_support_temperature_models and not get_settings().config.custom_reasoning_model:
                get_logger().info(f"Adding temperature with value {temperature} to model {model}.")
                kwargs["temperature"] = temperature

            # Add reasoning_effort if model supports it
            if (model in self.support_reasoning_models):
                supported_reasoning_efforts = [ReasoningEffort.HIGH.value, ReasoningEffort.MEDIUM.value, ReasoningEffort.LOW.value]
                reasoning_effort = get_settings().config.reasoning_effort if (get_settings().config.reasoning_effort in supported_reasoning_efforts) else ReasoningEffort.MEDIUM.value
                get_logger().info(f"Adding reasoning_effort with value {reasoning_effort} to model {model}.")
                kwargs["reasoning_effort"] = reasoning_effort

            if get_settings().litellm.get("enable_callbacks", False):
                kwargs = self.add_litellm_callbacks(kwargs)
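Pulling the two checks above into a standalone sketch may make the gating easier to follow. This is a simplified illustration, not the handler itself: plain function arguments stand in for `get_settings()`, and the capability lists are inlined.

```python
from enum import Enum

class ReasoningEffort(str, Enum):
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"

NO_SUPPORT_TEMPERATURE_MODELS = ["o3-mini", "o3-mini-2025-01-31", "o1-preview"]
SUPPORT_REASONING_EFFORT_MODELS = ["o3-mini", "o3-mini-2025-01-31"]

def build_completion_kwargs(model: str, temperature: float, configured_effort: str,
                            custom_reasoning_model: bool = False) -> dict:
    kwargs = {"model": model}
    # Temperature only for models that accept it, and never for a custom reasoning model.
    if model not in NO_SUPPORT_TEMPERATURE_MODELS and not custom_reasoning_model:
        kwargs["temperature"] = temperature
    # reasoning_effort only for models on the capability list; invalid values fall back to "medium".
    if model in SUPPORT_REASONING_EFFORT_MODELS:
        valid_efforts = {e.value for e in ReasoningEffort}
        kwargs["reasoning_effort"] = (configured_effort if configured_effort in valid_efforts
                                      else ReasoningEffort.MEDIUM.value)
    return kwargs

print(build_completion_kwargs("o3-mini", 0.2, "high"))  # reasoning_effort, no temperature
print(build_completion_kwargs("gpt-4o", 0.2, "high"))   # temperature, no reasoning_effort
```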
@ -50,6 +50,11 @@ class PRReviewHeader(str, Enum):
    REGULAR = "## PR Reviewer Guide"
    INCREMENTAL = "## Incremental PR Reviewer Guide"

class ReasoningEffort(str, Enum):
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"


class PRDescriptionHeader(str, Enum):
    CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"
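A brief aside on the new `ReasoningEffort` enum above (illustration only, not part of the diff): because it subclasses `str`, its members compare equal to the plain strings stored in configuration, which is what lets the handler match `config.reasoning_effort` against enum values directly.

```python
from enum import Enum

class ReasoningEffort(str, Enum):
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"

print(ReasoningEffort.MEDIUM == "medium")            # True, thanks to the str base class
print("high" in [e.value for e in ReasoningEffort])  # True, mirrors the handler's membership check
```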
@ -48,6 +48,7 @@ ignore_pr_authors = [] # authors to ignore from PR agent when a PR is created
#
is_auto_command = false # will be auto-set to true if the command is triggered by an automation
enable_ai_metadata = false # will enable adding ai metadata
reasoning_effort = "medium" # "low", "medium", "high"
# auto approval 💎
enable_auto_approval=false # Set to true to enable auto-approval of PRs under certain conditions
auto_approve_for_low_review_effort=-1 # -1 to disable, [1-5] to set the threshold for auto-approval
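For completeness, the new default can be read back through the same Dynaconf-based loader the handler uses. A minimal sketch, assuming a working pr-agent installation and no configuration overrides:

```python
from pr_agent.config_loader import get_settings  # same accessor the handler uses

effort = get_settings().config.reasoning_effort  # "medium" unless overridden in a config layer
print(f"Configured reasoning effort: {effort}")
```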