Merge pull request #1614 from KennyDizi/main

Fix default value for extended_thinking_max_output_tokens
This commit is contained in:
Tal
2025-03-11 14:34:58 +02:00
committed by GitHub
3 changed files with 3 additions and 3 deletions

View File

@@ -240,4 +240,4 @@ With the OpenAI models that support reasoning effort (eg: o3-mini), you can spec
[config]
enable_claude_extended_thinking = false # Set to true to enable extended thinking feature
extended_thinking_budget_tokens = 2048
extended_thinking_max_output_tokens = 2048
extended_thinking_max_output_tokens = 4096

View File

@@ -136,7 +136,7 @@ class LiteLLMAIHandler(BaseAiHandler):
dict: Updated kwargs with extended thinking configuration
"""
extended_thinking_budget_tokens = get_settings().config.get("extended_thinking_budget_tokens", 2048)
extended_thinking_max_output_tokens = get_settings().config.get("extended_thinking_max_output_tokens", 2048)
extended_thinking_max_output_tokens = get_settings().config.get("extended_thinking_max_output_tokens", 4096)
# Validate extended thinking parameters
if not isinstance(extended_thinking_budget_tokens, int) or extended_thinking_budget_tokens <= 0:

View File

@@ -63,7 +63,7 @@ auto_approve_for_no_suggestions=false # If true, the PR will be auto-approved if
# extended thinking for Claude reasoning models
enable_claude_extended_thinking = false # Set to true to enable extended thinking feature
extended_thinking_budget_tokens = 2048
extended_thinking_max_output_tokens = 2048
extended_thinking_max_output_tokens = 4096
[pr_reviewer] # /review #