Merge pull request #1551 from qodo-ai/tr/custom_reasoning_model

feat: add support for custom reasoning models
Authored by Tal on 2025-02-18 12:04:24 +02:00; committed by GitHub.
3 changed files with 6 additions and 2 deletions


@@ -201,3 +201,6 @@ fallback_models=["custom_model_name"]
 custom_model_max_tokens= ...
 ```
+(3) Go to [litellm documentation](https://litellm.vercel.app/docs/proxy/quick_start#supported-llms), find the model you want to use, and set the relevant environment variables.
+(4) Most reasoning models do not support chat-style inputs (`system` and `user` messages) or temperature settings.
+To bypass chat templates and temperature controls, set `config.custom_reasoning_model = true` in your configuration file.
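Putting steps (1)-(4) together, a configuration for a custom reasoning model could look like the minimal sketch below. The model name and token limit are placeholders rather than values from this PR; only `custom_reasoning_model` is the flag introduced here.

```toml
[config]
model = "custom_model_name"             # placeholder: your custom model's name
fallback_models = ["custom_model_name"]
custom_model_max_tokens = 8192          # assumed value; set your model's real limit
custom_reasoning_model = true           # new flag: merge system into user prompt, skip temperature
```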


@@ -205,7 +205,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                          {"type": "image_url", "image_url": {"url": img_path}}]
             # Currently, some models do not support a separate system and user prompts
-            if model in self.user_message_only_models:
+            if model in self.user_message_only_models or get_settings().config.custom_reasoning_model:
                 user = f"{system}\n\n\n{user}"
                 system = ""
                 get_logger().info(f"Using model {model}, combining system and user prompts")
@@ -227,7 +227,7 @@ class LiteLLMAIHandler(BaseAiHandler):
             }
             # Add temperature only if model supports it
-            if model not in self.no_support_temperature_models:
+            if model not in self.no_support_temperature_models and not get_settings().config.custom_reasoning_model:
                 kwargs["temperature"] = temperature
             if get_settings().litellm.get("enable_callbacks", False):
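Together, the two changes above make `config.custom_reasoning_model` a global override: when it is true, the system prompt is folded into the user message and the `temperature` argument is omitted, regardless of the model name. A minimal, self-contained sketch of that combined behavior follows; the function name and return shape are illustrative only, and the per-model allow-lists (`user_message_only_models`, `no_support_temperature_models`) are omitted for brevity.

```python
def build_request(model: str, system: str, user: str, temperature: float,
                  custom_reasoning_model: bool) -> dict:
    # Illustrative sketch of the post-PR logic, not the real LiteLLMAIHandler API.
    if custom_reasoning_model:
        # Reasoning models without chat-template support: merge prompts into one user message
        user = f"{system}\n\n\n{user}"
        system = ""
    messages = [{"role": "system", "content": system}] if system else []
    messages.append({"role": "user", "content": user})
    kwargs = {"model": model, "messages": messages}
    if not custom_reasoning_model:
        # Only pass temperature to models that accept it
        kwargs["temperature"] = temperature
    return kwargs
```

With `custom_reasoning_model=True`, the returned dict contains a single user message and no `temperature` key.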


@@ -17,6 +17,7 @@ use_global_settings_file=true
 disable_auto_feedback = false
 ai_timeout=120 # 2minutes
 skip_keys = []
+custom_reasoning_model = false # when true, disables system messages and temperature controls for models that don't support chat-style inputs
 # token limits
 max_description_tokens = 500
 max_commits_tokens = 500