diff --git a/README.md b/README.md
index 3ca4f90f..8bf3893f 100644
--- a/README.md
+++ b/README.md
@@ -4,8 +4,8 @@
-
-
+
+
@@ -213,12 +213,6 @@ Note that this is a promotional bot, suitable only for initial experimentation.
It does not have 'edit' access to your repo, for example, so it cannot update the PR description or add labels (`@CodiumAI-Agent /describe` will publish the PR description as a comment). In addition, the bot cannot be used on private repositories, as it does not have access to the files there.
-
-
-
-To set up your own PR-Agent, see the [Installation](https://qodo-merge-docs.qodo.ai/installation/) section below.
-Note that when you set your own PR-Agent or use Qodo hosted PR-Agent, there is no need to mention `@CodiumAI-Agent ...`. Instead, directly start with the command, e.g., `/ask ...`.
-
---
@@ -273,8 +267,6 @@ https://openai.com/enterprise-privacy
## Links
-[](https://discord.gg/kG35uSHDBc)
-
- Discord community: https://discord.gg/kG35uSHDBc
- Qodo site: https://www.qodo.ai/
- Blog: https://www.qodo.ai/blog/
diff --git a/docs/docs/assets/favicon.ico b/docs/docs/assets/favicon.ico
index b6b7e012..7ae74bd5 100644
Binary files a/docs/docs/assets/favicon.ico and b/docs/docs/assets/favicon.ico differ
diff --git a/docs/docs/assets/logo.png b/docs/docs/assets/logo.png
index 4c9fec1d..340590b0 100644
Binary files a/docs/docs/assets/logo.png and b/docs/docs/assets/logo.png differ
diff --git a/docs/docs/assets/logo.svg b/docs/docs/assets/logo.svg
index a22defe7..d43dfdb8 100644
--- a/docs/docs/assets/logo.svg
+++ b/docs/docs/assets/logo.svg
@@ -1 +1,9 @@
-
+
diff --git a/docs/docs/assets/logo_.png b/docs/docs/assets/logo_.png
index 22b8d4f0..d6db64f1 100644
Binary files a/docs/docs/assets/logo_.png and b/docs/docs/assets/logo_.png differ
diff --git a/docs/docs/tools/improve.md b/docs/docs/tools/improve.md
index c9f2bd3c..b00ea66c 100644
--- a/docs/docs/tools/improve.md
+++ b/docs/docs/tools/improve.md
@@ -9,8 +9,9 @@ The tool can be triggered automatically every time a new PR is [opened](../usage
{width=512}
-Note that the `Apply this suggestion` checkbox, which interactively converts a suggestion into a commitable code comment, is available only for Qodo Merge💎 users.
-
+Note that the following features are available only for Qodo Merge💎 users:
+- The `Apply this suggestion` checkbox, which interactively converts a suggestion into a committable code comment
+- The `More` checkbox to generate additional suggestions
## Example usage
@@ -366,6 +367,10 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
 apply_suggestions_checkbox
 Enable the checkbox to create a committable suggestion. Default is true.
+
+enable_more_suggestions_checkbox
+Enable the checkbox to generate more suggestions. Default is true.
+
 enable_help_text
 If set to true, the tool will display a help text in the comment. Default is true.
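
For reference, both checkboxes documented above are plain configuration flags. A minimal sketch of how they could be set, assuming they live under the `[pr_code_suggestions]` section of the tool's TOML configuration (the section name is an assumption, not shown in this diff):

```toml
[pr_code_suggestions]
# Show the checkbox that turns a suggestion into a committable code comment (default: true)
apply_suggestions_checkbox = true
# Show the checkbox that requests additional suggestions (default: true)
enable_more_suggestions_checkbox = true
```
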
diff --git a/pr_agent/algo/__init__.py b/pr_agent/algo/__init__.py
index 10b53cb3..37ca48ac 100644
--- a/pr_agent/algo/__init__.py
+++ b/pr_agent/algo/__init__.py
@@ -45,11 +45,11 @@ MAX_TOKENS = {
'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
'vertex_ai/gemini-1.5-pro': 1048576,
'vertex_ai/gemini-1.5-flash': 1048576,
- 'vertex_ai/gemini-2.0-flash-exp': 1048576,
+ 'vertex_ai/gemini-2.0-flash': 1048576,
'vertex_ai/gemma2': 8200,
'gemini/gemini-1.5-pro': 1048576,
'gemini/gemini-1.5-flash': 1048576,
- 'gemini/gemini-2.0-flash-exp': 1048576,
+ 'gemini/gemini-2.0-flash': 1048576,
'codechat-bison': 6144,
'codechat-bison-32k': 32000,
'anthropic.claude-instant-v1': 100000,
@@ -85,11 +85,19 @@ MAX_TOKENS = {
}
USER_MESSAGE_ONLY_MODELS = [
+ "deepseek/deepseek-reasoner",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "o1-preview"
+]
+
+NO_SUPPORT_TEMPERATURE_MODELS = [
"deepseek/deepseek-reasoner",
"o1-mini",
"o1-mini-2024-09-12",
"o1",
"o1-2024-12-17",
"o3-mini",
- "o3-mini-2025-01-31"
+ "o3-mini-2025-01-31",
+ "o1-preview"
]
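
A note on how these lists are consumed: the handler change below switches from substring matching to exact-name membership, so provider-prefixed names (e.g. `openai/o1-mini`) no longer match the bare entries. A minimal sketch of the lookup semantics; `model_capabilities` is a hypothetical helper, only the import path comes from this diff:

```python
from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS


def model_capabilities(model: str) -> dict:
    # Exact-name membership, mirroring the checks in the handler below
    return {
        "user_message_only": model in USER_MESSAGE_ONLY_MODELS,
        "supports_temperature": model not in NO_SUPPORT_TEMPERATURE_MODELS,
    }


print(model_capabilities("o1-mini"))
# {'user_message_only': True, 'supports_temperature': False}
print(model_capabilities("openai/o1-mini"))
# prefixed name misses both lists: {'user_message_only': False, 'supports_temperature': True}
```
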
diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index c936ebd5..0ef29b51 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -6,7 +6,7 @@ import requests
from litellm import acompletion
from tenacity import retry, retry_if_exception_type, stop_after_attempt
-from pr_agent.algo import USER_MESSAGE_ONLY_MODELS
+from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.utils import get_version
from pr_agent.config_loader import get_settings
@@ -98,6 +98,9 @@ class LiteLLMAIHandler(BaseAiHandler):
 # Models that only use the user message
self.user_message_only_models = USER_MESSAGE_ONLY_MODELS
+ # Models that don't support the temperature argument
+ self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS
+
def prepare_logs(self, response, system, user, resp, finish_reason):
response_log = response.dict().copy()
response_log['system'] = system
@@ -202,7 +205,7 @@ class LiteLLMAIHandler(BaseAiHandler):
{"type": "image_url", "image_url": {"url": img_path}}]
 # Currently, some models do not support separate system and user prompts
- if self.user_message_only_models and any(entry.lower() in model.lower() for entry in self.user_message_only_models):
+ if model in self.user_message_only_models:
user = f"{system}\n\n\n{user}"
system = ""
get_logger().info(f"Using model {model}, combining system and user prompts")
@@ -219,11 +222,14 @@ class LiteLLMAIHandler(BaseAiHandler):
"model": model,
"deployment_id": deployment_id,
"messages": messages,
- "temperature": temperature,
"timeout": get_settings().config.ai_timeout,
"api_base": self.api_base,
}
+ # Add temperature only if model supports it
+ if model not in self.no_support_temperature_models:
+ kwargs["temperature"] = temperature
+
if get_settings().litellm.get("enable_callbacks", False):
kwargs = self.add_litellm_callbacks(kwargs)
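
Taken together, the two lists now gate both the prompt layout and the temperature argument. A standalone sketch of the resulting request construction, simplified from the handler above (`build_request` is illustrative, not part of the codebase):

```python
from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS


def build_request(model: str, system: str, user: str, temperature: float) -> dict:
    # Models that accept only a user message get the system prompt folded in
    if model in USER_MESSAGE_ONLY_MODELS:
        user = f"{system}\n\n\n{user}"
        system = ""
    kwargs = {
        "model": model,
        "messages": [
            {"role": "system", "content": system},
            {"role": "user", "content": user},
        ],
    }
    # Pass temperature only to models that support it
    if model not in NO_SUPPORT_TEMPERATURE_MODELS:
        kwargs["temperature"] = temperature
    return kwargs


build_request("o1-preview", "sys", "usr", 0.2)  # prompts combined, no "temperature" key
build_request("gpt-4o", "sys", "usr", 0.2)      # separate prompts, "temperature": 0.2
```
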