Merge remote-tracking branch 'origin/main'

mrT23 · 2025-02-17 20:33:28 +02:00
8 changed files with 38 additions and 19 deletions

View File

@@ -4,8 +4,8 @@
 <picture>
-  <source media="(prefers-color-scheme: dark)" srcset="https://codium.ai/images/pr_agent/logo-dark.png" width="330">
-  <source media="(prefers-color-scheme: light)" srcset="https://codium.ai/images/pr_agent/logo-light.png" width="330">
+  <source media="(prefers-color-scheme: dark)" srcset="https://www.qodo.ai/wp-content/uploads/2025/02/PR-Agent-Light-1.png" width="330">
+  <source media="(prefers-color-scheme: light)" srcset="https://www.qodo.ai/wp-content/uploads/2025/02/PR-Agent-Dark-1.png" width="330">
   <img src="https://codium.ai/images/pr_agent/logo-light.png" alt="logo" width="330">
 </picture>
@@ -213,12 +213,6 @@ Note that this is a promotional bot, suitable only for initial experimentation.
 It does not have 'edit' access to your repo, for example, so it cannot update the PR description or add labels (`@CodiumAI-Agent /describe` will publish PR description as a comment). In addition, the bot cannot be used on private repositories, as it does not have access to the files there.
 
-![Review generation process](https://www.codium.ai/images/demo-2.gif)
-
-To set up your own PR-Agent, see the [Installation](https://qodo-merge-docs.qodo.ai/installation/) section below.
-
-Note that when you set your own PR-Agent or use Qodo hosted PR-Agent, there is no need to mention `@CodiumAI-Agent ...`. Instead, directly start with the command, e.g., `/ask ...`.
-
 ---
@@ -273,8 +267,6 @@ https://openai.com/enterprise-privacy
 ## Links
 
-[![Join our Discord community](https://raw.githubusercontent.com/Codium-ai/codiumai-vscode-release/main/media/docs/Joincommunity.png)](https://discord.gg/kG35uSHDBc)
-
 - Discord community: https://discord.gg/kG35uSHDBc
 - Qodo site: https://www.qodo.ai/
 - Blog: https://www.qodo.ai/blog/

Binary file not shown (size before: 4.2 KiB, after: 15 KiB).

Binary file not shown (size before: 263 KiB, after: 57 KiB).

File diff suppressed because one or more lines are too long (size before: 1.2 KiB, after: 24 KiB).

Binary file not shown (size before: 8.7 KiB, after: 17 KiB).

View File

@@ -9,8 +9,9 @@ The tool can be triggered automatically every time a new PR is [opened](../usage
 ![code_suggestions_as_comment_open.png](https://codium.ai/images/pr_agent/code_suggestions_as_comment_open.png){width=512}
 
-Note that the `Apply this suggestion` checkbox, which interactively converts a suggestion into a commitable code comment, is available only for Qodo Merge💎 users.
+Note that the following features are available only for Qodo Merge💎 users:
+
+- The `Apply this suggestion` checkbox, which interactively converts a suggestion into a committable code comment
+- The `More` checkbox to generate additional suggestions
 
 ## Example usage
@@ -366,6 +367,10 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
  <tr>
   <td><b>apply_suggestions_checkbox</b></td>
   <td> Enable the checkbox to create a committable suggestion. Default is true.</td>
  </tr>
+ <tr>
+  <td><b>enable_more_suggestions_checkbox</b></td>
+  <td> Enable the checkbox to generate more suggestions. Default is true.</td>
+ </tr>
  <tr>
   <td><b>enable_help_text</b></td>
   <td>If set to true, the tool will display a help text in the comment. Default is true.</td>
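For orientation, a minimal sketch of how a tool might read the new flag through the repository's settings loader. The `pr_code_suggestions` section name and attribute path are assumptions inferred from the table above; this diff only documents the flag name and its default:

    from pr_agent.config_loader import get_settings

    # Assumed settings path; not confirmed by this diff.
    if get_settings().pr_code_suggestions.enable_more_suggestions_checkbox:
        # Render the `More` checkbox beneath the generated suggestions.
        ...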

View File

@@ -45,11 +45,11 @@ MAX_TOKENS = {
     'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
     'vertex_ai/gemini-1.5-pro': 1048576,
     'vertex_ai/gemini-1.5-flash': 1048576,
-    'vertex_ai/gemini-2.0-flash-exp': 1048576,
+    'vertex_ai/gemini-2.0-flash': 1048576,
     'vertex_ai/gemma2': 8200,
     'gemini/gemini-1.5-pro': 1048576,
     'gemini/gemini-1.5-flash': 1048576,
-    'gemini/gemini-2.0-flash-exp': 1048576,
+    'gemini/gemini-2.0-flash': 1048576,
     'codechat-bison': 6144,
     'codechat-bison-32k': 32000,
     'anthropic.claude-instant-v1': 100000,
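`MAX_TOKENS` maps model identifiers to context-window sizes, so renaming the entry keeps token budgeting correct now that the provider has dropped the `-exp` suffix. A minimal lookup sketch, assuming `MAX_TOKENS` is importable from `pr_agent.algo` (the module this hunk edits); the helper name and fallback value are illustrative, not part of the codebase:

    from pr_agent.algo import MAX_TOKENS

    def context_window(model: str, fallback: int = 8192) -> int:
        # Illustrative helper: unlisted models get a conservative default.
        return MAX_TOKENS.get(model, fallback)

    print(context_window('gemini/gemini-2.0-flash'))  # 1048576, per the renamed entry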
@@ -85,11 +85,19 @@ MAX_TOKENS = {
 }
 
 USER_MESSAGE_ONLY_MODELS = [
+    "deepseek/deepseek-reasoner",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "o1-preview"
+]
+
+NO_SUPPORT_TEMPERATURE_MODELS = [
     "deepseek/deepseek-reasoner",
     "o1-mini",
     "o1-mini-2024-09-12",
     "o1",
     "o1-2024-12-17",
     "o3-mini",
-    "o3-mini-2025-01-31"
+    "o3-mini-2025-01-31",
+    "o1-preview"
 ]
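After this hunk the two lists encode separate capabilities, and a model can appear in both. A quick sketch of what the split implies, with values taken directly from the lists above:

    from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS

    model = "o1-preview"
    # True: the handler folds the system prompt into the user message for this model.
    print(model in USER_MESSAGE_ONLY_MODELS)
    # True: the handler omits the temperature argument for this model.
    print(model in NO_SUPPORT_TEMPERATURE_MODELS)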

View File

@@ -6,7 +6,7 @@ import requests
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
 
-from pr_agent.algo import USER_MESSAGE_ONLY_MODELS
+from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.utils import get_version
 from pr_agent.config_loader import get_settings
@@ -98,6 +98,9 @@ class LiteLLMAIHandler(BaseAiHandler):
         # Models that only use user meessage
         self.user_message_only_models = USER_MESSAGE_ONLY_MODELS
 
+        # Model that doesn't support temperature argument
+        self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS
+
     def prepare_logs(self, response, system, user, resp, finish_reason):
         response_log = response.dict().copy()
         response_log['system'] = system
@@ -202,7 +205,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                                           {"type": "image_url", "image_url": {"url": img_path}}]
 
             # Currently, some models do not support a separate system and user prompts
-            if self.user_message_only_models and any(entry.lower() in model.lower() for entry in self.user_message_only_models):
+            if model in self.user_message_only_models:
                 user = f"{system}\n\n\n{user}"
                 system = ""
                 get_logger().info(f"Using model {model}, combining system and user prompts")
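The effect of the branch above, shown standalone. The prompt strings are illustrative, and the two-entry message shape mirrors how this handler builds its `messages` list:

    from pr_agent.algo import USER_MESSAGE_ONLY_MODELS

    system = "You are a code reviewer."    # illustrative prompt
    user = "Review the attached diff."     # illustrative prompt
    model = "o1-mini"                      # listed in USER_MESSAGE_ONLY_MODELS

    if model in USER_MESSAGE_ONLY_MODELS:
        user = f"{system}\n\n\n{user}"     # system text rides inside the user turn
        system = ""                        # the system slot is sent empty

    messages = [{"role": "system", "content": system},
                {"role": "user", "content": user}]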
@@ -219,11 +222,14 @@ class LiteLLMAIHandler(BaseAiHandler):
             kwargs = {
                 "model": model,
                 "deployment_id": deployment_id,
                 "messages": messages,
-                "temperature": temperature,
                 "timeout": get_settings().config.ai_timeout,
                 "api_base": self.api_base,
             }
+
+            # Add temperature only if model supports it
+            if model not in self.no_support_temperature_models:
+                kwargs["temperature"] = temperature
+
             if get_settings().litellm.get("enable_callbacks", False):
                 kwargs = self.add_litellm_callbacks(kwargs)
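Condensed, the new call pattern looks roughly like the sketch below. It is not the handler itself; the timeout value stands in for `get_settings().config.ai_timeout`:

    from litellm import acompletion
    from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS

    async def completion(model: str, messages: list, temperature: float = 0.2):
        kwargs = {"model": model, "messages": messages, "timeout": 180}
        # Same guard as the handler: attach temperature only when the model accepts it.
        if model not in NO_SUPPORT_TEMPERATURE_MODELS:
            kwargs["temperature"] = temperature
        return await acompletion(**kwargs)

Dropping the key entirely, rather than passing something like `temperature=None`, presumably avoids forwarding a parameter that the o-series and deepseek-reasoner endpoints reject.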