Update model references from o3-mini to o4-mini and add Gemini models

This commit is contained in:
mrT23
2025-04-19 09:26:35 +03:00
parent ca88ec96d6
commit 4ac0aa56e5
5 changed files with 38 additions and 18 deletions

View File

@ -18,8 +18,8 @@ Qodo Merge is designed for companies and teams that require additional features
Here are some of the additional features and capabilities that Qodo Merge offers: Here are some of the additional features and capabilities that Qodo Merge offers:
| Feature | Description | | Feature | Description |
| -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | -------------------------------------------------------------------------------------------------------------------- |--------------------------------------------------------------------------------------------------------------------------------------------------------|
| [**Model selection**](https://qodo-merge-docs.qodo.ai/usage-guide/PR_agent_pro_models/) | Choose the model that best fits your needs, among top models like `Claude Sonnet` and `o3-mini` | | [**Model selection**](https://qodo-merge-docs.qodo.ai/usage-guide/PR_agent_pro_models/) | Choose the model that best fits your needs, among top models like `Claude Sonnet` and `o4-mini` |
| [**Global and wiki configuration**](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without committing code | | [**Global and wiki configuration**](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without committing code |
| [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate committable code from the relevant suggestions interactively by clicking on a checkbox | | [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate committable code from the relevant suggestions interactively by clicking on a checkbox |
| [**Suggestions impact**](https://qodo-merge-docs.qodo.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions | | [**Suggestions impact**](https://qodo-merge-docs.qodo.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions |

View File

@ -7,27 +7,44 @@ Users can configure Qodo Merge to use a specific model by editing the [configura
The models supported by Qodo Merge are: The models supported by Qodo Merge are:
- `claude-3-7-sonnet` (default) - `claude-3-7-sonnet` (default)
- `o3-mini` - `o4-mini`
- `gpt-4.1` - `gpt-4.1`
- `gemini-2.5-pro`
- `gemini-2.5-flash`
- `deepseek/r1` - `deepseek/r1`
To restrict Qodo Merge to using only `o3-mini`, add this setting: To restrict Qodo Merge to using only `o4-mini`, add this setting:
``` ```toml
[config] [config]
model="o3-mini" model="o4-mini"
``` ```
To restrict Qodo Merge to using only `GPT-4.1`, add this setting: To restrict Qodo Merge to using only `GPT-4.1`, add this setting:
``` ```toml
[config] [config]
model="gpt-4.1" model="gpt-4.1"
``` ```
To restrict Qodo Merge to using only `gemini-2.5-pro`, add this setting:
```toml
[config]
model="gemini-2.5-pro"
```
To restrict Qodo Merge to using only `gemini-2.5-flash`, add this setting:
```toml
[config]
model="gemini-2.5-flash"
```
To restrict Qodo Merge to using only the US-hosted `deepseek-r1`, add this setting: To restrict Qodo Merge to using only the US-hosted `deepseek-r1`, add this setting:
``` ```toml
[config] [config]
model="deepseek/r1" model="deepseek/r1"
``` ```

View File

@ -57,8 +57,10 @@ MAX_TOKENS = {
'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000, 'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
'vertex_ai/claude-3-7-sonnet@20250219': 200000, 'vertex_ai/claude-3-7-sonnet@20250219': 200000,
'vertex_ai/gemini-1.5-pro': 1048576, 'vertex_ai/gemini-1.5-pro': 1048576,
'vertex_ai/gemini-2.5-pro-preview-03-25': 1048576,
'vertex_ai/gemini-1.5-flash': 1048576, 'vertex_ai/gemini-1.5-flash': 1048576,
'vertex_ai/gemini-2.0-flash': 1048576, 'vertex_ai/gemini-2.0-flash': 1048576,
'vertex_ai/gemini-2.5-flash-preview-04-17': 1048576,
'vertex_ai/gemma2': 8200, 'vertex_ai/gemma2': 8200,
'gemini/gemini-1.5-pro': 1048576, 'gemini/gemini-1.5-pro': 1048576,
'gemini/gemini-1.5-flash': 1048576, 'gemini/gemini-1.5-flash': 1048576,

View File

@ -878,6 +878,7 @@ def get_max_tokens(model):
elif settings.config.custom_model_max_tokens > 0: elif settings.config.custom_model_max_tokens > 0:
max_tokens_model = settings.config.custom_model_max_tokens max_tokens_model = settings.config.custom_model_max_tokens
else: else:
get_logger().error(f"Model {model} is not defined in MAX_TOKENS in ./pr_agent/algo/__init__.py and no custom_model_max_tokens is set")
raise Exception(f"Ensure {model} is defined in MAX_TOKENS in ./pr_agent/algo/__init__.py or set a positive value for it in config.custom_model_max_tokens") raise Exception(f"Ensure {model} is defined in MAX_TOKENS in ./pr_agent/algo/__init__.py or set a positive value for it in config.custom_model_max_tokens")
if settings.config.max_model_tokens and settings.config.max_model_tokens > 0: if settings.config.max_model_tokens and settings.config.max_model_tokens > 0:

View File

@ -7,8 +7,8 @@
[config] [config]
# models # models
model="o4-mini" model="o4-mini"
fallback_models=["gpt-4o-2024-11-20"] fallback_models=["gpt-4.1"]
#model_weak="gpt-4o-mini-2024-07-18" # optional, a weaker model to use for some easier tasks #model_weak="gpt-4o" # optional, a weaker model to use for some easier tasks
# CLI # CLI
git_provider="github" git_provider="github"
publish_output=true publish_output=true