mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-02 03:40:38 +08:00)

Add multi-model support for different reasoning tasks
@@ -516,4 +516,5 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 600 line
 - **Bug detection:** The suggestions also alert on any _critical bugs_ that may have been identified during the analysis. This provides an additional safety net to catch potential issues before they make it into production. It's perfectly acceptable to implement only the suggestions you find valuable for your specific context.
 - **Hierarchy:** Presenting the suggestions in a structured hierarchical table enables the user to _quickly_ understand them, and to decide which ones are relevant and which are not.
 - **Customization:** To guide the model to suggestions that are more relevant to the specific needs of your project, we recommend to use the [`extra_instructions`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) and [`best practices`](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) fields.
+- **Model Selection:** SaaS users can also [choose](https://qodo-merge-docs.qodo.ai/usage-guide/qodo_merge_models/) between different models. For specific programming languages or use cases, some models may perform better than others.
 - **Interactive usage:** The interactive [PR chat](https://qodo-merge-docs.qodo.ai/chrome-extension/) also provides an easy way to get more tailored suggestions and feedback from the AI model.
@@ -1,16 +1,15 @@
-The default model used by Qodo Merge (March 2025) is Claude Sonnet 3.7.
+The default models used by Qodo Merge (April 2025) are a combination of Claude Sonnet 3.7 and Gemini 2.5 Pro.
 
 ### Selecting a Specific Model
 
-Users can configure Qodo Merge to use a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.
+Users can configure Qodo Merge to use only a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.
 The models supported by Qodo Merge are:
 
-- `claude-3-7-sonnet` (default)
+- `claude-3-7-sonnet`
 - `o4-mini`
 - `gpt-4.1`
 - `gemini-2.5-pro`
-- `gemini-2.5-flash`
 - `deepseek/r1`
 
 To restrict Qodo Merge to using only `o4-mini`, add this setting:
 
@@ -34,13 +33,6 @@ To restrict Qodo Merge to using only `gemini-2.5-pro`, add this setting:
 model="gemini-2.5-pro"
 ```
-
-To restrict Qodo Merge to using only `gemini-2.5-flash`, add this setting:
-
-```toml
-[config]
-model="gemini-2.5-flash"
-```
 
 
 To restrict Qodo Merge to using only `deepseek-r1` us-hosted, add this setting:
 
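For readers following the documentation change, here is a minimal, self-contained sketch of how such a `[config] model=...` setting could be parsed and validated in Python. It is an illustration only, not pr-agent's actual settings loader (which is outside this diff); it assumes Python 3.11+ for the stdlib `tomllib` module and uses the supported-model list from the updated docs above.

```python
import tomllib  # stdlib TOML parser, Python 3.11+

# Supported models per the updated documentation hunk above.
SUPPORTED_MODELS = {"claude-3-7-sonnet", "o4-mini", "gpt-4.1", "gemini-2.5-pro", "deepseek/r1"}

# A configuration snippet in the shape the docs describe.
CONFIG_TOML = """
[config]
model = "o4-mini"
"""

settings = tomllib.loads(CONFIG_TOML)
model = settings["config"]["model"]
if model not in SUPPORTED_MODELS:
    raise ValueError(f"Unsupported model: {model!r}")
print(f"Qodo Merge would be restricted to {model}")
```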
@@ -102,7 +102,7 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
 lines_before_original = file_original_lines[extended_start1 - 1:start1 - 1]
 lines_before_new = file_new_lines[extended_start2 - 1:start2 - 1]
 found_header = False
-for i, line, in enumerate(lines_before_original):
+for i, line in enumerate(lines_before_original):
     if section_header in line:
         # Update start and size in one line each
         extended_start1, extended_start2 = extended_start1 + i, extended_start2 + i
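The only change in this hunk is dropping the stray trailing comma in the `for` target list. A quick sketch (standalone toy data, not the diff's variables) showing this is a readability fix rather than a behavior change, since a trailing comma in an unpacking target is legal Python and unpacks identically:

```python
lines = ["alpha", "beta"]

old_style = []
for i, line, in enumerate(lines):  # trailing comma: legal, but easy to misread
    old_style.append((i, line))

new_style = []
for i, line in enumerate(lines):  # cleaned-up form from the hunk
    new_style.append((i, line))

assert old_style == new_style == [(0, "alpha"), (1, "beta")]
```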
@@ -136,7 +136,9 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
     delta_lines_original = delta_lines_original[i:]
     delta_lines_new = delta_lines_new[i:]
     extended_start1 += i
+    extended_size1 -= i
     extended_start2 += i
+    extended_size2 -= i
     found_mini_match = True
     break
 if not found_mini_match:
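The two added lines keep each extended hunk's size consistent with its shifted start. A small arithmetic sketch with hypothetical numbers, assuming `extended_start*`/`extended_size*` describe a hunk header's start line and line count (which is how they read in `process_patch_lines`):

```python
# Hypothetical hunk: starts at line 10 and spans 20 lines, i.e. lines 10..29.
extended_start1, extended_size1 = 10, 20
i = 4  # the surrounding loop trimmed the first 4 extra context lines

# Without the added line, only the start moves: the hunk would claim
# lines 14..33, four lines past the content actually kept.
end_without_fix = (extended_start1 + i) + extended_size1 - 1   # 33

# With the added line, start and size move together and the end stays put.
extended_start1 += i
extended_size1 -= i
end_with_fix = extended_start1 + extended_size1 - 1            # 29

assert (end_without_fix, end_with_fix) == (33, 29)
```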
@@ -342,6 +342,8 @@ def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]:
     model = get_model('model_weak')
 elif model_type == ModelType.REASONING:
     model = get_model('model_reasoning')
+elif model_type == ModelType.REGULAR:
+    model = get_settings().config.model
 else:
     model = get_settings().config.model
 fallback_models = get_settings().config.fallback_models
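The branch this hunk adds makes the `ModelType.REGULAR` case explicit instead of relying on the final `else`. A compact, hypothetical re-sketch of the resulting dispatch (stand-in enum values and settings dict; the real `get_model`/`get_settings` helpers are pr-agent internals not reproduced here):

```python
from enum import Enum


class ModelType(Enum):
    REGULAR = "regular"
    WEAK = "weak"
    REASONING = "reasoning"


# Stand-in settings; the key names mirror those used in the hunk.
SETTINGS = {
    "model": "claude-3-7-sonnet",
    "model_weak": "gpt-4.1",
    "model_reasoning": "o4-mini",
}


def pick_model(model_type: ModelType) -> str:
    """Mirror the branch order in the hunk: weak, reasoning, regular, else default."""
    if model_type == ModelType.WEAK:
        return SETTINGS["model_weak"]
    elif model_type == ModelType.REASONING:
        return SETTINGS["model_reasoning"]
    elif model_type == ModelType.REGULAR:
        return SETTINGS["model"]
    return SETTINGS["model"]


assert pick_model(ModelType.REASONING) == "o4-mini"
assert pick_model(ModelType.REGULAR) == "claude-3-7-sonnet"
```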
@@ -417,8 +417,9 @@ class PRCodeSuggestions:
 
 # self-reflect on suggestions (mandatory, since line numbers are generated now here)
 model_reflect_with_reasoning = get_model('model_reasoning')
-if model_reflect_with_reasoning == get_settings().config.model and model != get_settings().config.model and model == \
-        get_settings().config.fallback_models[0]:
+fallbacks = get_settings().config.fallback_models
+if model_reflect_with_reasoning == get_settings().config.model and model != get_settings().config.model and fallbacks and model == \
+        fallbacks[0]:
     # we are using a fallback model (should not happen on regular conditions)
     get_logger().warning(f"Using the same model for self-reflection as the one used for suggestions")
     model_reflect_with_reasoning = model
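The rewritten condition pulls `fallback_models` into a local name and adds a truthiness check before indexing it. A short sketch of the failure mode being guarded against (hypothetical values; only the guard pattern is taken from the hunk):

```python
def is_first_fallback(model: str, fallbacks: list[str]) -> bool:
    # Without the guard, fallbacks[0] raises IndexError when the list is empty;
    # `fallbacks and ...` short-circuits before the index is evaluated.
    return bool(fallbacks) and model == fallbacks[0]


assert is_first_fallback("o4-mini", ["o4-mini", "gpt-4.1"]) is True
assert is_first_fallback("o4-mini", []) is False  # no IndexError on an empty fallback list
```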