Mirror of https://github.com/qodo-ai/pr-agent.git
synced 2025-07-10 15:50:37 +08:00

Compare commits (1 commit): ofir-frd-p... ... mrT23-patc...

Author | SHA1 | Date
---|---|---
 | 0cabf57247 | 
38  .github/ISSUE_TEMPLATE/bug-report.yml  (vendored)
@ -1,38 +0,0 @@
name: "\U0001FAB2 Bug Report"
description: Submit a bug report
labels: ["bug"]
body:

  - type: dropdown
    id: information-git-provider
    attributes:
      label: Git provider
      description: 'The problem arises when using:'
      options:
        - "Github Cloud"
        - "Github Enterprise"
        - "Gitlab"
        - "Bitbucket Cloud"
        - "Bitbucket Server"
        - "Azure"
        - "Other"
    validations:
      required: true

  - type: textarea
    id: system-info
    attributes:
      label: System Info
      description: Please share your system info with us.
      placeholder: model used, deployment type (action/app/cli/...), etc...
    validations:
      required: true

  - type: textarea
    id: bug-details
    attributes:
      label: Bug details
      description: Please describe the problem.
      placeholder: Describe the problem
    validations:
      required: true
10  .github/ISSUE_TEMPLATE/config.yml  (vendored)
@ -1,10 +0,0 @@
blank_issues_enabled: false
version: 0.1
contact_links:
  - name: Discussions
    url: https://github.com/qodo-ai/pr-agent/discussions
    about: GitHub Discussions

  - name: Discord community
    url: https://discord.com/channels/1057273017547378788/1126104260430528613
    about: Join our discord community
21  .github/ISSUE_TEMPLATE/feature-request.yml  (vendored)
@ -1,21 +0,0 @@
name: "\U0001F4A1 Feature request"
description: Submit a proposal/request for a new PR-Agent feature
labels: ["feature"]
body:
  - type: textarea
    id: feature-request
    validations:
      required: true
    attributes:
      label: Feature request
      description: |
        Description of the feature proposal.

  - type: textarea
    id: motivation
    validations:
      required: true
    attributes:
      label: Motivation
      description: |
        Outline the motivation for the proposal.
36  .github/ISSUE_TEMPLATE/miscellaneous.yml  (vendored)
@ -1,36 +0,0 @@
name: "❔ General Issue"
description: Submit a general issue
labels: ["general"]
body:

  - type: dropdown
    id: information-git-provider
    attributes:
      label: Git provider (optional)
      description: 'Git Provider:'
      options:
        - "Github Cloud"
        - "Github Enterprise"
        - "Gitlab"
        - "Bitbucket Cloud"
        - "Bitbucket Server"
        - "Azure"
        - "Other"

  - type: textarea
    id: system-info
    attributes:
      label: System Info (optional)
      description: Please share your system info with us.
      placeholder: model used, deployment type (action/app/cli/...), etc...
    validations:
      required: false

  - type: textarea
    id: issues-details
    attributes:
      label: Issues details
      description: Please share the issues details.
      placeholder: Describe the issue
    validations:
      required: true
@ -1,45 +0,0 @@
# Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering an open
and welcoming community, we pledge to respect all people who contribute through reporting
issues, posting feature requests, updating documentation, submitting pull requests or
patches, and other activities.

We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
  without explicit permission
* Other unethical or unprofessional conduct

Project maintainers have the right and responsibility to remove, edit, or reject comments,
commits, code, wiki edits, issues, and other contributions that are not aligned to this
Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
that they deem inappropriate, threatening, offensive, or harmful.

By adopting this Code of Conduct, project maintainers commit themselves to fairly and
consistently applying these principles to every aspect of managing this project. Project
maintainers who do not follow or enforce the Code of Conduct may be permanently removed
from the project team.

This Code of Conduct applies both within project spaces and in public spaces when an
individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
contacting a project maintainer at tal.r@qodo.ai . All complaints will
be reviewed and investigated and will result in a response that is deemed necessary and
appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
with regard to the reporter of an incident.

This Code of Conduct is adapted from the
[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
@ -1,38 +0,0 @@
# Contributing to PR-Agent

Thank you for your interest in contributing to the PR-Agent project!

## Getting Started

1. Fork the repository and clone your fork
2. Install Python 3.10 or higher
3. Install dependencies (`requirements.txt` and `requirements-dev.txt`)
4. Create a new branch for your contribution:
   - For new features: `git checkout -b feature/your-feature-name`
   - For bug fixes: `git checkout -b fix/issue-description`
5. Make your changes
6. Write or update tests as needed
7. Run tests locally to ensure everything passes
8. Commit your changes using conventional commit messages
9. Push to your fork and submit a pull request

## Development Guidelines

- Keep pull requests focused on a single feature or fix
- Follow the existing code style and formatting conventions
- Add unit tests for any new functionality using pytest
- Ensure test coverage for your changes
- Update documentation as needed

## Pull Request Process

1. Ensure your PR includes a clear description of the changes
2. Link any related issues
3. Update the README.md if needed
4. Wait for review from maintainers

## Questions or Need Help?

- Join our [Discord community](https://discord.com/channels/1057273017547378788/1126104260430528613) for questions and discussions
- Check the [documentation](https://qodo-merge-docs.qodo.ai/) for detailed information
- Report bugs or request features through [GitHub Issues](https://github.com/qodo-ai/pr-agent/issues)
@ -3,7 +3,7 @@ FROM python:3.12 as base
WORKDIR /app
ADD pyproject.toml .
ADD requirements.txt .
RUN pip install --no-cache-dir . && rm pyproject.toml requirements.txt
RUN pip install . && rm pyproject.toml requirements.txt
ENV PYTHONPATH=/app
ADD docs docs
ADD pr_agent pr_agent
59  README.md
@ -10,12 +10,6 @@

</picture>
<br/>

[Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) |
[Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) |
[Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) |
[Qodo Merge](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) 💎

PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
</div>

@ -28,16 +22,13 @@ PR-Agent aims to help efficiently review and handle pull requests, by providing
</a>
</div>

[//]: # (### [Documentation](https://qodo-merge-docs.qodo.ai/))
### [Documentation](https://qodo-merge-docs.qodo.ai/)

[//]: # ()
[//]: # (- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing PR-Agent on different platforms.)
- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing PR-Agent on different platforms.

[//]: # ()
[//]: # (- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.)
- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.

[//]: # ()
[//]: # (- See the [Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) for a detailed description of the different tools, and the available configurations for each tool.)
- See the [Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) for a detailed description of the different tools, and the available configurations for each tool.


## Table of Contents
@ -46,17 +37,12 @@ PR-Agent aims to help efficiently review and handle pull requests, by providing
- [Overview](#overview)
- [Example results](#example-results)
- [Try it now](#try-it-now)
- [Qodo Merge](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/)
- [Qodo Merge 💎](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/)
- [How it works](#how-it-works)
- [Why use PR-Agent?](#why-use-pr-agent)

## News and Updates

### Feb 6, 2025
New design for the `/improve` tool:

<kbd><img src="https://github.com/user-attachments/assets/26506430-550e-469a-adaa-af0a09b70c6d" width="512"></kbd>

### Jan 25, 2025

The open-source GitHub organization was updated:
@ -83,7 +69,40 @@ Update logic and [documentation](https://qodo-merge-docs.qodo.ai/usage-guide/cha

### December 30, 2024

Following feedback from the community, we have addressed two vulnerabilities identified in the open-source PR-Agent project. The [fixes](https://github.com/qodo-ai/pr-agent/pull/1425) are now included in the newly released version (v0.26), available as of today.
Following [feedback](https://research.kudelskisecurity.com/2024/08/29/careful-where-you-code-multiple-vulnerabilities-in-ai-powered-pr-agent/) from the community, we have addressed two vulnerabilities identified in the open-source PR-Agent project. The fixes are now included in the newly released version (v0.26), available as of today.

### December 25, 2024

The `review` tool previously included a legacy feature for providing code suggestions (controlled by '--pr_reviewer.num_code_suggestion'). This functionality has been deprecated. Use instead the [`improve`](https://qodo-merge-docs.qodo.ai/tools/improve/) tool, which offers higher quality and more actionable code suggestions.

### December 2, 2024

Open-source repositories can now freely use Qodo Merge, and enjoy easy one-click installation using a marketplace [app](https://github.com/apps/qodo-merge-pro-for-open-source).

<kbd><img src="https://github.com/user-attachments/assets/b0838724-87b9-43b0-ab62-73739a3a855c" width="512"></kbd>

See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details about installing Qodo Merge for private repositories.


### November 18, 2024

A new mode was enabled by default for code suggestions - `--pr_code_suggestions.focus_only_on_problems=true`:

- This option reduces the number of code suggestions received
- The suggestions will focus more on identifying and fixing code problems, rather than style considerations like best practices, maintainability, or readability.
- The suggestions will be categorized into just two groups: "Possible Issues" and "General".

Still, if you prefer the previous mode, you can set `--pr_code_suggestions.focus_only_on_problems=false` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
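
As a rough sketch (not part of this diff), the same override in a repo-level configuration file might look like the snippet below, assuming the usual `.pr_agent.toml` file name and the `[pr_code_suggestions]` section implied by the flag name:

```toml
# hypothetical .pr_agent.toml snippet - section name inferred from the CLI flag above
[pr_code_suggestions]
focus_only_on_problems = false  # revert to the previous, broader suggestion mode
```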

**Example results:**

Original mode

<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_original_mode.png" width="512"></kbd>

Focused mode

<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_focused_mode.png" width="512"></kbd>


## Overview
62  SECURITY.md
@ -1,62 +0,0 @@
# Security Policy

PR-Agent is an open-source tool to help efficiently review and handle pull requests. Qodo Merge is a paid version of PR-Agent, designed for companies and teams that require additional features and capabilities.

This document describes the security policy of PR-Agent. For Qodo Merge's security policy, see [here](https://qodo-merge-docs.qodo.ai/overview/data_privacy/#qodo-merge).

## PR-Agent Self-Hosted Solutions

When using PR-Agent with your OpenAI (or other LLM provider) API key, the security relationship is directly between you and the provider. We do not send your code to Qodo servers.

Types of [self-hosted solutions](https://qodo-merge-docs.qodo.ai/installation):
- Locally
- GitHub integration
- GitLab integration
- BitBucket integration
- Azure DevOps integration


## PR-Agent Supported Versions

This section outlines which versions of PR-Agent are currently supported with security updates.

### Docker Deployment Options

#### Latest Version

For the most recent updates, use our latest Docker image which is automatically built nightly:
```yaml
uses: qodo-ai/pr-agent@main
```

#### Specific Release Version

For a fixed version, you can pin your action to a specific release version. Browse available releases at:
[PR-Agent Releases](https://github.com/qodo-ai/pr-agent/releases)

For example, to github action:

```yaml
steps:
  - name: PR Agent action step
    id: pragent
    uses: docker://codiumai/pr-agent:0.26-github_action
```

#### Enhanced Security with Docker Digest

For maximum security, you can specify the Docker image using its digest:
```yaml
steps:
  - name: PR Agent action step
    id: pragent
    uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
```

## Reporting a Vulnerability

We take the security of PR-Agent seriously. If you discover a security vulnerability, please report it immediately to:

Email: tal.r@qodo.ai

Please include a description of the vulnerability, steps to reproduce, and the affected PR-Agent version.
@ -4,7 +4,7 @@ WORKDIR /app
ADD pyproject.toml .
ADD requirements.txt .
ADD docs docs
RUN pip install --no-cache-dir . && rm pyproject.toml requirements.txt
RUN pip install . && rm pyproject.toml requirements.txt
ENV PYTHONPATH=/app

FROM base AS github_app
@ -33,7 +33,7 @@ CMD ["python", "pr_agent/servers/azuredevops_server_webhook.py"]

FROM base AS test
ADD requirements-dev.txt .
RUN pip install --no-cache-dir -r requirements-dev.txt && rm requirements-dev.txt
RUN pip install -r requirements-dev.txt && rm requirements-dev.txt
ADD pr_agent pr_agent
ADD tests tests
@ -5,8 +5,8 @@ RUN yum update -y && \
    yum clean all

ADD pyproject.toml requirements.txt .
RUN pip install --no-cache-dir . && rm pyproject.toml
RUN pip install --no-cache-dir mangum==0.17.0
RUN pip install . && rm pyproject.toml
RUN pip install mangum==0.17.0
COPY pr_agent/ ${LAMBDA_TASK_ROOT}/pr_agent/

CMD ["pr_agent.servers.serverless.serverless"]
@ -19,7 +19,6 @@ Here are some additional technical blogs from Qodo, that delve deeper into the c
These resources provide more comprehensive insights into leveraging LLMs for software development.

### Code Generation and LLMs
- [Effective AI code suggestions: less is more](https://www.qodo.ai/blog/effective-code-suggestions-llms-less-is-more/)
- [State-of-the-art Code Generation with AlphaCodium – From Prompt Engineering to Flow Engineering](https://www.qodo.ai/blog/qodoflow-state-of-the-art-code-generation-for-code-contests/)
- [RAG for a Codebase with 10k Repos](https://www.qodo.ai/blog/rag-for-large-scale-code-repos/)
@ -15,7 +15,7 @@ Qodo Merge for GitHub cloud is available for installation through the [GitHub Ma

### GitHub Enterprise Server

To use Qodo Merge application on your private GitHub Enterprise Server, you will need to [contact](https://www.qodo.ai/contact/#pricing) Qodo for starting an Enterprise trial.
To use Qodo Merge application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.

### GitHub Open Source Projects

@ -34,9 +34,7 @@ Qodo Merge for Bitbucket Cloud is available for installation through the followi
To use Qodo Merge application on your private Bitbucket Server, you will need to contact us for starting an [Enterprise](https://www.qodo.ai/pricing/) trial.


## Install Qodo Merge for GitLab

### GitLab Cloud
## Install Qodo Merge for GitLab (Teams & Enterprise)

Since GitLab platform does not support apps, installing Qodo Merge for GitLab is a bit more involved, and requires the following steps:

@ -81,7 +79,3 @@ Enable SSL verification: Check the box.
You’re all set!

Open a new merge request or add a MR comment with one of Qodo Merge’s commands such as /review, /describe or /improve.

### GitLab Server

For a trial period of two weeks on your private GitLab Server, the same [installation steps](#gitlab-cloud) as for GitLab Cloud apply. After the trial period, you will need to [contact](https://www.qodo.ai/contact/#pricing) Qodo for moving to an Enterprise account.
@ -1,6 +1,6 @@
### Overview

[Qodo Merge](https://www.codium.ai/pricing/){:target="_blank"} is a paid, hosted version of open-source [PR-Agent](https://github.com/Codium-ai/pr-agent){:target="_blank"}. A complimentary two-week trial is offered, followed by a monthly subscription fee.
[Qodo Merge](https://www.codium.ai/pricing/){:target="_blank"} is a hosted version of open-source [PR-Agent](https://github.com/Codium-ai/pr-agent){:target="_blank"}. A complimentary two-week trial is offered, followed by a monthly subscription fee.
Qodo Merge is designed for companies and teams that require additional features and capabilities. It provides the following benefits:

1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub\GitLab\BitBucket repo.
@ -1,5 +1,3 @@
`Platforms supported: GitHub, GitLab, Bitbucket`

## Overview

The `implement` tool converts human code review discussions and feedback into ready-to-commit code changes.
18  docs/docs/usage-guide/PR_agent_pro_models.md  (Normal file)
@ -0,0 +1,18 @@
## Qodo Merge Models

The default models used by Qodo Merge are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.

Users can configure Qodo Merge to use solely a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.

For example, to restrict Qodo Merge to using only `Claude-3.5-sonnet`, add this setting:

```
[config]
model="claude-3-5-sonnet"
```

Or to restrict Qodo Merge to using only `GPT-4o`, add this setting:
```
[config]
model="gpt-4o"
```
@ -51,7 +51,7 @@ api_base = "http://localhost:11434" # or whatever port you're running Ollama on

Commercial models such as GPT-4, Claude Sonnet, and Gemini have demonstrated robust capabilities in generating structured output for code analysis tasks with large input. In contrast, most open-source models currently available (as of January 2025) face challenges with these complex tasks.

Based on our testing, local open-source models are suitable for experimentation and learning purposes (mainly for the `ask` command), but they are not suitable for production-level code analysis tasks.
Based on our testing, local open-source models are suitable for experimentation and learning purposes, but they are not suitable for production-level code analysis tasks.

Hence, for production workflows and real-world usage, we recommend using commercial models.
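
For illustration only (not part of this diff), a local-model configuration of the kind the paragraph above refers to might look roughly like the sketch below. Only the `api_base` line mirrors the hunk context shown above; the model name and token limit are placeholders, and the `custom_model_max_tokens` key is an assumption about how an unknown local model's context window is declared:

```toml
[config]
model = "ollama/llama3"          # placeholder: any model served by your local Ollama instance
custom_model_max_tokens = 8192   # assumption: declare the local model's context window

[ollama]
api_base = "http://localhost:11434"  # or whatever port you're running Ollama on
```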

@ -24,4 +24,4 @@ It includes information on how to adjust Qodo Merge configurations, define which
- [Changing a model](./additional_configurations.md#changing-a-model)
- [Patch Extra Lines](./additional_configurations.md#patch-extra-lines)
- [Editing the prompts](./additional_configurations.md#editing-the-prompts)
- [Qodo Merge Models](./qodo_merge_models)
- [Qodo Merge Models](./PR_agent_pro_models.md)
@ -1,37 +0,0 @@
The default models used by Qodo Merge are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.

### Selecting a Specific Model

Users can configure Qodo Merge to use a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.
The models supported by Qodo Merge are:

- `claude-3-5-sonnet`
- `gpt-4o`
- `deepseek-r1`
- `o3-mini`

To restrict Qodo Merge to using only `Claude-3.5-sonnet`, add this setting:

```
[config]
model="claude-3-5-sonnet"
```

To restrict Qodo Merge to using only `GPT-4o`, add this setting:
```
[config]
model="gpt-4o"
```

To restrict Qodo Merge to using only `deepseek-r1`, add this setting:
```
[config]
model="deepseek/r1"
```

To restrict Qodo Merge to using only `o3-mini`, add this setting:
```
[config]
model="o3-mini"
```
@ -20,7 +20,7 @@ nav:
    - Managing Mail Notifications: 'usage-guide/mail_notifications.md'
    - Changing a Model: 'usage-guide/changing_a_model.md'
    - Additional Configurations: 'usage-guide/additional_configurations.md'
    - 💎 Qodo Merge Models: 'usage-guide/qodo_merge_models.md'
    - 💎 Qodo Merge Models: 'usage-guide/PR_agent_pro_models'
  - Tools:
    - 'tools/index.md'
    - Describe: 'tools/describe.md'
@ -26,8 +26,6 @@ MAX_TOKENS = {
    'o1-preview-2024-09-12': 128000,  # 128K, but may be limited by config.max_model_tokens
    'o1-2024-12-17': 204800,  # 200K, but may be limited by config.max_model_tokens
    'o1': 204800,  # 200K, but may be limited by config.max_model_tokens
    'o3-mini': 204800,  # 200K, but may be limited by config.max_model_tokens
    'o3-mini-2025-01-31': 204800,  # 200K, but may be limited by config.max_model_tokens
    'claude-instant-1': 100000,
    'claude-2': 100000,
    'command-nightly': 4096,
@ -83,21 +81,3 @@ MAX_TOKENS = {
    "watsonx/ibm/granite-34b-code-instruct": 8191,
    "watsonx/mistralai/mistral-large": 32768,
}

USER_MESSAGE_ONLY_MODELS = [
    "deepseek/deepseek-reasoner",
    "o1-mini",
    "o1-mini-2024-09-12",
    "o1-preview"
]

NO_SUPPORT_TEMPERATURE_MODELS = [
    "deepseek/deepseek-reasoner",
    "o1-mini",
    "o1-mini-2024-09-12",
    "o1",
    "o1-2024-12-17",
    "o3-mini",
    "o3-mini-2025-01-31",
    "o1-preview"
]
@ -6,7 +6,6 @@ import requests
from litellm import acompletion
from tenacity import retry, retry_if_exception_type, stop_after_attempt

from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.utils import get_version
from pr_agent.config_loader import get_settings
@ -95,12 +94,6 @@ class LiteLLMAIHandler(BaseAiHandler):
        if get_settings().get("DEEPSEEK.KEY", None):
            os.environ['DEEPSEEK_API_KEY'] = get_settings().get("DEEPSEEK.KEY")

        # Models that only use user meessage
        self.user_message_only_models = USER_MESSAGE_ONLY_MODELS

        # Model that doesn't support temperature argument
        self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS

    def prepare_logs(self, response, system, user, resp, finish_reason):
        response_log = response.dict().copy()
        response_log['system'] = system
@ -204,8 +197,10 @@ class LiteLLMAIHandler(BaseAiHandler):
            messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
                                      {"type": "image_url", "image_url": {"url": img_path}}]

        # Currently, some models do not support a separate system and user prompts
        if model in self.user_message_only_models:
        # Currently, model OpenAI o1 series does not support a separate system and user prompts
        O1_MODEL_PREFIX = 'o1'
        model_type = model.split('/')[-1] if '/' in model else model
        if (model_type.startswith(O1_MODEL_PREFIX)) or ("deepseek-reasoner" in model):
            user = f"{system}\n\n\n{user}"
            system = ""
            get_logger().info(f"Using model {model}, combining system and user prompts")
@ -222,14 +217,11 @@ class LiteLLMAIHandler(BaseAiHandler):
            "model": model,
            "deployment_id": deployment_id,
            "messages": messages,
            "temperature": temperature,
            "timeout": get_settings().config.ai_timeout,
            "api_base": self.api_base,
        }

        # Add temperature only if model supports it
        if model not in self.no_support_temperature_models:
            kwargs["temperature"] = temperature

        if get_settings().litellm.get("enable_callbacks", False):
            kwargs = self.add_litellm_callbacks(kwargs)
@ -735,7 +735,7 @@ def try_fix_yaml(response_text: str,
        get_logger().info(f"Successfully parsed AI prediction after adding |-\n")
        return data
    except:
        pass
        get_logger().info(f"Failed to parse AI prediction after adding |-\n")

    # second fallback - try to extract only range from first ```yaml to ````
    snippet_pattern = r'```(yaml)?[\s\S]*?```'
@ -779,18 +779,9 @@
    except:
        pass

    # fifth fallback - try to remove leading '+' (sometimes added by AI for 'existing code' and 'improved code')
    response_text_lines_copy = response_text_lines.copy()
    for i in range(0, len(response_text_lines_copy)):
        response_text_lines_copy[i] = ' ' + response_text_lines_copy[i][1:]
    try:
        data = yaml.safe_load('\n'.join(response_text_lines_copy))
        get_logger().info(f"Successfully parsed AI prediction after removing leading '+'")
        return data
    except:
        pass

    # sixth fallback - try to remove last lines
    # fifth fallback - try to remove last lines
    data = {}
    for i in range(1, len(response_text_lines)):
        response_text_lines_tmp = '\n'.join(response_text_lines[:-i])
        try:
@ -9,7 +9,7 @@ from datetime import datetime
from typing import Optional, Tuple
from urllib.parse import urlparse

from github import AppAuthentication, Auth, Github, GithubException
from github import AppAuthentication, Auth, Github
from retry import retry
from starlette_context import context

@ -475,17 +475,8 @@ class GithubProvider(GitProvider):
            return False

    def edit_comment(self, comment, body: str):
        try:
            body = self.limit_output_characters(body, self.max_comment_chars)
            comment.edit(body=body)
        except GithubException as e:
            if hasattr(e, "status") and e.status == 403:
                # Log as warning for permission-related issues (usually due to polling)
                get_logger().warning(
                    "Failed to edit github comment due to permission restrictions",
                    artifact={"error": e})
            else:
                get_logger().exception(f"Failed to edit github comment", artifact={"error": e})
        body = self.limit_output_characters(body, self.max_comment_chars)
        comment.edit(body=body)

    def edit_comment_from_comment_id(self, comment_id: int, body: str):
        try:
@ -81,7 +81,7 @@ async def run_action():
            get_logger().info(f"github action: failed to apply repo settings: {e}")

    # Handle pull request opened event
    if GITHUB_EVENT_NAME == "pull_request" or GITHUB_EVENT_NAME == "pull_request_target":
    if GITHUB_EVENT_NAME == "pull_request":
        action = event_payload.get("action")

        # Retrieve the list of actions from the configuration
@ -130,14 +130,12 @@ async def is_valid_notification(notification, headers, handled_ids, session, use
                                artifact={"comment": comment_body})
                    return True, handled_ids, comment, comment_body, pr_url, user_tag

                get_logger().warning(f"Failed to fetch comments for PR: {pr_url}",
                                     artifact={"comments": comments})
                get_logger().error(f"Failed to fetch comments for PR: {pr_url}")
                return False, handled_ids

            return False, handled_ids
        except Exception as e:
            get_logger().exception(f"Error processing polling notification",
                                   artifact={"notification": notification, "error": e})
            get_logger().error(f"Error processing notification: {e}", artifact={"traceback": traceback.format_exc()})
            return False, handled_ids
@ -121,9 +121,6 @@ max_history_len=4
apply_suggestions_checkbox=true
# suggestions scoring
suggestions_score_threshold=0 # [0-10]| recommend not to set this value above 8, since above it may clip highly relevant suggestions
new_score_mechanism=true
new_score_mechanism_th_high=9
new_score_mechanism_th_medium=7
# params for '/improve --extended' mode
auto_extended_mode=true
num_code_suggestions_per_chunk=4
@ -90,7 +90,7 @@ class CodeSuggestion(BaseModel):
{%- if not focus_only_on_problems %}
    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', 'typo'. Other relevant labels are also acceptable.")
{%- else %}
    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'critical bug', 'general'. The 'general' section should be used for suggestions that address a major issue, but are not necessarily on a critical level.")
    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'critical bug', 'general'. The 'general' section should be used for suggestions that address a major issue, but are necessarily on a critical level.")
{%- endif %}
@ -13,7 +13,7 @@ The output must be a YAML object equivalent to type $DocHelper, according to the
=====
class relevant_section(BaseModel):
    file_name: str = Field(description="The name of the relevant file")
    relevant_section_header_string: str = Field(description="The exact text of the relevant markdown section heading from the relevant file (starting with '#', '##', etc.). Return empty string if the entire file is the relevant section, or if the relevant section has no heading")
    relevant_section_header_string: str = Field(description="From the relevant file, exact text of the relevant section heading. If no markdown heading is relevant, return empty string")

class DocHelper(BaseModel):
    user_question: str = Field(description="The user's question")
@ -720,7 +720,7 @@ class PRCodeSuggestions:
            header = f"Suggestion"
            delta = 66
            header += " " * delta
            pr_body += f"""<thead><tr><td><strong>Category</strong></td><td align=left><strong>{header}</strong></td><td align=center><strong>Impact</strong></td></tr>"""
            pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td><td align=center>Score</td></tr>"""
            pr_body += """<tbody>"""
            suggestions_labels = dict()
            # add all suggestions related to each label
@ -740,7 +740,7 @@
            counter_suggestions = 0
            for label, suggestions in suggestions_labels.items():
                num_suggestions = len(suggestions)
                pr_body += f"""<tr><td rowspan={num_suggestions}>{label.capitalize()}</td>\n"""
                pr_body += f"""<tr><td rowspan={num_suggestions}><strong>{label.capitalize()}</strong></td>\n"""
                for i, suggestion in enumerate(suggestions):

                    relevant_file = suggestion['relevant_file'].strip()
@ -794,19 +794,14 @@

{example_code.rstrip()}
"""
                    if suggestion.get('score_why'):
                        pr_body += f"<details><summary>Suggestion importance[1-10]: {suggestion['score']}</summary>\n\n"
                        pr_body += f"__\n\nWhy: {suggestion['score_why']}\n\n"
                        pr_body += f"</details>"
                    pr_body += f"<details><summary>Suggestion importance[1-10]: {suggestion['score']}</summary>\n\n"
                    pr_body += f"Why: {suggestion['score_why']}\n\n"
                    pr_body += f"</details>"

                    pr_body += f"</details>"

                    # # add another column for 'score'
                    score_int = int(suggestion.get('score', 0))
                    score_str = f"{score_int}"
                    if get_settings().pr_code_suggestions.new_score_mechanism:
                        score_str = self.get_score_str(score_int)
                    pr_body += f"</td><td align=center>{score_str}\n\n"
                    pr_body += f"</td><td align=center>{suggestion['score']}\n\n"

                    pr_body += f"</td></tr>"
                    counter_suggestions += 1
@ -819,16 +814,6 @@
            get_logger().info(f"Failed to publish summarized code suggestions, error: {e}")
            return ""

    def get_score_str(self, score: int) -> str:
        th_high = get_settings().pr_code_suggestions.get('new_score_mechanism_th_high', 9)
        th_medium = get_settings().pr_code_suggestions.get('new_score_mechanism_th_medium', 7)
        if score >= th_high:
            return "High"
        elif score >= th_medium:
            return "Medium"
        else:  # score < 7
            return "Low"

    async def self_reflect_on_suggestions(self,
                                          suggestion_list: List,
                                          patches_diff: str,
@ -1,5 +1,4 @@
import copy
import re
from functools import partial
from pathlib import Path

@ -10,9 +9,10 @@ from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import ModelType, clip_tokens, load_yaml, get_max_tokens
from pr_agent.algo.utils import ModelType, clip_tokens, load_yaml
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import BitbucketServerProvider, GithubProvider, get_git_provider_with_context
from pr_agent.git_providers import (BitbucketServerProvider, GithubProvider,
                                    get_git_provider_with_context)
from pr_agent.log import get_logger


@ -30,11 +30,10 @@ def extract_header(snippet):
    return res

class PRHelpMessage:
    def __init__(self, pr_url: str, args=None, ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler, return_as_string=False):
    def __init__(self, pr_url: str, args=None, ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
        self.git_provider = get_git_provider_with_context(pr_url)
        self.ai_handler = ai_handler()
        self.question_str = self.parse_args(args)
        self.return_as_string = return_as_string
        self.num_retrieved_snippets = get_settings().get('pr_help.num_retrieved_snippets', 5)
        if self.question_str:
            self.vars = {
@ -66,34 +65,6 @@ class PRHelpMessage:
            question_str = ""
        return question_str

    def format_markdown_header(self, header: str) -> str:
        try:
            # First, strip common characters from both ends
            cleaned = header.strip('# 💎\n')

            # Define all characters to be removed/replaced in a single pass
            replacements = {
                "'": '',
                "`": '',
                '(': '',
                ')': '',
                ',': '',
                '.': '',
                '?': '',
                '!': '',
                ' ': '-'
            }

            # Compile regex pattern for characters to remove
            pattern = re.compile('|'.join(map(re.escape, replacements.keys())))

            # Perform replacements in a single pass and convert to lowercase
            return pattern.sub(lambda m: replacements[m.group()], cleaned).lower()
        except Exception:
            get_logger().exception(f"Error while formatting markdown header", artifacts={'header': header})
            return ""


    async def run(self):
        try:
            if self.question_str:
@ -135,10 +106,7 @@ class PRHelpMessage:
                get_logger().debug(f"Token count of full documentation website: {token_count}")

                model = get_settings().config.model
                if model in MAX_TOKENS:
                    max_tokens_full = MAX_TOKENS[model] # note - here we take the actual max tokens, without any reductions. we do aim to get the full documentation website in the prompt
                else:
                    max_tokens_full = get_max_tokens(model)
                max_tokens_full = MAX_TOKENS[model] # note - here we take the actual max tokens, without any reductions. we do aim to get the full documentation website in the prompt
                delta_output = 2000
                if token_count > max_tokens_full - delta_output:
                    get_logger().info(f"Token count {token_count} exceeds the limit {max_tokens_full - delta_output}. Skipping the PR Help message.")
@ -146,16 +114,8 @@
                self.vars['snippets'] = docs_prompt.strip()

                # run the AI model
                response = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)
                response = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.WEAK)
                response_yaml = load_yaml(response)
                if isinstance(response_yaml, str):
                    get_logger().warning(f"failing to parse response: {response_yaml}, publishing the response as is")
                    if get_settings().config.publish_output:
                        answer_str = f"### Question: \n{self.question_str}\n\n"
                        answer_str += f"### Answer:\n\n"
                        answer_str += response_yaml
                        self.git_provider.publish_comment(answer_str)
                    return ""
                response_str = response_yaml.get('response')
                relevant_sections = response_yaml.get('relevant_sections')

@ -178,7 +138,7 @@ class PRHelpMessage:
                for section in relevant_sections:
                    file = section.get('file_name').strip().removesuffix('.md')
                    if str(section['relevant_section_header_string']).strip():
                        markdown_header = self.format_markdown_header(section['relevant_section_header_string'])
                        markdown_header = section['relevant_section_header_string'].strip().strip('#').strip().lower().replace(' ', '-').replace("'", '').replace('(', '').replace(')', '').replace(',', '').replace('.', '').replace('?', '').replace('!', '')
                        answer_str += f"> - {base_path}{file}#{markdown_header}\n"
                    else:
                        answer_str += f"> - {base_path}{file}\n"
@ -371,7 +371,7 @@ class PRReviewer:
                else:
                    get_logger().warning(f"Unexpected type for estimated_effort: {type(estimated_effort)}")
                if 1 <= estimated_effort_number <= 5:  # 1, because ...
                    review_labels.append(f'Review effort {estimated_effort_number}/5')
                    review_labels.append(f'Review effort [1-5]: {estimated_effort_number}')
            if get_settings().pr_reviewer.enable_review_labels_security and get_settings().pr_reviewer.require_security_review:
                security_concerns = data['review']['security_concerns']  # yes, because ...
                security_concerns_bool = 'yes' in security_concerns.lower() or 'true' in security_concerns.lower()