Compare commits

..

1 Commit

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Tal | b9e3e5603b | Update setup.py | 2024-10-27 17:03:34 +02:00 |
164 changed files with 2009 additions and 3703 deletions


@@ -1,38 +0,0 @@
name: "\U0001FAB2 Bug Report"
description: Submit a bug report
labels: ["bug"]
body:
- type: dropdown
id: information-git-provider
attributes:
label: Git provider
description: 'The problem arises when using:'
options:
- "Github Cloud"
- "Github Enterprise"
- "Gitlab"
- "Bitbucket Cloud"
- "Bitbucket Server"
- "Azure"
- "Other"
validations:
required: true
- type: textarea
id: system-info
attributes:
label: System Info
description: Please share your system info with us.
placeholder: model used, deployment type (action/app/cli/...), etc...
validations:
required: true
- type: textarea
id: bug-details
attributes:
label: Bug details
description: Please describe the problem.
placeholder: Describe the problem
validations:
required: true


@@ -1,10 +0,0 @@
blank_issues_enabled: false
version: 0.1
contact_links:
- name: Discussions
url: https://github.com/qodo-ai/pr-agent/discussions
about: GitHub Discussions
- name: Discord community
url: https://discord.com/channels/1057273017547378788/1126104260430528613
about: Join our discord community


@@ -1,21 +0,0 @@
name: "\U0001F4A1 Feature request"
description: Submit a proposal/request for a new PR-Agent feature
labels: ["feature"]
body:
- type: textarea
id: feature-request
validations:
required: true
attributes:
label: Feature request
description: |
Description of the feature proposal.
- type: textarea
id: motivation
validations:
required: true
attributes:
label: Motivation
description: |
Outline the motivation for the proposal.


@@ -1,36 +0,0 @@
name: "❔ General Issue"
description: Submit a general issue
labels: ["general"]
body:
- type: dropdown
id: information-git-provider
attributes:
label: Git provider (optional)
description: 'Git Provider:'
options:
- "Github Cloud"
- "Github Enterprise"
- "Gitlab"
- "Bitbucket Cloud"
- "Bitbucket Server"
- "Azure"
- "Other"
- type: textarea
id: system-info
attributes:
label: System Info (optional)
description: Please share your system info with us.
placeholder: model used, deployment type (action/app/cli/...), etc...
validations:
required: false
- type: textarea
id: issues-details
attributes:
label: Issues details
description: Please share the issues details.
placeholder: Describe the issue
validations:
required: true


@@ -37,3 +37,5 @@ jobs:
name: Test dev docker
run: |
docker run --rm codiumai/pr-agent:test pytest -v tests/unittest


@@ -30,3 +30,6 @@ jobs:
GITHUB_ACTION_CONFIG.AUTO_DESCRIBE: true
GITHUB_ACTION_CONFIG.AUTO_REVIEW: true
GITHUB_ACTION_CONFIG.AUTO_IMPROVE: true


@@ -1,17 +0,0 @@
# disabled. We might run it manually if needed.
name: pre-commit
on:
workflow_dispatch:
# pull_request:
# push:
# branches: [main]
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v5
# SEE https://github.com/pre-commit/action
- uses: pre-commit/action@v3.0.1

.gitignore (vendored, 2 changed lines)

@@ -1,7 +1,6 @@
.idea/
.lsp/
.vscode/
.env
venv/
pr_agent/settings/.secrets.toml
__pycache__
@@ -10,4 +9,3 @@ dist/
build/
.DS_Store
docs/.cache/
.qodo


@@ -1,46 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
default_language_version:
python: python3
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-added-large-files
- id: check-toml
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
# - repo: https://github.com/rhysd/actionlint
# rev: v1.7.3
# hooks:
# - id: actionlint
- repo: https://github.com/pycqa/isort
# rev must match what's in dev-requirements.txt
rev: 5.13.2
hooks:
- id: isort
# - repo: https://github.com/PyCQA/bandit
# rev: 1.7.10
# hooks:
# - id: bandit
# args: [
# "-c", "pyproject.toml",
# ]
# - repo: https://github.com/astral-sh/ruff-pre-commit
# rev: v0.7.1
# hooks:
# - id: ruff
# args:
# - --fix
# - id: ruff-format
# - repo: https://github.com/PyCQA/autoflake
# rev: v2.3.1
# hooks:
# - id: autoflake
# args:
# - --in-place
# - --remove-all-unused-imports
# - --remove-unused-variables


@@ -1,45 +0,0 @@
# Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of fostering an open
and welcoming community, we pledge to respect all people who contribute through reporting
issues, posting feature requests, updating documentation, submitting pull requests or
patches, and other activities.
We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct
Project maintainers have the right and responsibility to remove, edit, or reject comments,
commits, code, wiki edits, issues, and other contributions that are not aligned to this
Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
that they deem inappropriate, threatening, offensive, or harmful.
By adopting this Code of Conduct, project maintainers commit themselves to fairly and
consistently applying these principles to every aspect of managing this project. Project
maintainers who do not follow or enforce the Code of Conduct may be permanently removed
from the project team.
This Code of Conduct applies both within project spaces and in public spaces when an
individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
contacting a project maintainer at tal.r@qodo.ai. All complaints will
be reviewed and investigated and will result in a response that is deemed necessary and
appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
with regard to the reporter of an incident.
This Code of Conduct is adapted from the
[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)


@@ -1,38 +0,0 @@
# Contributing to PR-Agent
Thank you for your interest in contributing to the PR-Agent project!
## Getting Started
1. Fork the repository and clone your fork
2. Install Python 3.10 or higher
3. Install dependencies (`requirements.txt` and `requirements-dev.txt`)
4. Create a new branch for your contribution:
- For new features: `git checkout -b feature/your-feature-name`
- For bug fixes: `git checkout -b fix/issue-description`
5. Make your changes
6. Write or update tests as needed
7. Run tests locally to ensure everything passes
8. Commit your changes using conventional commit messages
9. Push to your fork and submit a pull request
## Development Guidelines
- Keep pull requests focused on a single feature or fix
- Follow the existing code style and formatting conventions
- Add unit tests for any new functionality using pytest
- Ensure test coverage for your changes
- Update documentation as needed
## Pull Request Process
1. Ensure your PR includes a clear description of the changes
2. Link any related issues
3. Update the README.md if needed
4. Wait for review from maintainers
## Questions or Need Help?
- Join our [Discord community](https://discord.com/channels/1057273017547378788/1126104260430528613) for questions and discussions
- Check the [documentation](https://qodo-merge-docs.qodo.ai/) for detailed information
- Report bugs or request features through [GitHub Issues](https://github.com/qodo-ai/pr-agent/issues)


@@ -1,48 +0,0 @@
## 📌 Pull Request Template
### 1⃣ Short Description
<!-- Provide a concise summary of the changes in this PR. -->
---
### 2⃣ Related Open Issue
<!-- Link the related issue(s) this PR is addressing, e.g., Fixes #123 or Closes #456. -->
Fixes #
---
### 3⃣ PR Type
<!-- Select one of the following by marking `[x]` -->
- [ ] 🐞 Bug Fix
- [ ] ✨ New Feature
- [ ] 🔄 Refactoring
- [ ] 📖 Documentation Update
---
### 4⃣ Does this PR Introduce a Breaking Change?
<!-- Mark the applicable option -->
- [ ] ❌ No
- [ ] ⚠️ Yes (Explain below)
If **yes**, describe the impact and necessary migration steps:
<!-- Provide a short explanation of what needs to be changed. -->
---
### 5⃣ Current Behavior (Before Changes)
<!-- Describe the existing behavior before applying the changes in this PR. -->
---
### 6⃣ New Behavior (After Changes)
<!-- Explain how the behavior changes with this PR. -->
---
### ✅ Checklist
- [ ] Code follows the project's coding guidelines.
- [ ] Tests have been added or updated (if applicable).
- [ ] Documentation has been updated (if applicable).
- [ ] Ready for review and approval.

README.md (280 changed lines)

@@ -10,99 +10,76 @@
</picture>
<br/>
PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
</div>
[![Static Badge](https://img.shields.io/badge/Chrome-Extension-violet)](https://chromewebstore.google.com/detail/qodo-merge-ai-powered-cod/ephlnjeghhogofkifjloamocljapahnl)
[![Static Badge](https://img.shields.io/badge/Pro-App-blue)](https://github.com/apps/qodo-merge-pro/)
[![Static Badge](https://img.shields.io/badge/OpenSource-App-red)](https://github.com/apps/qodo-merge-pro-for-open-source/)
[![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
[![Static Badge](https://img.shields.io/badge/Chrome-Extension-violet)](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl)
[![Static Badge](https://img.shields.io/badge/Code-Benchmark-blue)](https://pr-agent-docs.codium.ai/finetuning_benchmark/)
[![Discord](https://badgen.net/badge/icon/discord?icon=discord&label&color=purple)](https://discord.com/channels/1057273017547378788/1126104260430528613)
<a href="https://github.com/Codium-ai/pr-agent/commits/main">
<img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
</a>
[![Twitter](https://img.shields.io/twitter/follow/codiumai)](https://twitter.com/codiumai)
[![Cheat Sheet](https://img.shields.io/badge/Cheat-Sheet-red)](https://www.codium.ai/images/pr_agent/cheat_sheet.pdf)
<a href="https://github.com/Codium-ai/pr-agent/commits/main">
<img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
</a>
</div>
### [Documentation](https://qodo-merge-docs.qodo.ai/)
### [Documentation](https://pr-agent-docs.codium.ai/)
- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing Qode Merge PR-Agent on different platforms.
- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing PR-Agent on different platforms.
- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.
- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running Qode Merge PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.
- See the [Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) for a detailed description of the different tools, and the available configurations for each tool.
## Table of Contents
- [News and Updates](#news-and-updates)
- [Overview](#overview)
- [Example results](#example-results)
- [Try it now](#try-it-now)
- [Qodo Merge 💎](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/)
- [PR-Agent Pro 💎](https://pr-agent-docs.codium.ai/overview/pr_agent_pro/)
- [How it works](#how-it-works)
- [Why use PR-Agent?](#why-use-pr-agent)
## News and Updates
### Jan 25, 2025
### October 27, 2024
The open-source GitHub organization was updated:
`https://github.com/codium-ai/pr-agent`
`https://github.com/qodo-ai/pr-agent`
Qodo Merge PR Agent will now automatically document accepted code suggestions in a dedicated wiki page (`.pr_agent_accepted_suggestions`), enabling users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.
The docker should be redirected automatically to the new location.
However, if you have any issues, please update the GitHub action docker image from
`uses: Codium-ai/pr-agent@main`
to
`uses: qodo-ai/pr-agent@main`
This dedicated wiki page will also serve as a foundation for future AI model improvements, allowing it to learn from historically implemented suggestions and generate more targeted, contextually relevant recommendations.
Read more about this novel feature [here](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking).
<kbd><img href="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" src="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" width="768"></kbd>
### Jan 2, 2025
New tool [/Implement](https://qodo-merge-docs.qodo.ai/tools/implement/) (💎), which converts human code review discussions and feedback into ready-to-commit code changes.
### October 21, 2024
**Disable publishing labels by default:**
<kbd><img src="https://www.qodo.ai/images/pr_agent/implement1.png" width="512"></kbd>
The default setting for `pr_description.publish_labels` has been updated to `false`. This means that labels generated by the `/describe` tool will no longer be published, unless this configuration is explicitly set to `true`.
We constantly strive to balance informative AI analysis with reducing unnecessary noise. User feedback indicated that in many cases, the original PR title alone provides sufficient information, making the generated labels (`enhancement`, `documentation`, `bug fix`, ...) redundant.
The [`review_effort`](https://qodo-merge-docs.qodo.ai/tools/review/#configuration-options) label, generated by the `review` tool, will still be published by default, as it provides valuable information enabling reviewers to prioritize small PRs first.
However, every user has different preferences. To still publish the `describe` labels, set `pr_description.publish_labels=true` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
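For example, a minimal sketch of the relevant configuration (assuming the standard `configuration.toml` layout described in the linked guide):
```toml
[pr_description]
# re-enable publishing of the labels generated by the /describe tool
publish_labels = true
```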
For more tailored and relevant labeling, we recommend using the [`custom_labels 💎`](https://qodo-merge-docs.qodo.ai/tools/custom_labels/) tool, that allows generating labels specific to your project's needs.
<kbd>![image](https://github.com/user-attachments/assets/8f38d222-53b1-4742-b2ec-7ea0a30c9076)</kbd>
<kbd>![image](https://github.com/user-attachments/assets/8285bd90-0dda-4c7e-9237-bbfde5e21880)</kbd>
### Jan 1, 2025
Update logic and [documentation](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/#ollama) for running local models via Ollama.
### October 14, 2024
Improved support for GitHub enterprise server with [GitHub Actions](https://qodo-merge-docs.qodo.ai/installation/github/#action-for-github-enterprise-server)
### December 30, 2024
### October 10, 2024
New ability for the `review` tool - **ticket compliance feedback**. If the PR contains a ticket number, PR-Agent will check if the PR code actually [complies](https://github.com/Codium-ai/pr-agent/pull/1279#issuecomment-2404042130) with the ticket requirements.
Following feedback from the community, we have addressed two vulnerabilities identified in the open-source PR-Agent project. The fixes are now included in the newly released version (v0.26), available as of today.
### December 25, 2024
The `review` tool previously included a legacy feature for providing code suggestions (controlled by '--pr_reviewer.num_code_suggestion'). This functionality has been deprecated. Use instead the [`improve`](https://qodo-merge-docs.qodo.ai/tools/improve/) tool, which offers higher quality and more actionable code suggestions.
### December 2, 2024
Open-source repositories can now freely use Qodo Merge, and enjoy easy one-click installation using a marketplace [app](https://github.com/apps/qodo-merge-pro-for-open-source).
<kbd><img src="https://github.com/user-attachments/assets/b0838724-87b9-43b0-ab62-73739a3a855c" width="512"></kbd>
See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details about installing Qodo Merge for private repositories.
### November 18, 2024
A new mode was enabled by default for code suggestions - `--pr_code_suggestions.focus_only_on_problems=true`:
- This option reduces the number of code suggestions received
- The suggestions will focus more on identifying and fixing code problems, rather than style considerations like best practices, maintainability, or readability.
- The suggestions will be categorized into just two groups: "Possible Issues" and "General".
Still, if you prefer the previous mode, you can set `--pr_code_suggestions.focus_only_on_problems=false` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
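For example, a minimal configuration-file sketch to restore the previous behavior:
```toml
[pr_code_suggestions]
# revert to the broader suggestion mode (style, maintainability, readability, ...)
focus_only_on_problems = false
```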
**Example results:**
Original mode
<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_original_mode.png" width="512"></kbd>
Focused mode
<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_focused_mode.png" width="512"></kbd>
<kbd><img src="https://github.com/user-attachments/assets/4a2a728b-5f47-40fa-80cc-16efd296938c" width="768"></kbd>
## Overview
@@ -110,76 +87,69 @@ Focused mode
Supported commands per platform:
| | | GitHub | GitLab | Bitbucket | Azure DevOps |
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
| TOOLS | [Review](https://qodo-merge-docs.qodo.ai/tools/review/) | ✅ | ✅ | ✅ | ✅ |
| | [Describe](https://qodo-merge-docs.qodo.ai/tools/describe/) | ✅ | ✅ | ✅ | |
| | [Improve](https://qodo-merge-docs.qodo.ai/tools/improve/) | ✅ | ✅ | ✅ | ✅ |
| | [Ask](https://qodo-merge-docs.qodo.ai/tools/ask/) | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Ask on code lines](https://qodo-merge-docs.qodo.ai/tools/ask/#ask-lines) | ✅ | ✅ | | |
| | [Update CHANGELOG](https://qodo-merge-docs.qodo.ai/tools/update_changelog/) | ✅ | ✅ | ✅ | ✅ |
| | [Ticket Context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/) 💎 | ✅ | ✅ | ✅ | |
| | [Utilizing Best Practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) 💎 | ✅ | ✅ | | |
| | [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) 💎 | ✅ | | | |
| | [Suggestion Tracking](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking) 💎 | ✅ | ✅ | | |
| | [CI Feedback](https://qodo-merge-docs.qodo.ai/tools/ci_feedback/) 💎 | ✅ | | | |
| | [PR Documentation](https://qodo-merge-docs.qodo.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
| | [Custom Labels](https://qodo-merge-docs.qodo.ai/tools/custom_labels/) 💎 | ✅ | | | |
| | [Analyze](https://qodo-merge-docs.qodo.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
| | [Similar Code](https://qodo-merge-docs.qodo.ai/tools/similar_code/) 💎 | ✅ | | | |
| | [Custom Prompt](https://qodo-merge-docs.qodo.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | | |
| | [Test](https://qodo-merge-docs.qodo.ai/tools/test/) 💎 | ✅ | | | |
| | [Implement](https://qodo-merge-docs.qodo.ai/tools/implement/) 💎 | ✅ | ✅ | | |
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline File Summary](https://pr-agent-docs.codium.ai/tools/describe#inline-file-summary) 💎 | ✅ | | | |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Ask on code lines](https://pr-agent-docs.codium.ai/tools/ask#ask-lines) | ✅ | ✅ | | |
| | [Custom Prompt](https://pr-agent-docs.codium.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | ✅ | |
| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | ✅ | | |
| | Reflect and Review | ✅ | ✅ | ✅ | |
| | Update CHANGELOG.md | ✅ | ✅ | | |
| | Find Similar Issue | ✅ | | | |
| | [Add PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
| | [Custom Labels](https://pr-agent-docs.codium.ai/tools/custom_labels/) 💎 | ✅ | | | |
| | [Analyze](https://pr-agent-docs.codium.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
| | [Similar Code](https://pr-agent-docs.codium.ai/tools/similar_code/) 💎 | ✅ | | | |
| | | | | | |
| USAGE | [CLI](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#local-repo-cli) | ✅ | ✅ | ✅ | ✅ |
| | [App / webhook](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-app) | ✅ | ✅ | ✅ | ✅ |
| | [Tagging bot](https://github.com/Codium-ai/pr-agent#try-it-now) | ✅ | | | |
| | [Actions](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) | ✅ |✅| ✅ |✅|
| USAGE | CLI | ✅ | ✅ | ✅ | ✅ |
| | App / webhook | ✅ | ✅ | ✅ | ✅ |
| | Tagging bot | ✅ | | | |
| | Actions | ✅ |✅| ✅ |✅|
| | | | | | |
| CORE | [PR compression](https://qodo-merge-docs.qodo.ai/core-abilities/compression_strategy/) | ✅ | ✅ | ✅ | ✅ |
| CORE | PR compression | ✅ | ✅ | ✅ | ✅ |
| | Repo language prioritization | ✅ | ✅ | ✅ | ✅ |
| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
| | [Multiple models support](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/) | ✅ | ✅ | ✅ | ✅ |
| | [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/) | ✅ | ✅ | ✅ | |
| | [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/) | ✅ | ✅ | ✅ | |
| | [Self reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/) | ✅ | ✅ | ✅ | |
| | [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/) 💎 | ✅ | ✅ | ✅ | |
| | [Global and wiki configurations](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) 💎 | ✅ | ✅ | ✅ | |
| | [PR interactive actions](https://www.qodo.ai/images/pr_agent/pr-actions.mp4) 💎 | ✅ | ✅ | | |
| | [Impact Evaluation](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) 💎 | ✅ | ✅ | | |
- 💎 means this feature is available only in [Qodo-Merge](https://www.qodo.ai/pricing/)
| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
| | [Static code analysis](https://pr-agent-docs.codium.ai/core-abilities/#static-code-analysis) 💎 | ✅ | ✅ | ✅ | |
| | [Global and wiki configurations](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) 💎 | ✅ | ✅ | ✅ | |
| | [PR interactive actions](https://www.codium.ai/images/pr_agent/pr-actions.mp4) 💎 | ✅ | ✅ | | |
- 💎 means this feature is available only in [PR-Agent Pro](https://www.codium.ai/pricing/)
[//]: # (- Support for additional git providers is described in [here]&#40;./docs/Full_environments.md&#41;)
___
**Auto Description ([`/describe`](https://qodo-merge-docs.qodo.ai/tools/describe/))**: Automatically generating PR description - title, type, summary, code walkthrough and labels.
**Auto Description ([`/describe`](https://pr-agent-docs.codium.ai/tools/describe/))**: Automatically generating PR description - title, type, summary, code walkthrough and labels.
\
**Auto Review ([`/review`](https://qodo-merge-docs.qodo.ai/tools/review/))**: Adjustable feedback about the PR, possible issues, security concerns, review effort and more.
**Auto Review ([`/review`](https://pr-agent-docs.codium.ai/tools/review/))**: Adjustable feedback about the PR, possible issues, security concerns, review effort and more.
\
**Code Suggestions ([`/improve`](https://qodo-merge-docs.qodo.ai/tools/improve/))**: Code suggestions for improving the PR.
**Code Suggestions ([`/improve`](https://pr-agent-docs.codium.ai/tools/improve/))**: Code suggestions for improving the PR.
\
**Question Answering ([`/ask ...`](https://qodo-merge-docs.qodo.ai/tools/ask/))**: Answering free-text questions about the PR.
**Question Answering ([`/ask ...`](https://pr-agent-docs.codium.ai/tools/ask/))**: Answering free-text questions about the PR.
\
**Update Changelog ([`/update_changelog`](https://qodo-merge-docs.qodo.ai/tools/update_changelog/))**: Automatically updating the CHANGELOG.md file with the PR changes.
**Update Changelog ([`/update_changelog`](https://pr-agent-docs.codium.ai/tools/update_changelog/))**: Automatically updating the CHANGELOG.md file with the PR changes.
\
**Find Similar Issue ([`/similar_issue`](https://qodo-merge-docs.qodo.ai/tools/similar_issues/))**: Automatically retrieves and presents similar issues.
**Find Similar Issue ([`/similar_issue`](https://pr-agent-docs.codium.ai/tools/similar_issues/))**: Automatically retrieves and presents similar issues.
\
**Add Documentation 💎 ([`/add_docs`](https://qodo-merge-docs.qodo.ai/tools/documentation/))**: Generates documentation to methods/functions/classes that changed in the PR.
**Add Documentation 💎 ([`/add_docs`](https://pr-agent-docs.codium.ai/tools/documentation/))**: Generates documentation to methods/functions/classes that changed in the PR.
\
**Generate Custom Labels 💎 ([`/generate_labels`](https://qodo-merge-docs.qodo.ai/tools/custom_labels/))**: Generates custom labels for the PR, based on specific guidelines defined by the user.
**Generate Custom Labels 💎 ([`/generate_labels`](https://pr-agent-docs.codium.ai/tools/custom_labels/))**: Generates custom labels for the PR, based on specific guidelines defined by the user.
\
**Analyze 💎 ([`/analyze`](https://qodo-merge-docs.qodo.ai/tools/analyze/))**: Identify code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component.
**Analyze 💎 ([`/analyze`](https://pr-agent-docs.codium.ai/tools/analyze/))**: Identify code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component.
\
**Test 💎 ([`/test`](https://qodo-merge-docs.qodo.ai/tools/test/))**: Generate tests for a selected component, based on the PR code changes.
**Custom Prompt 💎 ([`/custom_prompt`](https://pr-agent-docs.codium.ai/tools/custom_prompt/))**: Automatically generates custom suggestions for improving the PR code, based on specific guidelines defined by the user.
\
**Custom Prompt 💎 ([`/custom_prompt`](https://qodo-merge-docs.qodo.ai/tools/custom_prompt/))**: Automatically generates custom suggestions for improving the PR code, based on specific guidelines defined by the user.
**Generate Tests 💎 ([`/test component_name`](https://pr-agent-docs.codium.ai/tools/test/))**: Generates unit tests for a selected component, based on the PR code changes.
\
**Generate Tests 💎 ([`/test component_name`](https://qodo-merge-docs.qodo.ai/tools/test/))**: Generates unit tests for a selected component, based on the PR code changes.
**CI Feedback 💎 ([`/checks ci_job`](https://pr-agent-docs.codium.ai/tools/ci_feedback/))**: Automatically generates feedback and analysis for a failed CI job.
\
**CI Feedback 💎 ([`/checks ci_job`](https://qodo-merge-docs.qodo.ai/tools/ci_feedback/))**: Automatically generates feedback and analysis for a failed CI job.
\
**Similar Code 💎 ([`/find_similar_component`](https://qodo-merge-docs.qodo.ai/tools/similar_code/))**: Retrieves the most similar code components from inside the organization's codebase, or from open-source code.
\
**Implement 💎 ([`/implement`](https://qodo-merge-docs.qodo.ai/tools/implement/))**: Generates implementation code from review suggestions.
**Similar Code 💎 ([`/find_similar_component`](https://pr-agent-docs.codium.ai/tools/similar_code/))**: Retrieves the most similar code components from inside the organization's codebase, or from open-source code.
___
## Example results
@@ -210,8 +180,50 @@ ___
</kbd>
</p>
</div>
<hr>
<h4><a href="https://github.com/Codium-ai/pr-agent/pull/530">/generate_labels</a></h4>
<div align="center">
<p float="center">
<kbd><img src="https://www.codium.ai/images/pr_agent/geneare_custom_labels_main_short.png" width="300"></kbd>
</p>
</div>
[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/78#issuecomment-1639739496">/reflect_and_review:</a></h4>)
[//]: # (<div align="center">)
[//]: # (<p float="center">)
[//]: # (<img src="https://www.codium.ai/images/reflect_and_review.gif" width="800">)
[//]: # (</p>)
[//]: # (</div>)
[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695020538">/ask:</a></h4>)
[//]: # (<div align="center">)
[//]: # (<p float="center">)
[//]: # (<img src="https://www.codium.ai/images/ask-2.gif" width="800">)
[//]: # (</p>)
[//]: # (</div>)
[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695024952">/improve:</a></h4>)
[//]: # (<div align="center">)
[//]: # (<p float="center">)
[//]: # (<img src="https://www.codium.ai/images/improve-2.gif" width="800">)
[//]: # (</p>)
[//]: # (</div>)
<div align="left">
@@ -235,19 +247,19 @@ It does not have 'edit' access to your repo, for example, so it cannot update th
![Review generation process](https://www.codium.ai/images/demo-2.gif)
To set up your own PR-Agent, see the [Installation](https://qodo-merge-docs.qodo.ai/installation/) section below.
Note that when you set your own PR-Agent or use Qodo hosted PR-Agent, there is no need to mention `@CodiumAI-Agent ...`. Instead, directly start with the command, e.g., `/ask ...`.
To set up your own PR-Agent, see the [Installation](https://pr-agent-docs.codium.ai/installation/) section below.
Note that when you set your own PR-Agent or use CodiumAI hosted PR-Agent, there is no need to mention `@CodiumAI-Agent ...`. Instead, directly start with the command, e.g., `/ask ...`.
---
## Qodo Merge 💎
[Qodo Merge](https://www.qodo.ai/pricing/) is a hosted version of PR-Agent, provided by Qodo. It is available for a monthly fee, and provides the following benefits:
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub\GitLab\BitBucket repo.
2. **Improved privacy** - No data will be stored or used to train models. Qodo Merge will employ zero data retention, and will use an OpenAI account with zero data retention.
3. **Improved support** - Qodo Merge users will receive priority support, and will be able to request new features and capabilities.
4. **Extra features** - In addition to the benefits listed above, Qodo Merge will emphasize more customization, and the usage of static code analysis, in addition to LLM logic, to improve results.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for a list of features available in Qodo Merge.
## PR-Agent Pro 💎
[PR-Agent Pro](https://www.codium.ai/pricing/) is a hosted version of PR-Agent, provided by CodiumAI. It is available for a monthly fee, and provides the following benefits:
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the PR-Agent app to your GitHub\GitLab\BitBucket repo.
2. **Improved privacy** - No data will be stored or used to train models. PR-Agent Pro will employ zero data retention, and will use an OpenAI account with zero data retention.
3. **Improved support** - PR-Agent Pro users will receive priority support, and will be able to request new features and capabilities.
4. **Extra features** - In addition to the benefits listed above, PR-Agent Pro will emphasize more customization, and the usage of static code analysis, in addition to LLM logic, to improve results.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for a list of features available in PR-Agent Pro.
@@ -255,9 +267,9 @@ See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for a list of
The following diagram illustrates PR-Agent tools and their flow:
![PR-Agent Tools](https://www.qodo.ai/images/pr_agent/diagram-v0.9.png)
![PR-Agent Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
Check out the [PR Compression strategy](https://qodo-merge-docs.qodo.ai/core-abilities/#pr-compression-strategy) page for more details on how we convert a code diff to a manageable LLM prompt
Check out the [PR Compression strategy](https://pr-agent-docs.codium.ai/core-abilities/#pr-compression-strategy) page for more details on how we convert a code diff to a manageable LLM prompt
## Why use PR-Agent?
@@ -266,7 +278,7 @@ A reasonable question that can be asked is: `"Why use PR-Agent? What makes it st
Here are some advantages of PR-Agent:
- We emphasize **real-life practical usage**. Each tool (review, improve, ask, ...) has a single GPT-4 call, no more. We feel that this is critical for realistic team usage - obtaining an answer quickly (~30 seconds) and affordably.
- Our [PR Compression strategy](https://qodo-merge-docs.qodo.ai/core-abilities/#pr-compression-strategy) is a core ability that enables to effectively tackle both short and long PRs.
- Our [PR Compression strategy](https://pr-agent-docs.codium.ai/core-abilities/#pr-compression-strategy) is a core ability that enables to effectively tackle both short and long PRs.
- Our JSON prompting strategy enables to have **modular, customizable tools**. For example, the '/review' tool categories can be controlled via the [configuration](pr_agent/settings/configuration.toml) file. Adding additional categories is easy and accessible.
- We support **multiple git providers** (GitHub, Gitlab, Bitbucket), **multiple ways** to use the tool (CLI, GitHub Action, GitHub App, Docker, ...), and **multiple models** (GPT-4, GPT-3.5, Anthropic, Cohere, Llama2).
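As an illustration of the configurability mentioned above (a sketch only; the option names below are assumptions, check `pr_agent/settings/configuration.toml` for the authoritative keys), the `/review` tool categories are toggled with boolean flags along these lines:
```toml
[pr_reviewer]
# illustrative flags; the real file may use different names
require_score_review = false
require_tests_review = true
require_estimate_effort_to_review = true
```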
@@ -278,24 +290,24 @@ Here are some advantages of PR-Agent:
- If you host PR-Agent with your OpenAI API key, it is between you and OpenAI. You can read their API data privacy policy here:
https://openai.com/enterprise-privacy
### Qodo-hosted Qodo Merge 💎
### CodiumAI-hosted PR-Agent Pro 💎
- When using Qodo Merge 💎, hosted by Qodo, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.
- When using PR-Agent Pro 💎, hosted by CodiumAI, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.
- For certain clients, Qodo-hosted Qodo Merge will use Qodo's proprietary models — if this is the case, you will be notified.
- For certain clients, CodiumAI-hosted PR-Agent Pro will use CodiumAI's proprietary models — if this is the case, you will be notified.
- No passive collection of Code and Pull Requests data — Qodo Merge will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.
- No passive collection of Code and Pull Requests data — PR-Agent will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.
### Qodo Merge Chrome extension
### PR-Agent Chrome extension
- The [Qodo Merge Chrome extension](https://chromewebstore.google.com/detail/qodo-merge-ai-powered-cod/ephlnjeghhogofkifjloamocljapahnl) serves solely to modify the visual appearance of a GitHub PR screen. It does not transmit any user's repo or pull request code. Code is only sent for processing when a user submits a GitHub comment that activates a PR-Agent tool, in accordance with the standard privacy policy of Qodo-Merge.
- The [PR-Agent Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) serves solely to modify the visual appearance of a GitHub PR screen. It does not transmit any user's repo or pull request code. Code is only sent for processing when a user submits a GitHub comment that activates a PR-Agent tool, in accordance with the standard privacy policy of PR-Agent.
## Links
[![Join our Discord community](https://raw.githubusercontent.com/Codium-ai/codiumai-vscode-release/main/media/docs/Joincommunity.png)](https://discord.gg/kG35uSHDBc)
- Discord community: https://discord.gg/kG35uSHDBc
- Qodo site: https://www.qodo.ai/
- Blog: https://www.qodo.ai/blog/
- Troubleshooting: https://www.qodo.ai/blog/technical-faq-and-troubleshooting/
- Support: support@qodo.ai
- CodiumAI site: https://codium.ai
- Blog: https://www.codium.ai/blog/
- Troubleshooting: https://www.codium.ai/blog/technical-faq-and-troubleshooting/
- Support: support@codium.ai


@@ -1,62 +0,0 @@
# Security Policy
PR-Agent is an open-source tool to help efficiently review and handle pull requests. Qodo Merge is a paid version of PR-Agent, designed for companies and teams that require additional features and capabilities.
This document describes the security policy of PR-Agent. For Qodo Merge's security policy, see [here](https://qodo-merge-docs.qodo.ai/overview/data_privacy/#qodo-merge).
## PR-Agent Self-Hosted Solutions
When using PR-Agent with your OpenAI (or other LLM provider) API key, the security relationship is directly between you and the provider. We do not send your code to Qodo servers.
Types of [self-hosted solutions](https://qodo-merge-docs.qodo.ai/installation):
- Locally
- GitHub integration
- GitLab integration
- BitBucket integration
- Azure DevOps integration
## PR-Agent Supported Versions
This section outlines which versions of PR-Agent are currently supported with security updates.
### Docker Deployment Options
#### Latest Version
For the most recent updates, use our latest Docker image which is automatically built nightly:
```yaml
uses: qodo-ai/pr-agent@main
```
#### Specific Release Version
For a fixed version, you can pin your action to a specific release version. Browse available releases at:
[PR-Agent Releases](https://github.com/qodo-ai/pr-agent/releases)
For example, to pin the version in a GitHub Action:
```yaml
steps:
- name: PR Agent action step
id: pragent
uses: docker://codiumai/pr-agent:0.26-github_action
```
#### Enhanced Security with Docker Digest
For maximum security, you can specify the Docker image using its digest:
```yaml
steps:
- name: PR Agent action step
id: pragent
uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
```
## Reporting a Vulnerability
We take the security of PR-Agent seriously. If you discover a security vulnerability, please report it immediately to:
Email: tal.r@qodo.ai
Please include a description of the vulnerability, steps to reproduce, and the affected PR-Agent version.


@@ -4,7 +4,7 @@ WORKDIR /app
ADD pyproject.toml .
ADD requirements.txt .
ADD docs docs
RUN pip install --no-cache-dir . && rm pyproject.toml requirements.txt
RUN pip install . && rm pyproject.toml requirements.txt
ENV PYTHONPATH=/app
FROM base AS github_app
@@ -33,7 +33,7 @@ CMD ["python", "pr_agent/servers/azuredevops_server_webhook.py"]
FROM base AS test
ADD requirements-dev.txt .
RUN pip install --no-cache-dir -r requirements-dev.txt && rm requirements-dev.txt
RUN pip install -r requirements-dev.txt && rm requirements-dev.txt
ADD pr_agent pr_agent
ADD tests tests


@@ -5,8 +5,8 @@ RUN yum update -y && \
yum clean all
ADD pyproject.toml requirements.txt .
RUN pip install --no-cache-dir . && rm pyproject.toml
RUN pip install --no-cache-dir mangum==0.17.0
RUN pip install . && rm pyproject.toml
RUN pip install mangum==0.17.0
COPY pr_agent/ ${LAMBDA_TASK_ROOT}/pr_agent/
CMD ["pr_agent.servers.serverless.serverless"]


@@ -1,4 +1,5 @@
We take your code's security and privacy seriously:
- The Chrome extension will not send your code to any external servers.
- For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge integration.
- For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge Pro integration.


@@ -8,7 +8,7 @@ To enable private chat, simply install the Qodo Merge Chrome extension. After in
This chat session is **private**, and won't be visible to other users.
All open-source repositories are supported.
For private repositories, you will also need to install Qodo Merge. After installation, make sure to open at least one new PR to fully register your organization. Once done, you can chat with both new and existing PRs across all installed repositories.
For private repositories, you will also need to install Qodo Merge Pro, After installation, make sure to open at least one new PR to fully register your organization. Once done, you can chat with both new and existing PRs across all installed repositories.
#### Context-aware PR chat


@@ -1,11 +1,11 @@
[Qodo Merge Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl){:target="_blank"} is a collection of tools that integrates seamlessly with your GitHub environment, aiming to enhance your Git usage experience, and providing AI-powered capabilities to your PRs.
[Qodo Merge Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) is a collection of tools that integrates seamlessly with your GitHub environment, aiming to enhance your Git usage experience, and providing AI-powered capabilities to your PRs.
With a single-click installation you will gain access to a context-aware chat on your pull requests code, a toolbar extension with multiple AI feedbacks, Qodo Merge filters, and additional abilities.
The extension is powered by top code models like Claude 3.5 Sonnet and GPT4. All the extension's features are free to use on public repositories.
For private repositories, you will need to install [Qodo Merge](https://github.com/apps/qodo-merge-pro){:target="_blank"} in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
For a demonstration of how to install Qodo Merge and use it with the Chrome extension, please refer to the tutorial video at the provided [link](https://codium.ai/images/pr_agent/private_repos.mp4){:target="_blank"}.
For private repositories, you will need to install [Qodo Merge Pro](https://github.com/apps/codiumai-pr-agent-pro) in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
For a demonstration of how to install Qodo Merge Pro and use it with the Chrome extension, please refer to the tutorial video at the provided [link](https://codium.ai/images/pr_agent/private_repos.mp4).
<img src="https://codium.ai/images/pr_agent/PR-AgentChat.gif" width="768">


@@ -1,69 +0,0 @@
# Auto Best Practices 💎
`Supported Git Platforms: GitHub`
## Overview
![Auto best practice suggestion graph](https://www.qodo.ai/images/pr_agent/auto_best_practices_graph.png){width=684}
> Note - enabling a [Wiki](https://qodo-merge-docs.qodo.ai/usage-guide/enabling_a_wiki/) is required for this feature.
### Finding Code Problems - Exploration Phase
The `improve` tool identifies potential issues, problems and bugs in Pull Request (PR) code changes.
Rather than focusing on minor issues like code style or formatting, the tool intelligently analyzes code to detect meaningful problems.
The analysis intentionally takes a flexible, _exploratory_ approach to identify meaningful potential issues, allowing the tool to surface relevant code suggestions without being constrained by predefined categories.
### Tracking Implemented Suggestions
Qodo Merge features a novel [tracking system](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking) that automatically detects when PR authors implement AI-generated code suggestions.
All accepted suggestions are aggregated in a repository-specific wiki page called [`.pr_agent_accepted_suggestions`](https://github.com/qodo-ai/pr-agent/wiki/.pr_agent_accepted_suggestions)
### Learning and Applying Auto Best Practices
Monthly, Qodo Merge analyzes the collection of accepted suggestions to generate repository-specific best practices, stored in [`.pr_agent_auto_best_practices`](https://github.com/qodo-ai/pr-agent/wiki/.pr_agent_auto_best_practices) wiki file.
These best practices reflect recurring patterns in accepted code improvements.
The `improve` tool will incorporate these best practices as an additional analysis layer, checking PR code changes against known patterns of previously accepted improvements.
This creates a two-phase analysis:
1. Open exploration for general code issues
2. Targeted checking against established best practices - exploiting the knowledge gained from past suggestions
By keeping these phases decoupled, the tool remains free to discover new or unseen issues and problems, while also learning from past experiences.
When presenting the suggestions generated by the `improve` tool, Qodo Merge will add a dedicated label for each suggestion generated from the auto best practices - 'Learned best practice':
![Auto best practice suggestion](https://www.qodo.ai/images/pr_agent/auto_best_practices.png){width=684}
## Auto Best Practices vs Custom Best Practices
Teams and companies can also manually define their own [custom best practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) in Qodo Merge.
When custom best practices exist, Qodo Merge will still generate an 'auto best practices' wiki file, though it won't be used by the `improve` tool.
However, this auto-generated file can still serve two valuable purposes:
1. It can help enhance your custom best practices with additional insights derived from suggestions your team found valuable enough to implement
2. It demonstrates effective patterns for writing AI-friendly best practices
Even when using custom best practices, we recommend regularly reviewing the auto best practices file to refine your custom rules.
## Relevant configurations
```toml
[auto_best_practices]
# Disable all auto best practices usage or generation
enable_auto_best_practices = true
# Disable usage of auto best practices file in the 'improve' tool
utilize_auto_best_practices = true
# Extra instructions to the auto best practices generation prompt
extra_instructions = ""
# Max number of patterns to be detected
max_patterns = 5
```


@@ -1,170 +0,0 @@
# Fetching Ticket Context for PRs
`Supported Git Platforms: GitHub, GitLab, Bitbucket`
## Overview
Qodo Merge PR Agent streamlines code review workflows by seamlessly connecting with multiple ticket management systems.
This integration enriches the review process by automatically surfacing relevant ticket information and context alongside code changes.
## Ticket systems supported
- GitHub
- Jira (💎)
Ticket data fetched:
1. Ticket Title
2. Ticket Description
3. Custom Fields (Acceptance criteria)
4. Subtasks (linked tasks)
5. Labels
6. Attached Images/Screenshots
## Affected Tools
Ticket Recognition Requirements:
- The PR description should contain a link to the ticket, or the branch name should start with the ticket ID/number.
- For Jira tickets, you should follow the instructions in [Jira Integration](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/#jira-integration) in order to authenticate with Jira.
### Describe tool
Qodo Merge PR Agent will recognize the ticket and use the ticket content (title, description, labels) to provide additional context for the code changes.
By understanding the reasoning and intent behind modifications, the LLM can offer more insightful and relevant code analysis.
### Review tool
Similarly to the `describe` tool, the `review` tool will use the ticket content to provide additional context for the code changes.
In addition, this feature will evaluate how well a Pull Request (PR) adheres to its original purpose/intent as defined by the associated ticket or issue mentioned in the PR description.
Each ticket will be assigned a label (Compliance/Alignment level) indicating the degree to which the PR fulfills its original purpose. Options: Fully compliant, Partially compliant, or Not compliant.
![Ticket Compliance](https://www.qodo.ai/images/pr_agent/ticket_compliance_review.png){width=768}
By default, the tool will automatically validate if the PR complies with the referenced ticket.
If you want to disable this feedback, add the following line to your configuration file:
```toml
[pr_reviewer]
require_ticket_analysis_review=false
```
## Providers
### Github Issues Integration
Qodo Merge PR Agent will automatically recognize Github issues mentioned in the PR description and fetch the issue content.
Examples of valid GitHub issue references:
- `https://github.com/<ORG_NAME>/<REPO_NAME>/issues/<ISSUE_NUMBER>`
- `#<ISSUE_NUMBER>`
- `<ORG_NAME>/<REPO_NAME>#<ISSUE_NUMBER>`
Since Qodo Merge PR Agent is integrated with GitHub, it doesn't require any additional configuration to fetch GitHub issues.
### Jira Integration 💎
We support both Jira Cloud and Jira Server/Data Center.
To integrate with Jira, you can link your PR to a ticket using either of these methods:
**Method 1: Description Reference:**
Include a ticket reference in your PR description using either the complete URL format https://<JIRA_ORG>.atlassian.net/browse/ISSUE-123 or the shortened ticket ID ISSUE-123.
**Method 2: Branch Name Detection:**
Name your branch with the ticket ID as a prefix (e.g., `ISSUE-123-feature-description` or `ISSUE-123/feature-description`).
!!! note "Jira Base URL"
For shortened ticket IDs or branch detection (method 2), you must configure the Jira base URL in your configuration file under the [jira] section:
```toml
[jira]
jira_base_url = "https://<JIRA_ORG>.atlassian.net"
```
#### Jira Cloud 💎
There are two ways to authenticate with Jira Cloud:
**1) Jira App Authentication**
The recommended way to authenticate with Jira Cloud is to install the Qodo Merge app in your Jira Cloud instance. This will allow Qodo Merge to access Jira data on your behalf.
Installation steps:
1. Click [here](https://auth.atlassian.com/authorize?audience=api.atlassian.com&client_id=8krKmA4gMD8mM8z24aRCgPCSepZNP1xf&scope=read%3Ajira-work%20offline_access&redirect_uri=https%3A%2F%2Fregister.jira.pr-agent.codium.ai&state=qodomerge&response_type=code&prompt=consent) to install the Qodo Merge app in your Jira Cloud instance, then click the `accept` button.<br>
![Jira Cloud App Installation](https://www.qodo.ai/images/pr_agent/jira_app_installation1.png){width=384}
2. After installing the app, you will be redirected to the Qodo Merge registration page, and you will see a success message.<br>
![Jira Cloud App success message](https://www.qodo.ai/images/pr_agent/jira_app_success.png){width=384}
3. Now you can use the Jira integration in Qodo Merge PR Agent.
**2) Email/Token Authentication**
You can create an API token from your Atlassian account:
1. Log in to https://id.atlassian.com/manage-profile/security/api-tokens.
2. Click Create API token.
3. From the dialog that appears, enter a name for your new token and click Create.
4. Click Copy to clipboard.
![Jira Cloud API Token](https://images.ctfassets.net/zsv3d0ugroxu/1RYvh9lqgeZjjNe5S3Hbfb/155e846a1cb38f30bf17512b6dfd2229/screenshot_NewAPIToken){width=384}
5. In your [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) add the following lines:
```toml
[jira]
jira_api_token = "YOUR_API_TOKEN"
jira_api_email = "YOUR_EMAIL"
```
#### Jira Data Center/Server 💎
##### Local App Authentication (For Qodo Merge On-Premise Customers)
##### Step 1: Set up an application link in Jira Data Center/Server
* Go to Jira Administration > Applications > Application Links > Click on `Create link`
![application links](https://www.qodo.ai/images/pr_agent/jira_app_links.png){width=384}
* Choose `External application` and set the direction to `Incoming` and then click `Continue`
![external application](https://www.qodo.ai/images/pr_agent/jira_create_link.png){width=256}
* In the following screen, enter the following details:
* Name: `Qodo Merge`
* Redirect URL: Enter your Qodo Merge URL followed by `https://{QODO_MERGE_ENDPOINT}/register_ticket_provider`
* Permission: Select `Read`
* Click `Save`
![external application details](https://www.qodo.ai/images/pr_agent/jira_fill_app_link.png){width=384}
* Copy the `Client ID` and `Client secret` and set them in your `.secrets` file:
![client id and secret](https://www.qodo.ai/images/pr_agent/jira_app_credentionals.png){width=256}
```toml
[jira]
jira_app_secret = "..."
jira_client_id = "..."
```
##### Step 2: Authenticate with Jira Data Center/Server
* Open this URL in your browser: `https://{QODO_MERGE_ENDPOINT}/jira_auth`
* Click on the link
![jira auth success](https://www.qodo.ai/images/pr_agent/jira_auth_page.png){width=384}
* You will be redirected to Jira Data Center/Server, click `Allow`
* You will be redirected back to Qodo Merge PR Agent and you will see a success message.
##### Personal Access Token (PAT) Authentication
We also support the Personal Access Token (PAT) authentication method.
1. Create a [Personal Access Token (PAT)](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) in your Jira account
2. In your Configuration file/Environment variables/Secrets file, add the following lines:
```toml
[jira]
jira_base_url = "YOUR_JIRA_BASE_URL" # e.g. https://jira.example.com
jira_api_token = "YOUR_API_TOKEN"
```


@@ -1,8 +1,6 @@
# Core Abilities
Qodo Merge utilizes a variety of core abilities to provide a comprehensive and efficient code review experience. These abilities include:
- [Fetching ticket context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/)
- [Auto best practices](https://qodo-merge-docs.qodo.ai/core-abilities/auto_best_practices/)
- [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/)
- [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/)
- [Self-reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/)
@@ -19,7 +17,6 @@ Here are some additional technical blogs from Qodo, that delve deeper into the c
These resources provide more comprehensive insights into leveraging LLMs for software development.
### Code Generation and LLMs
- [Effective AI code suggestions: less is more](https://www.qodo.ai/blog/effective-code-suggestions-llms-less-is-more/)
- [State-of-the-art Code Generation with AlphaCodium From Prompt Engineering to Flow Engineering](https://www.qodo.ai/blog/qodoflow-state-of-the-art-code-generation-for-code-contests/)
- [RAG for a Codebase with 10k Repos](https://www.qodo.ai/blog/rag-for-large-scale-code-repos/)


@@ -32,14 +32,14 @@ For example, when generating code suggestions for different files, Qodo Merge ca
@@ ... @@ def func1():
__new hunk__
11 unchanged code line0
12 unchanged code line1
13 +new code line2 added
14 unchanged code line3
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
-old code line2 removed
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():


@@ -46,5 +46,6 @@ This results in a more refined and valuable set of suggestions for the user, sav
## Appendix - Relevant Configuration Options
```
[pr_code_suggestions]
self_reflect_on_suggestions = true # Enable self-reflection on code suggestions
suggestions_score_threshold = 0 # Filter out suggestions with a score below this threshold (0-10)
```


@@ -61,7 +61,7 @@ Or be triggered interactively by using the `analyze` tool.
### Find Similar Code
The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase or from open-source code, including details about the license associated with each repository.
The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.
For example:


@@ -1,6 +1,6 @@
# FAQ
??? note "Q: Can Qodo Merge serve as a substitute for a human reviewer?"
??? note "Question: Can Qodo Merge serve as a substitute for a human reviewer?"
#### Answer:<span style="display:none;">1</span>
Qodo Merge is designed to assist, not replace, human reviewers.
@ -12,7 +12,7 @@
1. Preserves user's original PR header
2. Places user's description above the AI-generated PR description
3. Won't approve PRs; approval remains reviewer's responsibility
3. Cannot approve PRs; approval remains reviewer's responsibility
4. The code suggestions are optional, and aim to:
- Encourage self-review and self-reflection
- Highlight potential bugs or oversights
@ -22,15 +22,15 @@
___
??? note "Q: I received an incorrect or irrelevant suggestion. Why?"
??? note "Question: I received an incorrect or irrelevant suggestion. Why?"
#### Answer:<span style="display:none;">2</span>
- Modern AI models, like Claude 3.5 Sonnet and GPT-4, are improving rapidly but remain imperfect. Users should critically evaluate all suggestions rather than accepting them automatically.
- AI errors are rare, but possible. A main value from reviewing the code suggestions lies in their high probability of catching **mistakes or bugs made by the PR author**. We believe it's worth spending 30-60 seconds reviewing suggestions, even if some aren't relevant, as this practice can enhance code quality and prevent bugs in production.
- AI errors are rare, but possible. A main value from reviewing the code suggestions lies in their high probability of catching **mistakes or bugs made by the PR author**. We believe it's worth spending 30-60 seconds reviewing suggestions, even if some aren't relevant, as this practice can enhances code quality and prevent bugs in production.
- The hierarchical structure of the suggestions is designed to help the user _quickly_ understand them, and to decide which ones are relevant and which are not:
- The hierarchical structure of the suggestions is designed to help the user to _quickly_ understand them, and to decide which ones are relevant and which are not:
- Only if the `Category` header is relevant, the user should move to the summarized suggestion description.
- Only if the summarized suggestion description is relevant, the user should click on the collapsible, to read the full suggestion description with a code preview example.
@ -40,14 +40,14 @@ ___
___
??? note "Q: How can I get more tailored suggestions?"
??? note "Question: How can I get more tailored suggestions?"
#### Answer:<span style="display:none;">3</span>
See [here](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) for more information on how to use the `extra_instructions` and `best_practices` configuration options, to guide the model to more tailored suggestions.
___
??? note "Q: Will you store my code? Are you using my code to train models?"
??? note "Question: Will you store my code ? Are you using my code to train models?"
#### Answer:<span style="display:none;">4</span>
No. Qodo Merge strict privacy policy ensures that your code is not stored or used for training purposes.
@ -56,35 +56,12 @@ ___
___
??? note "Q: Can I use my own LLM keys with Qodo Merge?"
??? note "Question: Can I use my own LLM keys with Qodo Merge?"
#### Answer:<span style="display:none;">5</span>
When you self-host the [open-source](https://github.com/Codium-ai/pr-agent) version, you use your own keys.
When you self-host, you use your own keys.
Qodo Merge with SaaS deployment is a hosted version of Qodo Merge, where Qodo manages the infrastructure and the keys.
Qodo Merge Pro with SaaS deployment is a hosted version of Qodo Merge, where Qodo manages the infrastructure and the keys.
For enterprise customers, on-prem deployment is also available. [Contact us](https://www.codium.ai/contact/#pricing) for more information.
___
??? note "Q: Can Qodo Merge review draft/offline PRs?"
#### Answer:<span style="display:none;">5</span>
Yes. While Qodo Merge won't automatically review draft PRs, you can still get feedback by manually requesting it through [online commenting](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#online-usage).
For active PRs, you can customize the automatic feedback settings [here](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#qodo-merge-automatic-feedback) to match your team's workflow.
___
??? note "Q: Can the 'Review effort' feedback be calibrated or customized?"
#### Answer:<span style="display:none;">5</span>
Yes, you can customize review effort estimates using the `extra_instructions` configuration option (see [documentation](https://qodo-merge-docs.qodo.ai/tools/review/#configuration-options)).
Example mapping:
- Effort 1: < 30 minutes review time
- Effort 2: 30-60 minutes review time
- Effort 3: 60-90 minutes review time
- ...
Note: The effort levels (1-5) are primarily meant for _comparative_ purposes, helping teams prioritize reviewing smaller PRs first. The actual review duration may vary, as the focus is on providing consistent relative effort estimates.
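For illustration, a minimal sketch of such a calibration, assuming the `review` tool reads `extra_instructions` from the `[pr_reviewer]` section as described in its documentation (adapt the wording of the mapping to your team):
```toml
[pr_reviewer]
extra_instructions="""\
When estimating 'Review effort', use the following mapping:
- Effort 1: less than 30 minutes of review time
- Effort 2: 30-60 minutes of review time
- Effort 3: 60-90 minutes of review time
"""
```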
___

View File

@ -1,16 +1,15 @@
# Overview
[PR-Agent](https://github.com/Codium-ai/pr-agent) is an open-source tool to help efficiently review and handle pull requests.
Qodo Merge is a hosted version of PR-Agent, designed for companies and teams that require additional features and capabilities
Qodo Merge is an open-source tool to help efficiently review and handle pull requests.
- See the [Installation Guide](./installation/index.md) for instructions on installing and running the tool on different git platforms.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running the Qodo Merge commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Tools Guide](./tools/index.md) for a detailed description of the different tools.
## Docs Smart Search
## Qodo Merge Docs Smart Search
To search the documentation site using natural language:
@ -19,12 +18,12 @@ To search the documentation site using natural language:
- A pull request where Qodo Merge is installed
- A [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat)
2) The bot will respond with an [answer](https://github.com/Codium-ai/pr-agent/pull/1241#issuecomment-2365259334) that includes relevant documentation links.
2) Qodo Merge will respond with an [answer](https://github.com/Codium-ai/pr-agent/pull/1241#issuecomment-2365259334) that includes relevant documentation links.
## Qodo Merge Features
Qodo Merge offers extensive pull request functionalities across various git providers:
Qodo Merge offers extensive pull request functionalities across various git providers.
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
@ -32,18 +31,16 @@ Qodo Merge offers extensive pull request functionalities across various git prov
| | ⮑ Incremental | ✅ | | | |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | |
| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | Reflect and Review | ✅ | ✅ | ✅ | |
| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | Reflect and Review | ✅ | ✅ | ✅ | |
| | Update CHANGELOG.md | ✅ | ✅ | ✅ | |
| | Find Similar Issue | ✅ | | | |
| | [Add PR Documentation](./tools/documentation.md){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Generate Custom Labels](./tools/describe.md#handle-custom-labels-from-the-repos-labels-page-💎){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Analyze PR Components](./tools/analyze.md){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | ✅ | | |
| | [Implement](https://pr-agent-docs.codium.ai/tools/implement/) 💎 | ✅ | ✅ | ✅ | |
| | [Add PR Documentation](./tools/documentation.md){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Generate Custom Labels](./tools/describe.md#handle-custom-labels-from-the-repos-labels-page-💎){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Analyze PR Components](./tools/analyze.md){:target="_blank"} 💎 | ✅ | ✅ | | |
| | | | | | |
| USAGE | CLI | ✅ | ✅ | ✅ | ✅ |
| | App / webhook | ✅ | ✅ | ✅ | ✅ |
@ -54,10 +51,10 @@ Qodo Merge offers extensive pull request functionalities across various git prov
| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
| | Incremental PR review | ✅ | | | |
| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
💎 marks a feature available only in [Qodo Merge](https://www.codium.ai/pricing/){:target="_blank"}, and not in the open-source version.
💎 marks a feature available only in [Qodo Merge Pro](https://www.codium.ai/pricing/){:target="_blank"}
## Example Results
@ -93,4 +90,4 @@ The following diagram illustrates Qodo Merge tools and their flow:
![Qodo Merge Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
Check out the [PR Compression strategy](core-abilities/index.md) page for more details on how we convert a code diff to a manageable LLM prompt
Check out the [core abilities](core-abilities/index.md) page for a comprehensive overview of the variety of core abilities used by Qodo Merge.

View File

@ -1,5 +1,5 @@
## Azure DevOps Pipeline
You can use a pre-built Action Docker image to run PR-Agent as an Azure devops pipeline.
You can use a pre-built Action Docker image to run Qodo Merge as an Azure devops pipeline.
Add the following file to your repository under `azure-pipelines.yml`:
```yaml
# Opt out of CI triggers
@ -51,12 +51,10 @@ stages:
```
This script will run Qodo Merge on every new merge request, with the `improve`, `review`, and `describe` commands.
Note that you need to export the `azure_devops__pat` and `OPENAI_KEY` variables in the Azure DevOps pipeline settings (Pipelines -> Library -> + Variable group):
![Qodo Merge](https://codium.ai/images/pr_agent/azure_devops_pipeline_secrets.png){width=468}
![Qodo Merge Pro](https://codium.ai/images/pr_agent/azure_devops_pipeline_secrets.png){width=468}
Make sure to give pipeline permissions to the `pr_agent` variable group.
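For illustration, a minimal fragment showing how the pipeline can reference that variable group (assuming the group is named `pr_agent` as above; the rest of your `azure-pipelines.yml` layout may differ):
```yaml
# Reference the variable group that holds azure_devops__pat and OPENAI_KEY
variables:
  - group: pr_agent
```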
> Note that Azure Pipelines lacks support for triggering workflows from PR comments. If you find a viable solution, please contribute it to our [issue tracker](https://github.com/Codium-ai/pr-agent/issues)
## Azure DevOps from CLI
@ -80,7 +78,7 @@ org = "https://dev.azure.com/YOUR_ORGANIZATION/"
# pat = "YOUR_PAT_TOKEN" needed only if using PAT for authentication
```
## Azure DevOps Webhook
### Azure DevOps Webhook
To trigger from an Azure webhook, you need to manually [add a webhook](https://learn.microsoft.com/en-us/azure/devops/service-hooks/services/webhooks?view=azure-devops).
Use the "Pull request created" type to trigger a review, or "Pull request commented on" to trigger any supported comment with /<command> <args> comment on the relevant PR. Note that for the "Pull request commented on" trigger, only API v2.0 is supported.

View File

@ -3,7 +3,7 @@
You can use the Bitbucket Pipeline system to run Qodo Merge on every pull request open or update.
1. Add the following file in your repository bitbucket-pipelines.yml
1. Add the following file in your repository bitbucket_pipelines.yml
```yaml
pipelines:
@ -27,6 +27,10 @@ You can get a Bitbucket token for your repository by following Repository Settin
Note that comments on a PR are not supported in Bitbucket Pipeline.
## Run using CodiumAI-hosted Bitbucket app 💎
Please visit [Qodo Merge Pro](https://www.codium.ai/pricing/) if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implementation.
## Bitbucket Server and Data Center

View File

@ -1,6 +1,6 @@
## Run as a GitHub Action
You can use our pre-built Github Action Docker image to run PR-Agent as a Github Action.
You can use our pre-built Github Action Docker image to run Qodo Merge as a Github Action.
1) Add the following file to your repository under `.github/workflows/pr_agent.yml`:
@ -21,7 +21,7 @@ jobs:
steps:
- name: PR Agent action step
id: pragent
uses: qodo-ai/pr-agent@main
uses: Codium-ai/pr-agent@main
env:
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,5 +1,5 @@
## Run as a GitLab Pipeline
You can use a pre-built Action Docker image to run PR-Agent as a GitLab pipeline. This is a simple way to get started with Qodo Merge without setting up your own server.
You can use a pre-built Action Docker image to run Qodo Merge as a GitLab pipeline. This is a simple way to get started with Qodo Merge without setting up your own server.
(1) Add the following file to your repository under `.gitlab-ci.yml`:
```yaml
@ -38,41 +38,25 @@ You can also modify the `script` section to run different Qodo Merge commands, o
Note that if your base branches are not protected, don't set the variables as `protected`, since the pipeline will not have access to them.
> **Note**: The `$CI_SERVER_FQDN` variable is available starting from GitLab version 16.10. If you're using an earlier version, this variable will not be available. However, you can combine `$CI_SERVER_HOST` and `$CI_SERVER_PORT` to achieve the same result. Please ensure you're using a compatible version or adjust your configuration.
## Run a GitLab webhook server
1. From the GitLab workspace or group, create an access token with "Reporter" role ("Developer" if using Pro version of the agent) and "api" scope.
1. From the GitLab workspace or group, create an access token. Enable the "api" scope only.
2. Generate a random secret for your app, and save it for later. For example, you can use:
```
WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
```
3. Follow the instructions to build the Docker image, set up a secrets file, and deploy on your own server from [here](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app), steps 4-7.
3. Clone this repository:
4. In the secrets file, fill in the following:
- Your OpenAI key.
- In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
- Set deployment_type to 'gitlab' in [configuration.toml](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml)
```
git clone https://github.com/Codium-ai/pr-agent.git
```
5. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```. Set the secret token to the generated secret from step 2.
In the "Trigger" section, check the comments and merge request events boxes.
4. Prepare variables and secrets. Skip this step if you plan on setting these as environment variables when running the agent:
1. In the configuration file/variables:
- Set `deployment_type` to "gitlab"
2. In the secrets file/variables:
- Set your AI model key in the respective section
- In the [gitlab] section, set `personal_access_token` (with token from step 1) and `shared_secret` (with secret from step 2)
5. Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:
```
docker build . -t gitlab_pr_agent --target gitlab_webhook -f docker/Dockerfile
docker push codiumai/pr-agent:gitlab_webhook # Push to your Docker repository
```
6. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```, the secret token to the generated secret from step 2, and enable the triggers `push`, `comments` and `merge request events`.
7. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
6. Test your installation by opening a merge request or commenting or a merge request using one of CodiumAI's commands.

View File

@ -1,17 +1,21 @@
# Installation
## Self-hosted PR-Agent
## Self-hosted Qodo Merge
If you choose to host your own Qodo Merge, you first need to acquire two tokens:
There are several ways to use self-hosted PR-Agent:
1. An OpenAI key from [here](https://platform.openai.com/api-keys), with access to GPT-4 (or a key for other [language models](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/), if you prefer).
2. A GitHub\GitLab\BitBucket personal access token (classic), with the repo scope. [GitHub from [here](https://github.com/settings/tokens)]
There are several ways to use self-hosted Qodo Merge:
- [Locally](./locally.md)
- [GitHub integration](./github.md)
- [GitLab integration](./gitlab.md)
- [BitBucket integration](./bitbucket.md)
- [Azure DevOps integration](./azure.md)
- [GitHub](./github.md)
- [GitLab](./gitlab.md)
- [BitBucket](./bitbucket.md)
- [Azure DevOps](./azure.md)
## Qodo Merge 💎
Qodo Merge, an app hosted by QodoAI for GitHub\GitLab\BitBucket, is also available.
## Qodo Merge Pro 💎
Qodo Merge Pro, an app hosted by CodiumAI for GitHub\GitLab\BitBucket, is also available.
<br>
With Qodo Merge, installation is as simple as adding the Qodo Merge app to your relevant repositories.
See [here](https://qodo-merge-docs.qodo.ai/installation/qodo_merge/) for more details.
With Qodo Merge Pro, installation is as simple as signing up and adding the Qodo Merge app to your relevant repo.
See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details.

View File

@ -1,77 +1,3 @@
To run PR-Agent locally, you first need to acquire two keys:
1. An OpenAI key from [here](https://platform.openai.com/api-keys){:target="_blank"}, with access to GPT-4 (or a key for other [language models](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/), if you prefer).
2. A personal access token from your Git platform (GitHub, GitLab, BitBucket) with repo scope. GitHub token, for example, can be issued from [here](https://github.com/settings/tokens){:target="_blank"}
## Using Docker image
A list of the relevant tools can be found in the [tools guide](../tools/ask.md).
To invoke a tool (for example `review`), you can run PR-Agent directly from the Docker image. Here's how:
- For GitHub:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you are using GitHub enterprise server, you need to specify the custom url as variable.
For example, if your GitHub server is at `https://github.mycompany.com`, add the following to the command:
```
-e GITHUB.BASE_URL=https://github.mycompany.com/api/v3
```
- For GitLab:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you have a dedicated GitLab instance, you need to specify the custom url as variable:
```
-e GITLAB.URL=<your gitlab instance url>
```
- For BitBucket:
```
docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
```
For other git providers, update `CONFIG.GIT_PROVIDER` accordingly and check the [`pr_agent/settings/.secrets_template.toml`](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/.secrets_template.toml) file for the expected environment variable names and values.
### Utilizing environment variables
It is also possible to provide or override the configuration by setting the corresponding environment variables.
You can define the corresponding environment variables by following this convention: `<TABLE>__<KEY>=<VALUE>` or `<TABLE>.<KEY>=<VALUE>`.
The `<TABLE>` refers to a table/section in a configuration file and `<KEY>=<VALUE>` refers to the key/value pair of a setting in the configuration file.
For example, suppose you want to run `pr_agent` that connects to a self-hosted GitLab instance similar to an example above.
You can define the environment variables in a plain text file named `.env` with the following content:
```
CONFIG__GIT_PROVIDER="gitlab"
GITLAB__URL="<your url>"
GITLAB__PERSONAL_ACCESS_TOKEN="<your token>"
OPENAI__KEY="<your key>"
```
Then, you can run `pr_agent` using Docker with the following command:
```shell
docker run --rm -it --env-file .env codiumai/pr-agent:latest <tool> <tool parameter>
```
---
### I get an error when running the Docker image. What should I do?
If you encounter an error when running the Docker image, it is almost always due to a misconfiguration of api keys or tokens.
Note that litellm, which is used by pr-agent, sometimes returns non-informative error messages such as `APIError: OpenAIException - Connection error.`
Carefully check the api keys and tokens you provided and make sure they are correct.
Adjustments may be needed depending on your llm provider.
For example, for Azure OpenAI, additional keys are [needed](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/#azure).
Same goes for other providers, make sure to check the [documentation](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/#changing-a-model)
## Using pip package
Install the package:
@ -109,6 +35,40 @@ if __name__ == '__main__':
main()
```
## Using Docker image
A list of the relevant tools can be found in the [tools guide](../tools/ask.md).
To invoke a tool (for example `review`), you can run directly from the Docker image. Here's how:
- For GitHub:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you are using GitHub enterprise server, you need to specify the custom url as variable.
For example, if your GitHub server is at `https://github.mycompany.com`, add the following to the command:
```
-e GITHUB.BASE_URL=https://github.mycompany.com/api/v3
```
- For GitLab:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you have a dedicated GitLab instance, you need to specify the custom url as variable:
```
-e GITLAB.URL=<your gitlab instance url>
```
- For BitBucket:
```
docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
```
For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the expected environment variable names and values.
---
## Run from source

View File

@ -1,49 +0,0 @@
# PR-Agent Installation Guide
PR-Agent can be deployed in various environments and platforms. Choose the installation method that best suits your needs:
## 🖥️ Local Installation
Learn how to run PR-Agent locally using:
- Docker image
- pip package
- CLI from source code
[View Local Installation Guide →](https://qodo-merge-docs.qodo.ai/installation/locally/)
## 🐙 GitHub Integration
Set up PR-Agent with GitHub as:
- GitHub Action
- Local GitHub App
[View GitHub Integration Guide →](https://qodo-merge-docs.qodo.ai/installation/github/)
## 🦊 GitLab Integration
Deploy PR-Agent on GitLab as:
- GitLab pipeline job
- Local GitLab webhook server
[View GitLab Integration Guide →](https://qodo-merge-docs.qodo.ai/installation/gitlab/)
## 🟦 BitBucket Integration
Implement PR-Agent in BitBucket as:
- BitBucket pipeline job
- Local BitBucket server
[View BitBucket Integration Guide →](https://qodo-merge-docs.qodo.ai/installation/bitbucket/)
## 🔷 Azure DevOps Integration
Configure PR-Agent with Azure DevOps as:
- Azure DevOps pipeline job
- Local Azure DevOps webhook
[View Azure DevOps Integration Guide →](https://qodo-merge-docs.qodo.ai/installation/azure/)

View File

@ -0,0 +1,68 @@
## Getting Started with Qodo Merge Pro
Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for more details about the benefits of using Qodo Merge Pro.
Interested parties can subscribe to Qodo Merge Pro through the following [link](https://www.codium.ai/pricing/).
After subscribing, you are granted the ability to easily install the application across any of your repositories.
![Qodo Merge Pro](https://codium.ai/images/pr_agent/pr_agent_pro_install.png){width=468}
Each user who wants to use Qodo Merge Pro needs to buy a seat.
Initially, CodiumAI offers a two-week trial period at no cost, after which continued access requires each user to secure a personal seat.
Once a user acquires a seat, they gain the flexibility to use Qodo Merge Pro across any repository where it was enabled.
Users without a purchased seat who interact with a repository featuring Qodo Merge Pro are entitled to receive up to five complimentary feedback responses.
Beyond this limit, Qodo Merge Pro will cease to respond to their inquiries unless a seat is purchased.
## Install Qodo Merge Pro for GitHub Enterprise Server
To use Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
## Install Qodo Merge Pro for GitLab (Teams & Enterprise)
Since the GitLab platform does not support apps, installing Qodo Merge Pro for GitLab is a bit more involved and requires the following steps:
### Step 1
Acquire a personal, project or group level access token. Enable the “api” scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.
<figure markdown="1">
![Step 1](https://www.codium.ai/images/pr_agent/gitlab_pro_pat.png){width=750}
</figure>
Store the token in a safe place; you won't be able to access it again after it is generated.
### Step 2
Generate a shared secret and link it to the access token. Browse to [https://register.gitlab.pr-agent.codium.ai](https://register.gitlab.pr-agent.codium.ai).
Fill in your generated GitLab token and your company or personal name in the appropriate fields and click "Submit".
You should see "Success!" displayed above the Submit button, and a shared secret will be generated. Store it in a safe place; you won't be able to access it again after it is generated.
### Step 3
Install a webhook for your repository or groups, by clicking “webhooks” on the settings menu. Click the “Add new webhook” button.
<figure markdown="1">
![Step 3.1](https://www.codium.ai/images/pr_agent/gitlab_pro_add_webhook.png)
</figure>
In the webhook definition form, fill in the following fields:
URL: https://pro.gitlab.pr-agent.codium.ai/webhook
Secret token: Your CodiumAI key
Trigger: Check the comments and merge request events boxes.
Enable SSL verification: Check the box.
<figure markdown="1">
![Step 3.2](https://www.codium.ai/images/pr_agent/gitlab_pro_webhooks.png){width=750}
</figure>
### Step 4
You're all set!
Open a new merge request or add an MR comment with one of Qodo Merge's commands such as /review, /describe or /improve.

View File

@ -1,81 +0,0 @@
Qodo Merge is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by QodoAI.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for more details about the benefits of using Qodo Merge.
A complimentary two-week trial is provided to all new users. Following the trial period, user licenses (seats) are required for continued access.
To purchase user licenses, please visit our [pricing page](https://www.qodo.ai/pricing/).
Once subscribed, users can seamlessly deploy the application across any of their code repositories.
## Install Qodo Merge for GitHub
### GitHub Cloud
Qodo Merge for GitHub cloud is available for installation through the [GitHub Marketplace](https://github.com/apps/qodo-merge-pro).
![Qodo Merge](https://codium.ai/images/pr_agent/pr_agent_pro_install.png){width=468}
### GitHub Enterprise Server
To use Qodo Merge application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
### GitHub Open Source Projects
For open-source projects, Qodo Merge is available for free usage. To install Qodo Merge for your open-source repositories, use the following marketplace [link](https://github.com/apps/qodo-merge-pro-for-open-source).
## Install Qodo Merge for Bitbucket
### Bitbucket Cloud
Qodo Merge for Bitbucket Cloud is available for installation through the following [link](https://bitbucket.org/site/addons/authorize?addon_key=d6df813252c37258)
![Qodo Merge](https://qodo.ai/images/pr_agent/pr_agent_pro_bitbucket_install.png){width=468}
### Bitbucket Server
To use Qodo Merge application on your private Bitbucket Server, you will need to contact us for starting an [Enterprise](https://www.qodo.ai/pricing/) trial.
## Install Qodo Merge for GitLab (Teams & Enterprise)
Since the GitLab platform does not support apps, installing Qodo Merge for GitLab is a bit more involved and requires the following steps:
#### Step 1
Acquire a personal, project or group level access token. Enable the “api” scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.
<figure markdown="1">
![Step 1](https://www.codium.ai/images/pr_agent/gitlab_pro_pat.png){width=750}
</figure>
Store the token in a safe place; you won't be able to access it again after it is generated.
#### Step 2
Generate a shared secret and link it to the access token. Browse to [https://register.gitlab.pr-agent.codium.ai](https://register.gitlab.pr-agent.codium.ai).
Fill in your generated GitLab token and your company or personal name in the appropriate fields and click "Submit".
You should see "Success!" displayed above the Submit button, and a shared secret will be generated. Store it in a safe place; you won't be able to access it again after it is generated.
#### Step 3
Install a webhook for your repository or groups, by clicking “webhooks” on the settings menu. Click the “Add new webhook” button.
<figure markdown="1">
![Step 3.1](https://www.codium.ai/images/pr_agent/gitlab_pro_add_webhook.png)
</figure>
In the webhook definition form, fill in the following fields:
URL: https://pro.gitlab.pr-agent.codium.ai/webhook
Secret token: Your QodoAI key
Trigger: Check the comments and merge request events boxes.
Enable SSL verification: Check the box.
<figure markdown="1">
![Step 3.2](https://www.codium.ai/images/pr_agent/gitlab_pro_webhooks.png){width=750}
</figure>
#### Step 4
You're all set!
Open a new merge request or add an MR comment with one of Qodo Merge's commands such as /review, /describe or /improve.

View File

@ -1,12 +1,12 @@
## Self-hosted PR-Agent
## Self-hosted Qodo Merge
- If you self-host PR-Agent with your OpenAI (or other LLM provider) API key, it is between you and the provider. We don't send your code data to Qodo servers.
- If you self-host Qodo Merge with your OpenAI (or other LLM provider) API key, it is between you and the provider. We don't send your code data to Qodo Merge servers.
## Qodo Merge 💎
## Qodo Merge Pro 💎
- When using Qodo Merge💎, hosted by Qodo, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.
- When using Qodo Merge Pro 💎, hosted by CodiumAI, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.
- For certain clients, Qodo Merge will use Qodo's proprietary models. If this is the case, you will be notified.
- For certain clients, CodiumAI-hosted Qodo Merge Pro will use CodiumAI's proprietary models. If this is the case, you will be notified.
- No passive collection of Code and Pull Requests data — Qodo Merge will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.

View File

@ -0,0 +1,93 @@
# Overview
Qodo Merge is an open-source tool to help efficiently review and handle pull requests.
- See the [Installation Guide](./installation/index.md) for instructions on installing and running the tool on different git platforms.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running the Qodo Merge commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Tools Guide](./tools/index.md) for a detailed description of the different tools.
## Qodo Merge Docs Smart Search
To search the documentation site using natural language:
1) Comment `/help "your question"` in either:
- A pull request where Qodo Merge is installed
- A [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat)
2) Qodo Merge will respond with an [answer](https://github.com/Codium-ai/pr-agent/pull/1241#issuecomment-2365259334) that includes relevant documentation links.
## Qodo Merge Features
Qodo Merge offers extensive pull request functionalities across various git providers.
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | ✅ |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
| | Reflect and Review | ✅ | ✅ | ✅ | ✅ |
| | Update CHANGELOG.md | ✅ | ✅ | ✅ | |
| | Find Similar Issue | ✅ | | | |
| | [Add PR Documentation](./tools/documentation.md){:target="_blank"} 💎 | ✅ | ✅ | | ✅ |
| | [Generate Custom Labels](./tools/describe.md#handle-custom-labels-from-the-repos-labels-page-💎){:target="_blank"} 💎 | ✅ | ✅ | | ✅ |
| | [Analyze PR Components](./tools/analyze.md){:target="_blank"} 💎 | ✅ | ✅ | | ✅ |
| | | | | | |
| USAGE | CLI | ✅ | ✅ | ✅ | ✅ |
| | App / webhook | ✅ | ✅ | ✅ | ✅ |
| | Actions | ✅ | | | |
| | | | | |
| CORE | PR compression | ✅ | ✅ | ✅ | ✅ |
| | Repo language prioritization | ✅ | ✅ | ✅ | ✅ |
| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
| | Incremental PR review | ✅ | | | |
| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
💎 marks a feature available only in [Qodo Merge Pro](https://www.codium.ai/pricing/){:target="_blank"}
## Example Results
<hr>
#### [/describe](https://github.com/Codium-ai/pr-agent/pull/530)
<figure markdown="1">
![/describe](https://www.codium.ai/images/pr_agent/describe_new_short_main.png){width=512}
</figure>
<hr>
#### [/review](https://github.com/Codium-ai/pr-agent/pull/732#issuecomment-1975099151)
<figure markdown="1">
![/review](https://www.codium.ai/images/pr_agent/review_new_short_main.png){width=512}
</figure>
<hr>
#### [/improve](https://github.com/Codium-ai/pr-agent/pull/732#issuecomment-1975099159)
<figure markdown="1">
![/improve](https://www.codium.ai/images/pr_agent/improve_new_short_main.png){width=512}
</figure>
<hr>
#### [/generate_labels](https://github.com/Codium-ai/pr-agent/pull/530)
<figure markdown="1">
![/generate_labels](https://www.codium.ai/images/pr_agent/geneare_custom_labels_main_short.png){width=300}
</figure>
<hr>
## How it Works
The following diagram illustrates Qodo Merge tools and their flow:
![Qodo Merge Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
Check out the [PR Compression strategy](core-abilities/index.md) page for more details on how we convert a code diff to a manageable LLM prompt

View File

@ -1,21 +1,21 @@
### Overview
[Qodo Merge](https://www.codium.ai/pricing/){:target="_blank"} is a hosted version of open-source [PR-Agent](https://github.com/Codium-ai/pr-agent){:target="_blank"}. A complimentary two-week trial is offered, followed by a monthly subscription fee.
Qodo Merge is designed for companies and teams that require additional features and capabilities. It provides the following benefits:
[Qodo Merge Pro](https://www.codium.ai/pricing/) is a hosted version of Qodo Merge, provided by Qodo. A complimentary two-week trial is offered, followed by a monthly subscription fee.
Qodo Merge Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub\GitLab\BitBucket repo.
2. **Improved privacy** - No data will be stored or used to train models. Qodo Merge will employ zero data retention, and will use OpenAI and Claude accounts with zero data retention.
2. **Improved privacy** - No data will be stored or used to train models. Qodo Merge Pro will employ zero data retention, and will use OpenAI and Claude accounts with zero data retention.
3. **Improved support** - Qodo Merge users will receive priority support, and will be able to request new features and capabilities.
3. **Improved support** - Qodo Merge Pro users will receive priority support, and will be able to request new features and capabilities.
4. **Supporting self-hosted git servers** - Qodo Merge can be installed on GitHub Enterprise Server, GitLab, and BitBucket. For more information, see the [installation guide](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/).
4. **Supporting self-hosted git servers** - Qodo Merge Pro can be installed on GitHub Enterprise Server, GitLab, and BitBucket. For more information, see the [installation guide](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/).
5. **PR Chat** - Qodo Merge allows you to engage in [private chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) about your pull requests on private repositories.
5. **PR Chat** - Qodo Merge Pro allows you to engage in [private chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) about your pull requests on private repositories.
### Additional features
Here are some of the additional features and capabilities that Qodo Merge offers:
Here are some of the additional features and capabilities that Qodo Merge Pro offers:
| Feature | Description |
|----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@ -24,29 +24,28 @@ Here are some of the additional features and capabilities that Qodo Merge offers
| [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate committable code from the relevant suggestions interactively by clicking on a checkbox |
| [**Suggestions impact**](https://qodo-merge-docs.qodo.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions |
| [**CI feedback**](https://qodo-merge-docs.qodo.ai/tools/ci_feedback/) | Automatically analyze failed CI checks on GitHub and provide actionable feedback in the PR conversation, helping to resolve issues quickly |
| [**Advanced usage statistics**](https://www.codium.ai/contact/#/) | Qodo Merge offers detailed statistics at user, repository, and company levels, including metrics about Qodo Merge usage, and also general statistics and insights |
| [**Advanced usage statistics**](https://www.codium.ai/contact/#/) | Qodo Merge Pro offers detailed statistics at user, repository, and company levels, including metrics about Qodo Merge usage, and also general statistics and insights |
| [**Incorporating companies' best practices**](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) | Use the companies' best practices as reference to increase the effectiveness and the relevance of the code suggestions |
| [**Interactive triggering**](https://qodo-merge-docs.qodo.ai/tools/analyze/#example-usage) | Interactively apply different tools via the `analyze` command |
| [**Custom labels**](https://qodo-merge-docs.qodo.ai/tools/describe/#handle-custom-labels-from-the-repos-labels-page) | Define custom labels for Qodo Merge to assign to the PR |
### Additional tools
Here are additional tools that are available only for Qodo Merge users:
Here are additional tools that are available only for Qodo Merge Pro users:
| Feature | Description |
|---------------------------------------------------------------------------------------|-------------|
|---------|-------------|
| [**Custom Prompt Suggestions**](https://qodo-merge-docs.qodo.ai/tools/custom_prompt/) | Generate code suggestions based on custom prompts from the user |
| [**Analyze PR components**](https://qodo-merge-docs.qodo.ai/tools/analyze/) | Identify the components that changed in the PR, and enable interactively applying different tools to them |
| [**Tests**](https://qodo-merge-docs.qodo.ai/tools/test/) | Generate tests for code components that changed in the PR |
| [**PR documentation**](https://qodo-merge-docs.qodo.ai/tools/documentation/) | Generate docstring for code components that changed in the PR |
| [**Improve Component**](https://qodo-merge-docs.qodo.ai/tools/improve_component/) | Generate code suggestions for code components that changed in the PR |
| [**Similar code search**](https://qodo-merge-docs.qodo.ai/tools/similar_code/) | Search for similar code in the repository, organization, or entire GitHub |
| [**Code implementation**](https://qodo-merge-docs.qodo.ai/tools/implement/) | Generates implementation code from review suggestions |
### Supported languages
Qodo Merge leverages the world's leading code models - Claude 3.5 Sonnet and GPT-4.
Qodo Merge Pro leverages the world's leading code models - Claude 3.5 Sonnet and GPT-4.
As a result, its primary tools such as `describe`, `review`, and `improve`, as well as the PR-chat feature, support virtually all programming languages.
For specialized commands that require static code analysis, Qodo Merge offers support for specific languages. For more details about features that require static code analysis, please refer to the [documentation](https://qodo-merge-docs.qodo.ai/tools/analyze/#overview).
For specialized commands that require static code analysis, Qodo Merge Pro offers support for specific languages. For more details about features that require static code analysis, please refer to the [documentation](https://qodo-merge-docs.qodo.ai/tools/analyze/#overview).

View File

@ -28,7 +28,7 @@ When working from CLI, you need to apply the [configuration changes](#configurat
To enable custom labels, you need to apply the [configuration changes](#configuration-options) to the local `.pr_agent.toml` file in your repository.
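For illustration, a local `.pr_agent.toml` sketch for enabling custom labels might look like the following (the label names and descriptions are placeholders; see the `describe` tool documentation for the exact schema):
```toml
[config]
enable_custom_labels = true

[custom_labels."Bug fix"]
description = "Fixes a bug in the code"

[custom_labels."Tests"]
description = "Adds or modifies tests"
```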
#### 3. Handle custom labels from the Repo's labels page 💎
> This feature is available only in Qodo Merge
> This feature is available only in Qodo Merge Pro
* GitHub : `https://github.com/{owner}/{repo}/labels`, or click on the "Labels" tab in the issues or PRs page.
* GitLab : `https://gitlab.com/{owner}/{repo}/-/labels`, or click on "Manage" -> "Labels" on the left menu.

View File

@ -1,6 +1,6 @@
## Overview
The `help` tool provides a list of all the available tools and their descriptions.
For Qodo Merge users, it also enables triggering each tool by checking the relevant box.
For Qodo Merge Pro users, it also enables triggering each tool by checking the relevant box.
It can be invoked manually by commenting on any PR:
```

View File

@ -1,52 +0,0 @@
`Platforms supported: GitHub, GitLab, Bitbucket`
## Overview
The `implement` tool converts human code review discussions and feedback into ready-to-commit code changes.
It leverages LLM technology to transform PR comments and review suggestions into concrete implementation code, helping developers quickly turn feedback into working solutions.
## Usage Scenarios
### For Reviewers
Reviewers can request code changes by: <br>
1. Selecting the code block to be modified. <br>
2. Adding a comment with the syntax:
```
/implement <code-change-description>
```
![implement1](https://codium.ai/images/pr_agent/implement1.png){width=640}
### For PR Authors
PR authors can implement suggested changes by replying to a review comment using either: <br>
1. Add specific implementation details as described above
```
/implement <code-change-description>
```
2. Use the original review comment as instructions
```
/implement
```
![implement2](https://codium.ai/images/pr_agent/implement2.png){width=640}
### For Referencing Comments
You can reference and implement changes from any comment by:
```
/implement <link-to-review-comment>
```
![implement3](https://codium.ai/images/pr_agent/implement3.png){width=640}
Note that the implementation will occur within the review discussion thread.
**Configuration options** <br>
- Use `/implement` to implement code change within and based on the review discussion. <br>
- Use `/implement <code-change-description>` inside a review discussion to implement specific instructions. <br>
- Use `/implement <link-to-review-comment>` to indirectly call the tool from any comment. <br>

View File

@ -9,7 +9,7 @@ The tool can be triggered automatically every time a new PR is [opened](../usage
![code_suggestions_as_comment_open.png](https://codium.ai/images/pr_agent/code_suggestions_as_comment_open.png){width=512}
Note that the `Apply this suggestion` checkbox, which interactively converts a suggestion into a committable code comment, is available only for Qodo Merge💎 users.
Note that the `Apply this suggestion` checkbox, which interactively converts a suggestion into a committable code comment, is available only for Qodo Merge Pro 💎 users.
## Example usage
@ -54,7 +54,7 @@ num_code_suggestions_per_chunk = ...
### Assessing Impact 💎
Note that Qodo Merge tracks two types of implementations:
Note that Qodo Merge pro tracks two types of implementations:
- Direct implementation - when the user directly applies the suggestion by clicking the `Apply` checkbox.
- Indirect implementation - when the user implements the suggestion in their IDE environment. In this case, Qodo Merge will utilize, after each commit, a dedicated logic to identify if a suggestion was implemented, and will mark it as implemented.
@ -70,7 +70,7 @@ In post-process, Qodo Merge counts the number of suggestions that were implement
## Suggestion tracking 💎
`Platforms supported: GitHub, GitLab`
Qodo Merge employs a novel detection system to automatically [identify](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) AI code suggestions that PR authors have accepted and implemented.
Qodo Merge employs an novel detection system to automatically [identify](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) AI code suggestions that PR authors have accepted and implemented.
Accepted suggestions are also automatically documented in a dedicated wiki page called `.pr_agent_accepted_suggestions`, allowing users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.
An example [result](https://github.com/Codium-ai/pr-agent/wiki/.pr_agent_accepted_suggestions):
@ -95,148 +95,6 @@ This feature is controlled by a boolean configuration parameter: `pr_code_sugges
Instead, we leverage a dedicated private page, within your repository wiki, to track suggestions. This approach offers convenient secure suggestion tracking while avoiding pull requests or any noise to the main repository.
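For reference, a minimal configuration sketch for this behavior (the parameter and its default also appear in the configuration options table below):
```toml
[pr_code_suggestions]
wiki_page_accepted_suggestions = true  # set to false to disable the dedicated wiki tracking page
```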
## `Extra instructions` and `best practices`
The `improve` tool can be further customized by providing additional instructions and best practices to the AI model.
### Extra instructions
>`Platforms supported: GitHub, GitLab, Bitbucket, Azure DevOps`
You can use the `extra_instructions` configuration option to give the AI model additional instructions for the `improve` tool.
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter.
Examples for possible instructions:
```toml
[pr_code_suggestions]
extra_instructions="""\
(1) Answer in japanese
(2) Don't suggest to add try-except block
(3) Ignore changes in toml files
...
"""
```
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.
### Best practices 💎
>`Platforms supported: GitHub, GitLab, Bitbucket`
Another option to give additional guidance to the AI model is by creating a `best_practices.md` file, either in your repository's root directory or as a [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) (we recommend the wiki page, as editing and maintaining it over time is easier).
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.
The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will create additional suggestions, with a dedicated label: `Organization
best practice`.
Example for a python `best_practices.md` content:
```markdown
## Project best practices
- Make sure that I/O operations are encapsulated in a try-except block
- Use the `logging` module for logging instead of `print` statements
- Use `is` and `is not` to compare with `None`
- Use `if __name__ == '__main__':` to run the code only when the script is executed
- Use `with` statement to open files
...
```
Tips for writing an effective `best_practices.md` file:
- Write clearly and concisely
- Include brief code examples when helpful
- Focus on project-specific guidelines that will result in relevant suggestions you actually want to get
- Keep the file relatively short, under 800 lines, since:
- AI models may not process very long documents effectively
- Long files tend to contain generic guidelines already known to AI
#### Local and global best practices
By default, Qodo Merge will look for a local `best_practices.md` wiki file in the root of the relevant local repo.
If you also want to enable a global `best_practices.md` wiki file, first set the following in the global configuration file:
```toml
[best_practices]
enable_global_best_practices = true
```
Then, create a `best_practices.md` wiki file in the root of [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
#### Best practices for multiple languages
For a git organization working with multiple programming languages, you can maintain a centralized global `best_practices.md` file containing language-specific guidelines.
When reviewing pull requests, Qodo Merge automatically identifies the programming language and applies the relevant best practices from this file.
To do this, structure your `best_practices.md` file using the following format:
```
# [Python]
...
# [Java]
...
# [JavaScript]
...
```
#### Dedicated label for best practices suggestions
Best practice suggestions are labeled as `Organization best practice` by default.
To customize this label, modify it in your configuration file:
```toml
[best_practices]
organization_name = "..."
```
And the label will be: `{organization_name} best practice`.
#### Example results
![best_practice](https://codium.ai/images/pr_agent/org_best_practice.png){width=512}
### Auto best practices 💎
>`Platforms supported: GitHub`
'Auto best practices' is a novel Qodo Merge capability that:
1. Identifies recurring patterns from accepted suggestions
2. **Automatically** generates [best practices page](https://github.com/qodo-ai/pr-agent/wiki/.pr_agent_auto_best_practices) based on what your team consistently values
3. Applies these learned patterns to future code reviews
This creates an automatic feedback loop where the system continuously learns from your team's choices to provide increasingly relevant suggestions.
The system maintains two analysis phases:
- Open exploration for new issues
- Targeted checking against established best practices
Note that when [custom best practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) exist, Qodo Merge will still generate an 'auto best practices' wiki file, though it won't use it in the `improve` tool.
Learn more about utilizing 'auto best practices' in our [detailed guide](https://qodo-merge-docs.qodo.ai/core-abilities/auto_best_practices/).
#### Relevant configurations
```toml
[auto_best_practices]
# Disable all auto best practices usage or generation
enable_auto_best_practices = true
# Disable usage of auto best practices file in the 'improve' tool
utilize_auto_best_practices = true
# Extra instructions to the auto best practices generation prompt
extra_instructions = ""
# Max number of patterns to be detected
max_patterns = 5
```
### Combining `extra instructions` and `best practices` 💎
The `extra instructions` configuration is more related to the `improve` tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest to add try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)
In contrast, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
Using a combination of both can help the AI model to provide relevant and tailored suggestions.
## Usage Tips
### Implementing the proposed code suggestions
@ -333,6 +191,73 @@ This approach has two main benefits:
Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 lines of code), Qodo Merge will be able to process the entire code in a single call.
### 'Extra instructions' and 'best practices'
#### Extra instructions
>`Platforms supported: GitHub, GitLab, Bitbucket`
You can use the `extra_instructions` configuration option to give the AI model additional instructions for the `improve` tool.
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify relevant aspects that you want the model to focus on.
Examples for possible instructions:
```toml
[pr_code_suggestions]
extra_instructions="""\
(1) Answer in japanese
(2) Don't suggest to add try-except block
(3) Ignore changes in toml files
...
"""
```
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.
#### Best practices 💎
>`Platforms supported: GitHub, GitLab`
Another option to give additional guidance to the AI model is by creating a dedicated [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) called `best_practices.md`.
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.
The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will suggest improvements accordingly, with a dedicated label: `Organization
best practice`.
Example for a `best_practices.md` content can be found [here](https://github.com/Codium-ai/pr-agent/blob/main/docs/docs/usage-guide/EXAMPLE_BEST_PRACTICE.md) (adapted from Google's [pyguide](https://google.github.io/styleguide/pyguide.html)).
This file is only an example. Since it is used as a prompt for an AI model, we want to emphasize the following:
- It should be written in a clear and concise manner
- If needed, it should give short relevant code snippets as examples
- Recommended to limit the text to 800 lines or fewer. Here's why:
1) Extremely long best practices documents may not be fully processed by the AI model.
2) A lengthy file probably represents a more "**generic**" set of guidelines, which the AI model is already familiar with. The objective is to focus on a more targeted set of guidelines tailored to the specific needs of this project.
##### Local and global best practices
By default, Qodo Merge will look for a local `best_practices.md` wiki file in the root of the relevant local repo.
If you also want to enable a global `best_practices.md` wiki file, first set the following in the global configuration file:
```toml
[best_practices]
enable_global_best_practices = true
```
Then, create a `best_practices.md` wiki file in the root of [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
##### Example results
![best_practice](https://codium.ai/images/pr_agent/org_best_practice.png){width=512}
#### How to combine `extra instructions` and `best practices`
The `extra instructions` configuration is more related to the `improve` tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest adding a try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)
In contrast, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
Using a combination of both can help the AI model to provide relevant and tailored suggestions.
## Configuration options
??? example "General options"
@ -350,14 +275,14 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
<td><b>dual_publishing_score_threshold</b></td>
<td>Minimum score threshold for suggestions to be presented as committable PR comments in addition to the table. Default is -1 (disabled).</td>
</tr>
<tr>
<td><b>focus_only_on_problems</b></td>
<td>If set to true, suggestions will focus primarily on identifying and fixing code problems, and less on style considerations like best practices, maintainability, or readability. Default is true.</td>
</tr>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
</tr>
<tr>
<td><b>self_reflect_on_suggestions</b></td>
<td>If set to true, the improve tool will calculate an importance score for each suggestion [1-10], and sort the suggestion labels group based on this score. Default is true.</td>
</tr>
<tr>
<td><b>suggestions_score_threshold</b></td>
<td> Any suggestion with importance score less than this threshold will be removed. Default is 0. Highly recommend not to set this value above 7-8, since above it may clip relevant suggestions that can be useful. </td>
@ -378,10 +303,6 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
<td><b>wiki_page_accepted_suggestions</b></td>
<td>If set to true, the tool will automatically track accepted suggestions in a dedicated wiki page called `.pr_agent_accepted_suggestions`. Default is true.</td>
</tr>
<tr>
<td><b>allow_thumbs_up_down</b></td>
<td>If set to true, all code suggestions will have thumbs up and thumbs down buttons, to encourage users to provide feedback on the suggestions. Default is false.</td>
</tr>
</table>
??? example "Params for number of suggestions and AI calls"
@ -399,6 +320,10 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
<td><b>max_number_of_calls</b></td>
<td>Maximum number of chunks. Default is 3.</td>
</tr>
<tr>
<td><b>rank_extended_suggestions</b></td>
<td>If set to true, the tool will rank the suggestions, based on importance. Default is true.</td>
</tr>
</table>
## A note on code suggestions quality

View File

@ -3,7 +3,7 @@
Here is a list of Qodo Merge tools, each with a dedicated page that explains how to use it:
| Tool | Description |
|------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
|------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------|
| **[PR Description (`/describe`](./describe.md))** | Automatically generating PR description - title, type, summary, code walkthrough and labels |
| **[PR Review (`/review`](./review.md))** | Adjustable feedback about the PR, possible issues, security concerns, review effort and more |
| **[Code Suggestions (`/improve`](./improve.md))** | Code suggestions for improving the PR |
@ -13,11 +13,10 @@ Here is a list of Qodo Merge tools, each with a dedicated page that explains how
| **[Help (`/help`](./help.md))** | Provides a list of all the available tools. Also enables to trigger them interactively (💎) |
| **💎 [Add Documentation (`/add_docs`](./documentation.md))** | Generates documentation to methods/functions/classes that changed in the PR |
| **💎 [Generate Custom Labels (`/generate_labels`](./custom_labels.md))** | Generates custom labels for the PR, based on specific guidelines defined by the user |
| **💎 [Analyze (`/analyze`](./analyze.md))** | Identify code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component|
| **💎 [Test (`/test`](./test.md))** | Generates tests for a selected component, based on the PR code changes |
| **💎 [Analyze (`/analyze`](./analyze.md))** | Identify code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component |
| **💎 [Custom Prompt (`/custom_prompt`](./custom_prompt.md))** | Automatically generates custom suggestions for improving the PR code, based on specific guidelines defined by the user |
| **💎 [Generate Tests (`/test component_name`](./test.md))** | Automatically generates unit tests for a selected component, based on the PR code changes |
| **💎 [Improve Component (`/improve_component component_name`](./improve_component.md))** | Generates code suggestions for a specific code component that changed in the PR |
| **💎 [CI Feedback (`/checks ci_job`](./ci_feedback.md))** | Automatically generates feedback and analysis for a failed CI job |
| **💎 [Implement (`/implement`](./implement.md))** | Generates implementation code from review suggestions |
Note that the tools marked with 💎 are available only for Qodo Merge users.
Note that the tools marked with 💎 are available only for Qodo Merge Pro users.

View File

@ -39,13 +39,54 @@ pr_commands = [
]
[pr_reviewer]
extra_instructions = "..."
num_code_suggestions = ...
...
```
- The `pr_commands` lists commands that will be executed automatically when a PR is opened.
- The `[pr_reviewer]` section contains the configurations for the `review` tool you want to edit (if any).
[//]: # ()
[//]: # (### Incremental Mode)
[//]: # (Incremental review only considers changes since the last Qodo Merge review. This can be useful when working on the PR in an iterative manner, and you want to focus on the changes since the last review instead of reviewing the entire PR again.)
[//]: # (For invoking the incremental mode, the following command can be used:)
[//]: # (```)
[//]: # (/review -i)
[//]: # (```)
[//]: # (Note that the incremental mode is only available for GitHub.)
[//]: # ()
[//]: # (![incremental review]&#40;https://codium.ai/images/pr_agent/incremental_review_2.png&#41;{width=512})
[//]: # (### PR Reflection)
[//]: # ()
[//]: # (By invoking:)
[//]: # (```)
[//]: # (/reflect_and_review)
[//]: # (```)
[//]: # (The tool will first ask the author questions about the PR, and will guide the review based on their answers.)
[//]: # ()
[//]: # (![reflection questions]&#40;https://codium.ai/images/pr_agent/reflection_questions.png&#41;{width=512})
[//]: # ()
[//]: # (![reflection answers]&#40;https://codium.ai/images/pr_agent/reflection_answers.png&#41;{width=512})
[//]: # ()
[//]: # (![reflection insights]&#40;https://codium.ai/images/pr_agent/reflection_insights.png&#41;{width=512})
## Configuration options
@ -53,12 +94,16 @@ extra_instructions = "..."
<table>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.</td>
<td><b>num_code_suggestions</b></td>
<td>Number of code suggestions provided by the 'review' tool. Default is 0, meaning no code suggestions will be provided by the `review` tool.</td>
</tr>
<tr>
<td><b>final_update_message</b></td>
<td>When set to true, updating a persistent review comment during online commenting will automatically add a short comment with a link to the updated review in the pull request. Default is true.</td>
<td><b>inline_code_comments</b></td>
<td>If set to true, the tool will publish the code suggestions as comments on the code diff. Default is false. Note that you need to set `num_code_suggestions`>0 to get code suggestions </td>
</tr>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.</td>
</tr>
<tr>
<td><b>extra_instructions</b></td>
@ -95,7 +140,7 @@ extra_instructions = "..."
</tr>
<tr>
<td><b>require_ticket_analysis_review</b></td>
<td>If set to true, and the PR contains a GitHub or Jira ticket link, the tool will add a section that checks if the PR in fact fulfilled the ticket requirements. Default is true.</td>
<td>If set to true, and the PR contains a GitHub ticket number, the tool will add a section that checks if the PR in fact fulfilled the ticket requirements. Default is true.</td>
</tr>
</table>
@ -123,6 +168,10 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
<td><b>enable_auto_approval</b></td>
<td>If set to true, the tool will approve the PR when invoked with the 'auto_approve' command. Default is false. This flag can be changed only from a configuration file.</td>
</tr>
<tr>
<td><b>maximal_review_effort</b></td>
<td>Maximal effort level for auto-approval. If the PR's estimated review effort is above this threshold, the auto-approval will not run. Default is 5.</td>
</tr>
</table>
## Usage Tips
@ -140,9 +189,9 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
!!! tip "Automation"
When you first install Qodo Merge app, the [default mode](../usage-guide/automations_and_usage.md#github-app-automatic-tools-when-a-new-pr-is-opened) for the `review` tool is:
```
pr_commands = ["/review", ...]
pr_commands = ["/review --pr_reviewer.num_code_suggestions=0", ...]
```
Meaning the `review` tool will run automatically on every PR, without any additional configurations.
Meaning the `review` tool will run automatically on every PR, without providing code suggestions.
Edit this field to enable/disable the tool, or to change the configurations used.
!!! tip "Possible labels from the review tool"
@ -194,8 +243,19 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
Qodo Merge will automatically approve the PR, and add a comment with the approval.
!!! tip "Code suggestions"
You can also enable auto-approval only if the PR meets certain requirements, such as that the `estimated_review_effort` label is equal to or below a certain threshold, by adjusting the flag:
```
[pr_reviewer]
maximal_review_effort = 5
```
The `review` tool previously included a legacy feature for providing code suggestions (controlled by `--pr_reviewer.num_code_suggestions`). This functionality has been deprecated and replaced by the [`improve`](./improve.md) tool, which offers higher quality and more actionable code suggestions.
[//]: # (!!! tip "Code suggestions")
[//]: # ()
[//]: # ( If you set `num_code_suggestions`>0 , the `review` tool will also provide code suggestions.)
[//]: # ( )
[//]: # ( Notice If you are interested **only** in the code suggestions, it is recommended to use the [`improve`]&#40;./improve.md&#41; feature instead, since it is a dedicated only to code suggestions, and usually gives better results.)
[//]: # ( Use the `review` tool if you want to get more comprehensive feedback, which includes code suggestions as well.)

View File

@ -49,10 +49,9 @@ It can be invoked automatically from the analyze table, can be accessed by:
/analyze
```
Choose the components you want to find similar code for, and click on the `similar` checkbox.
![analyze similar](https://codium.ai/images/pr_agent/analyze_similar.png){width=768}
You can search for similar code either within the organization's codebase or globally, which includes open-source repositories. Each result will include the relevant code components along with their associated license details.
If you are looking to search for similar code in the organization's codebase, you can click on the `Organization` checkbox, and it will invoke a new search command just for the organization's codebase.
![similar code global](https://codium.ai/images/pr_agent/similar_code_global.png){width=768}

View File

@ -17,8 +17,8 @@ The tool will generate tests for the selected component (if no component is stat
(Example taken from [here](https://github.com/Codium-ai/pr-agent/pull/598#issuecomment-1913679429)):
**Notes** <br>
- The following languages are currently supported: Python, Java, C++, JavaScript, TypeScript, C#. <br>
**Notes**
- Languages that are currently supported by the tool: Python, Java, C++, JavaScript, TypeScript, C#.
- This tool can also be triggered interactively by using the [`analyze`](./analyze.md) tool.

View File

@ -17,4 +17,3 @@ Under the section `pr_update_changelog`, the [configuration file](https://github
- `push_changelog_changes`: whether to push the changes to CHANGELOG.md, or just print them. Default is false (print only).
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore changes in ..."
- `add_pr_link`: whether the model should try to add a link to the PR in the changelog. Default is true.

View File

@ -1,38 +1,18 @@
## Qodo Merge Models
## Qodo Merge Pro Models
The default models used by Qodo Merge are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.
The default models used by Qodo Merge Pro are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.
### Selecting a Specific Model
Users can configure Qodo Merge Pro to use solely a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.
Users can configure Qodo Merge to use a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.
The models supported by Qodo Merge are:
- `claude-3-5-sonnet`
- `gpt-4o`
- `deepseek-r1`
- `o3-mini`
To restrict Qodo Merge to using only `Claude-3.5-sonnet`, add this setting:
For example, to restrict Qodo Merge Pro to using only `Claude-3.5-sonnet`, add this setting:
```
[config]
model="claude-3-5-sonnet"
```
To restrict Qodo Merge to using only `GPT-4o`, add this setting:
Or to restrict Qodo Merge Pro to using only `GPT-4o`, add this setting:
```
[config]
model="gpt-4o"
```
To restrict Qodo Merge to using only `deepseek-r1`, add this setting:
```
[config]
model="deepseek/r1"
```
To restrict Qodo Merge to using only `o3-mini`, add this setting:
```
[config]
model="o3-mini"
```

View File

@ -1,5 +1,5 @@
## Show possible configurations
The possible configurations of Qodo Merge are stored in [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml){:target="_blank"}.
The possible configurations of Qodo Merge are stored in [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml).
In the [tools](https://qodo-merge-docs.qodo.ai/tools/) page you can find explanations on how to use these configurations for each tool.
To print all the available configurations as a comment on your PR, you can use the following command:
@ -138,17 +138,7 @@ LANGSMITH_BASE_URL=<url>
## Ignoring automatic commands in PRs
Qodo Merge allows you to automatically ignore certain PRs based on various criteria:
- PRs with specific titles (using regex matching)
- PRs between specific branches (using regex matching)
- PRs that don't include changes from specific folders (using regex matching)
- PRs containing specific labels
- PRs opened by specific users
### Example usage
#### Ignoring PRs with specific titles
In some cases, you may want to automatically ignore specific PRs. Qodo Merge enables you to ignore PRs with a specific title, or from/to specific branches (regex matching).
To ignore PRs with a specific title such as "[Bump]: ...", you can add the following to your `configuration.toml` file:
@ -159,7 +149,6 @@ ignore_pr_title = ["\\[Bump\\]"]
Where the `ignore_pr_title` is a list of regex patterns to match the PR title you want to ignore. Default is `ignore_pr_title = ["^\\[Auto\\]", "^Auto"]`.
#### Ignoring PRs between specific branches
To ignore PRs from specific source or target branches, you can add the following to your `configuration.toml` file:
@ -171,46 +160,3 @@ ignore_pr_target_branches = ["qa"]
Where the `ignore_pr_source_branches` and `ignore_pr_target_branches` are lists of regex patterns to match the source and target branches you want to ignore.
They are not mutually exclusive; you can use them together or separately.
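For reference, a minimal sketch of this configuration (the branch patterns below are illustrative):
```toml
[config]
ignore_pr_source_branches = ["release/.*"]
ignore_pr_target_branches = ["qa"]
```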
#### Ignoring PRs that don't include changes from specific folders
To allow only specific folders (often needed in large monorepos), set:
```
[config]
allow_only_specific_folders=['folder1','folder2']
```
For the configuration above, automatic feedback will only be triggered when the PR changes include files from 'folder1' or 'folder2'.
#### Ignoring PRs containing specific labels
To ignore PRs containing specific labels, you can add the following to your `configuration.toml` file:
```
[config]
ignore_pr_labels = ["do-not-merge"]
```
Where `ignore_pr_labels` is a list of labels; when any of these labels is present in the PR, the PR will be ignored.
#### Ignoring PRs from specific users
Qodo Merge automatically identifies and ignores pull requests created by bots using:
- GitHub's native bot detection system
- Name-based pattern matching
While this detection is robust, it may not catch all cases, particularly when:
- Bots are registered as regular user accounts
- Bot names don't match common patterns
To supplement the automatic bot detection, you can manually specify users to ignore. Add the following to your `configuration.toml` file to ignore PRs from specific users:
```
[config]
ignore_pr_authors = ["my-special-bot-user", ...]
```
Where the `ignore_pr_authors` is a list of usernames that you want to ignore.

View File

@ -1,5 +1,4 @@
## Local repo (CLI)
When running from your locally cloned Qodo Merge repo (CLI), your local configuration file will be used.
Examples of invoking the different tools via the CLI:
@ -36,29 +35,9 @@ This is useful for debugging or experimenting with different tools.
Default is "github".
### CLI Health Check
To verify that Qodo Merge has been configured correctly, you can run this health check command from the repository root:
```bash
python -m tests.health_test.main
```
If the health check passes, you will see the following output at the end of the run:
```
========
Health test passed successfully
========
```
Before running the health check, ensure you have:
- Configured your [LLM provider](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/)
- Added a valid GitHub token to your configuration file
## Online usage
### Online usage
Online usage means invoking Qodo Merge tools by [comments](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR.
Commands for invoking the different tools via comments:
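For example (a partial, illustrative list; each tool page linked in this documentation shows its exact comment command):
```
/describe
/review
/improve
/analyze
/help
```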
@ -79,80 +58,59 @@ For example, if you want to edit the `review` tool configurations, you can run:
Any configuration value in the [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) can be similarly edited. Comment `/config` to see the list of available configurations.
## Qodo Merge Automatic Feedback
## GitHub App
### Disabling all automatic feedback
To easily disable all automatic feedback from Qodo Merge (GitHub App, GitLab Webhook, BitBucket App, Azure DevOps Webhook), set in a configuration file:
```toml
[config]
disable_auto_feedback = true
```
When this parameter is set to `true`, Qodo Merge will not run any automatic tools (like `describe`, `review`, `improve`) when a new PR is opened, or when new code is pushed to an open PR.
### GitHub App
!!! note "Configurations for Qodo Merge"
Qodo Merge for GitHub is an App, hosted by Qodo. So all the instructions below are relevant also for Qodo Merge users.
!!! note "Configurations for Qodo Merge Pro"
Qodo Merge Pro for GitHub is an App, hosted by CodiumAI. So all the instructions below are relevant also for Qodo Merge Pro users.
Same goes for [GitLab webhook](#gitlab-webhook) and [BitBucket App](#bitbucket-app) sections.
#### GitHub app automatic tools when a new PR is opened
### GitHub app automatic tools when a new PR is opened
The [github_app](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L220) section defines GitHub app specific configurations.
The [github_app](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L108) section defines GitHub app specific configurations.
The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when a new PR is opened:
```toml
The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when a new PR is opened.
```
[github_app]
pr_commands = [
"/describe",
"/review",
"/describe --pr_description.final_update_message=false",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
```
This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the `describe`, `review` and `improve` tools.
For the `review` tool, for example, the `num_code_suggestions` parameter will be set to 0.
You can override the default tool parameters by using one the three options for a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/): **wiki**, **local**, or **global**.
For example, if your configuration file contains:
```toml
For example, if your local `.pr_agent.toml` file contains:
```
[pr_description]
generate_ai_title = true
```
Every time you run the `describe` tool, including automatic runs, the PR title will be generated by the AI.
Every time you run the `describe` tool (including automatic runs) the PR title will be generated by the AI.
You can customize configurations specifically for automated runs by using the `--config_path=<value>` parameter.
For instance, to modify the `review` tool settings only for newly opened PRs, use:
```toml
To cancel the automatic run of all the tools, set:
```
[github_app]
pr_commands = [
"/describe",
"/review --pr_reviewer.extra_instructions='focus on the file: ...'",
"/improve",
]
pr_commands = []
```
#### GitHub app automatic tools for push actions (commits to an open PR)
### GitHub app automatic tools for push actions (commits to an open PR)
In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.
The configuration toggle `handle_push_trigger` can be used to enable this feature.
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the PR.
```toml
```
[github_app]
handle_push_trigger = true
push_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
]
```
This means that when new code is pushed to the PR, Qodo Merge will run the `describe` and `review` tools, with the specified parameters.
### GitHub Action
## GitHub Action
`GitHub Action` is a different way to trigger Qodo Merge tools, and uses a different configuration mechanism than `GitHub App`.<br>
You can configure settings for `GitHub Action` by adding environment variables under the env section in `.github/workflows/pr_agent.yml` file.
Specifically, start by setting the following environment variables:
@ -163,7 +121,7 @@ Specifically, start by setting the following environment variables:
github_action_config.auto_review: "true" # enable\disable auto review
github_action_config.auto_describe: "true" # enable\disable auto describe
github_action_config.auto_improve: "true" # enable\disable auto improve
github_action_config.pr_actions: '["opened", "reopened", "ready_for_review", "review_requested"]'
github_action_config.pr_actions: ["opened", "reopened", "ready_for_review", "review_requested"]
```
`github_action_config.auto_review`, `github_action_config.auto_describe` and `github_action_config.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
If not set, the default configuration is for all three tools to run automatically when a new PR is opened.
@ -178,22 +136,19 @@ The JSON structure is equivalent to the yaml data structure defined in [pr_revie
Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) in the root of your repo
For example, you can set an environment variable: `pr_description.publish_labels=false`, or add a `.pr_agent.toml` file with the following content:
```toml
```
[pr_description]
publish_labels = false
```
to prevent Qodo Merge from publishing labels when running the `describe` tool.
### GitLab Webhook
## GitLab Webhook
After setting up a GitLab webhook, to control which commands will run automatically when a new MR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
```toml
```
[gitlab]
pr_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
```
@ -201,24 +156,24 @@ pr_commands = [
the GitLab webhook can also respond to new code that is pushed to an open MR.
The configuration toggle `handle_push_trigger` can be used to enable this feature.
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the MR.
```toml
```
[gitlab]
handle_push_trigger = true
push_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
]
```
Note that to use the 'handle_push_trigger' feature, you also need to give the GitLab webhook the "Push events" scope.
### BitBucket App
## BitBucket App
Similar to GitHub app, when running Qodo Merge from BitBucket App, the default [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.
By uploading a local `.pr_agent.toml` file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload `.pr_agent.toml` prior to creating a PR, in order for the configuration to take effect.
For example, if your local `.pr_agent.toml` file contains:
```toml
```
[pr_reviewer]
extra_instructions = "Answer in japanese"
```
@ -227,39 +182,29 @@ Each time you invoke a `/review` tool, it will use the extra instructions you se
Note that among other limitations, BitBucket provides relatively low rate-limits for applications (up to 1000 requests per hour), and does not provide an API to track the actual rate-limit usage.
If you experience a lack of responses from Qodo Merge, you might want to set: `bitbucket_app.avoid_full_files=true` in your configuration file.
If you experience lack of responses from Qodo Merge, you might want to set: `bitbucket_app.avoid_full_files=true` in your configuration file.
This will prevent Qodo Merge from acquiring the full file content, and will only use the diff content. This will reduce the number of requests made to BitBucket, at the cost of small decrease in accuracy, as dynamic context will not be applicable.
#### BitBucket Self-Hosted App automatic tools
### BitBucket Self-Hosted App automatic tools
To control which commands will run automatically when a new PR is opened, you can set the `pr_commands` parameter in the configuration file:
Specifically, set the following values:
```toml
```
[bitbucket_app]
pr_commands = [
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]
```
Note that specifically for Bitbucket, we recommend using `--pr_code_suggestions.suggestions_score_threshold=7`, and this is the default value we set for Bitbucket.
Since this platform only supports inline code suggestions, we want to present only a limited number of high-scoring suggestions.
To enable BitBucket app to respond to each **push** to the PR, set (for example):
```toml
[bitbucket_app]
handle_push_trigger = true
push_commands = [
"/describe",
"/review",
]
```
### Azure DevOps provider
## Azure DevOps provider
To use the Azure DevOps provider, use the following settings in configuration.toml:
```toml
```
[config]
git_provider="azure"
```
@ -278,14 +223,14 @@ org = "https://dev.azure.com/YOUR_ORGANIZATION/"
# pat = "YOUR_PAT_TOKEN" needed only if using PAT for authentication
```
#### Azure DevOps Webhook
### Azure DevOps Webhook
To control which commands will run automatically when a new PR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
```toml
```
[azure_devops_server]
pr_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
```

View File

@ -1,10 +1,11 @@
## Changing a model in PR-Agent
## Changing a model
See [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py) for a list of available models.
To use a different model than the default (GPT-4), you need to edit the following fields in the [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L2):
```
[config]
model = "..."
model_turbo = "..."
fallback_models = ["..."]
```
@ -26,43 +27,57 @@ deployment_id = "" # The deployment name you chose when you deployed the engine
and set in your configuration file:
```
[config]
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-4o)
fallback_models=["..."]
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
model_turbo="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
```
### Ollama
You can run models locally through either [VLLM](https://docs.litellm.ai/docs/providers/vllm) or [Ollama](https://docs.litellm.ai/docs/providers/ollama)
E.g. to use a new model locally via Ollama, set in `.secrets.toml` or in a configuration file:
```
[config]
model = "ollama/qwen2.5-coder:32b"
fallback_models=["ollama/qwen2.5-coder:32b"]
custom_model_max_tokens=128000 # set the maximal input tokens for the model
duplicate_examples=true # will duplicate the examples in the prompt, to help the model to generate structured output
[ollama]
api_base = "http://localhost:11434" # or whatever port you're running Ollama on
```
!!! note "Local models vs commercial models"
Qodo Merge is compatible with almost any AI model, but analyzing complex code repositories and pull requests requires a model specifically optimized for code analysis.
Commercial models such as GPT-4, Claude Sonnet, and Gemini have demonstrated robust capabilities in generating structured output for code analysis tasks with large input. In contrast, most open-source models currently available (as of January 2025) face challenges with these complex tasks.
Based on our testing, local open-source models are suitable for experimentation and learning purposes, but they are not suitable for production-level code analysis tasks.
Hence, for production workflows and real-world usage, we recommend using commercial models.
### Hugging Face
**Local**
You can run Hugging Face models locally through either [VLLM](https://docs.litellm.ai/docs/providers/vllm) or [Ollama](https://docs.litellm.ai/docs/providers/ollama)
E.g. to use a new Hugging Face model locally via Ollama, set:
```
[__init__.py]
MAX_TOKENS = {
"model-name-on-ollama": <max_tokens>
}
e.g.
MAX_TOKENS={
...,
"ollama/llama2": 4096
}
[config] # in configuration.toml
model = "ollama/llama2"
model_turbo = "ollama/llama2"
fallback_models=["ollama/llama2"]
[ollama] # in .secrets.toml
api_base = ... # the base url for your Hugging Face inference endpoint
# e.g. if running Ollama locally, you may use:
api_base = "http://localhost:11434/"
```
### Inference Endpoints
To use a new model with Hugging Face Inference Endpoints, for example, set:
```
[__init__.py]
MAX_TOKENS = {
"model-name-on-huggingface": <max_tokens>
}
e.g.
MAX_TOKENS={
...,
"meta-llama/Llama-2-7b-chat-hf": 4096
}
[config] # in configuration.toml
model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
custom_model_max_tokens=... # set the maximal input tokens for the model
[huggingface] # in .secrets.toml
key = ... # your Hugging Face api key
@ -76,6 +91,7 @@ To use Llama2 model with Replicate, for example, set:
```
[config] # in configuration.toml
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
[replicate] # in .secrets.toml
key = ...
@ -91,6 +107,7 @@ To use Llama3 model with Groq, for example, set:
```
[config] # in configuration.toml
model = "llama3-70b-8192"
model_turbo = "llama3-70b-8192"
fallback_models = ["groq/llama3-70b-8192"]
[groq] # in .secrets.toml
key = ... # your Groq api key
@ -104,6 +121,7 @@ To use Google's Vertex AI platform and its associated models (chat-bison/codecha
```
[config] # in configuration.toml
model = "vertex_ai/codechat-bison"
model_turbo = "vertex_ai/codechat-bison"
fallback_models="vertex_ai/codechat-bison"
[vertexai] # in .secrets.toml
@ -115,28 +133,13 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat
If you do want to set explicit credentials, then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
### Google AI Studio
To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant models in the configuration section of the configuration file:
```toml
[config] # in configuration.toml
model="google_ai_studio/gemini-1.5-flash"
fallback_models=["google_ai_studio/gemini-1.5-flash"]
[google_ai_studio] # in .secrets.toml
gemini_api_key = "..."
```
If you don't want to set the API key in the .secrets.toml file, you can set the `GOOGLE_AI_STUDIO.GEMINI_API_KEY` environment variable.
### Anthropic
To use Anthropic models, set the relevant models in the configuration section of the configuration file:
```
[config]
model="anthropic/claude-3-opus-20240229"
model_turbo="anthropic/claude-3-opus-20240229"
fallback_models=["anthropic/claude-3-opus-20240229"]
```
@ -153,6 +156,7 @@ To use Amazon Bedrock and its foundational models, add the below configuration:
```
[config] # in configuration.toml
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
fallback_models=["bedrock/anthropic.claude-v2:1"]
```
@ -166,25 +170,6 @@ drop_params = true
AWS session is automatically authenticated from your environment, but you can also explicitly set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION_NAME` environment variables. Please refer to [this document](https://litellm.vercel.app/docs/providers/bedrock) for more details.
### DeepSeek
To use deepseek-chat model with DeepSeek, for example, set:
```toml
[config] # in configuration.toml
model = "deepseek/deepseek-chat"
fallback_models=["deepseek/deepseek-chat"]
```
and fill in your key
```toml
[deepseek] # in .secrets.toml
key = ...
```
(you can obtain a deepseek-chat key from [here](https://platform.deepseek.com))
### Custom models
If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py), you can still use it as a custom model:
@ -193,6 +178,7 @@ If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agen
```
[config]
model="custom_model_name"
model_turbo="custom_model_name"
fallback_models=["custom_model_name"]
```
(2) Set the maximal tokens for the model:
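A minimal sketch of this step (the token value below is illustrative; `custom_model_max_tokens` is the same setting shown in the Ollama and Hugging Face examples above):
```toml
[config]
custom_model_max_tokens = 32000  # set to your model's actual context window
```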

View File

@ -20,7 +20,7 @@ In terms of precedence, wiki configurations will override local configurations,
`Platforms supported: GitHub, GitLab, Bitbucket`
With Qodo Merge, you can set configurations by creating a page called `.pr_agent.toml` in the [wiki](https://github.com/Codium-ai/pr-agent/wiki/pr_agent.toml) of the repo.
With Qodo Merge Pro, you can set configurations by creating a page called `.pr_agent.toml` in the [wiki](https://github.com/Codium-ai/pr-agent/wiki/pr_agent.toml) of the repo.
The advantage of this method is that it allows you to set configurations without needing to commit new content to the repo - just edit the wiki page and **save**.
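For illustration, a minimal sketch of such a wiki page's content (the values are placeholders, reusing settings shown elsewhere in this guide):
```toml
[pr_reviewer]
extra_instructions = "..."

[pr_description]
generate_ai_title = true
```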

View File

@ -1,33 +0,0 @@
`Supported Git Platforms: GitHub, GitLab, Bitbucket`
For optimal functionality of Qodo Merge, we recommend enabling a wiki for each repository where Qodo Merge is installed. The wiki serves several important purposes:
**Key Wiki Features: 💎**
- Storing a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#wiki-configuration-file)
- Defining a [`best_practices.md`](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) file
- Tracking [accepted suggestions](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking)
- Facilitating learning over time by creating an [auto_best_practices.md](https://qodo-merge-docs.qodo.ai/core-abilities/auto_best_practices) file
**Setup Instructions (GitHub):**
To enable a wiki for your repository:
1. Navigate to your repository's main page on GitHub
2. Select "Settings" from the top navigation bar
3. Locate the "Features" section
4. Enable the "Wikis" option by checking the corresponding box
5. Return to your repository's main page
6. Look for the newly added "Wiki" tab in the top navigation
7. Initialize your wiki by clicking "Create the first page" and saving (this step is important - without creating an initial page, the wiki will not be fully functional)
### Why Wiki?
- Your code (and its derivatives, including accepted code suggestions) is yours. Qodo Merge will never store it on external servers.
- Repository changes typically require pull requests, which create overhead and are time-consuming. This process is too cumbersome for auto data aggregation, and is not very convenient even for managing frequently updated content like configuration files and best practices.
- A repository wiki page provides an ideal balance:
- It lives within your repository, making it suitable for code-related documentation
- It enables quick updates without the overhead of pull requests
- It maintains full Git version control, allowing you to track changes over time.

View File

@ -5,7 +5,6 @@ It includes information on how to adjust Qodo Merge configurations, define which
- [Introduction](./introduction.md)
- [Enabling a Wiki](./enabling_a_wiki)
- [Configuration File](./configuration_options.md)
- [Usage and Automation](./automations_and_usage.md)
- [Local Repo (CLI)](./automations_and_usage.md#local-repo-cli)
@ -24,4 +23,4 @@ It includes information on how to adjust Qodo Merge configurations, define which
- [Changing a model](./additional_configurations.md#changing-a-model)
- [Patch Extra Lines](./additional_configurations.md#patch-extra-lines)
- [Editing the prompts](./additional_configurations.md#editing-the-prompts)
- [Qodo Merge Models](./PR_agent_pro_models.md)
- [Qodo Merge Pro Models](./PR_agent_pro_models.md)

View File

@ -2,7 +2,7 @@
After [installation](https://qodo-merge-docs.qodo.ai/installation/), there are three basic ways to invoke Qodo Merge:
1. Locally running a CLI command
2. Online usage - by [commenting](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901){:target="_blank"} on a PR
2. Online usage - by [commenting](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR
3. Enabling Qodo Merge tools to run automatically when a new PR is opened
@ -10,3 +10,4 @@ Specifically, CLI commands can be issued by invoking a pre-built [docker image](
For online usage, you will need to set up either a [GitHub App](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app) or a [GitHub Action](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) (GitHub), a [GitLab webhook](https://qodo-merge-docs.qodo.ai/installation/gitlab/#run-a-gitlab-webhook-server) (GitLab), or a [BitBucket App](https://qodo-merge-docs.qodo.ai/installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app) (BitBucket).
These platforms also enable running specific Qodo Merge tools automatically when a new PR is opened, or on each push to a branch.

View File

@ -1,26 +1,29 @@
site_name: Qodo Merge (and open-source PR-Agent)
site_name: Qodo Merge (formerly known as PR-Agent)
repo_url: https://github.com/Codium-ai/pr-agent
repo_name: Codium-ai/pr-agent
nav:
- Overview:
- 'index.md'
- 💎 Qodo Merge: 'overview/pr_agent_pro.md'
- 💎 Qodo Merge Pro: 'overview/pr_agent_pro.md'
- Data Privacy: 'overview/data_privacy.md'
- Installation:
- 'installation/index.md'
- PR-Agent: 'installation/pr_agent.md'
- 💎 Qodo Merge: 'installation/qodo_merge.md'
- Locally: 'installation/locally.md'
- GitHub: 'installation/github.md'
- GitLab: 'installation/gitlab.md'
- BitBucket: 'installation/bitbucket.md'
- Azure DevOps: 'installation/azure.md'
- 💎 Qodo Merge Pro: 'installation/pr_agent_pro.md'
- Usage Guide:
- 'usage-guide/index.md'
- Introduction: 'usage-guide/introduction.md'
- Enabling a Wiki: 'usage-guide/enabling_a_wiki.md'
- Configuration File: 'usage-guide/configuration_options.md'
- Usage and Automation: 'usage-guide/automations_and_usage.md'
- Managing Mail Notifications: 'usage-guide/mail_notifications.md'
- Changing a Model: 'usage-guide/changing_a_model.md'
- Additional Configurations: 'usage-guide/additional_configurations.md'
- 💎 Qodo Merge Models: 'usage-guide/PR_agent_pro_models'
- 💎 Qodo Merge Pro Models: 'usage-guide/PR_agent_pro_models'
- Tools:
- 'tools/index.md'
- Describe: 'tools/describe.md'
@ -38,11 +41,8 @@ nav:
- 💎 Custom Prompt: 'tools/custom_prompt.md'
- 💎 CI Feedback: 'tools/ci_feedback.md'
- 💎 Similar Code: 'tools/similar_code.md'
- 💎 Implement: 'tools/implement.md'
- Core Abilities:
- 'core-abilities/index.md'
- Fetching ticket context: 'core-abilities/fetching_ticket_context.md'
- Auto best practices: 'core-abilities/auto_best_practices.md'
- Local and global metadata: 'core-abilities/metadata.md'
- Dynamic context: 'core-abilities/dynamic_context.md'
- Self-reflection: 'core-abilities/self_reflection.md'

View File

@ -3,5 +3,5 @@
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-M6PJSFV');</script>
})(window,document,'script','dataLayer','GTM-5C9KZBM3');</script>
<!-- End Google Tag Manager -->

View File

@ -0,0 +1 @@

View File

@ -3,6 +3,7 @@ from functools import partial
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
@ -13,6 +14,7 @@ from pr_agent.tools.pr_config import PRConfig
from pr_agent.tools.pr_description import PRDescription
from pr_agent.tools.pr_generate_labels import PRGenerateLabels
from pr_agent.tools.pr_help_message import PRHelpMessage
from pr_agent.tools.pr_information_from_user import PRInformationFromUser
from pr_agent.tools.pr_line_questions import PR_LineQuestions
from pr_agent.tools.pr_questions import PRQuestions
from pr_agent.tools.pr_reviewer import PRReviewer
@ -24,6 +26,8 @@ command2class = {
"answer": PRReviewer,
"review": PRReviewer,
"review_pr": PRReviewer,
"reflect": PRInformationFromUser,
"reflect_and_review": PRInformationFromUser,
"describe": PRDescription,
"describe_pr": PRDescription,
"improve": PRCodeSuggestions,
@ -46,6 +50,7 @@ commands = list(command2class.keys())
class PRAgent:
def __init__(self, ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
self.ai_handler = ai_handler # will be initialized in run_action
self.forbidden_cli_args = ['enable_auto_approval']
async def handle_request(self, pr_url, request, notify=None) -> bool:
# First, apply repo specific settings if exists
@ -60,21 +65,10 @@ class PRAgent:
else:
action, *args = request
forbidden_cli_args = ['enable_auto_approval', 'approve_pr_on_self_review', 'base_url', 'url', 'app_name', 'secret_provider',
'git_provider', 'skip_keys', 'openai.key', 'ANALYTICS_FOLDER', 'uri', 'app_id', 'webhook_secret',
'bearer_token', 'PERSONAL_ACCESS_TOKEN', 'override_deployment_type', 'private_key',
'local_cache_path', 'enable_local_cache', 'jira_base_url', 'api_base', 'api_type', 'api_version',
'skip_keys']
if args:
for forbidden_arg in self.forbidden_cli_args:
for arg in args:
if arg.startswith('--'):
arg_word = arg.lower()
arg_word = arg_word.replace('__', '.') # replace double underscore with dot, e.g. --openai__key -> --openai.key
for forbidden_arg in forbidden_cli_args:
forbidden_arg_word = forbidden_arg.lower()
if '.' not in forbidden_arg_word:
forbidden_arg_word = '.' + forbidden_arg_word
if forbidden_arg_word in arg_word:
if forbidden_arg in arg:
get_logger().error(
f"CLI argument for param '{forbidden_arg}' is forbidden. Use instead a configuration file."
)
@ -83,10 +77,12 @@ class PRAgent:
action = action.lstrip("/").lower()
if action not in command2class:
get_logger().error(f"Unknown command: {action}")
get_logger().debug(f"Unknown command: {action}")
return False
with get_logger().contextualize(command=action, pr_url=pr_url):
get_logger().info("PR-Agent request handler started", analytics=True)
if action == "reflect_and_review":
get_settings().pr_reviewer.ask_and_reflect = True
if action == "answer":
if notify:
notify()

View File

@ -19,37 +19,25 @@ MAX_TOKENS = {
'gpt-4o-mini': 128000, # 128K, but may be limited by config.max_model_tokens
'gpt-4o-mini-2024-07-18': 128000, # 128K, but may be limited by config.max_model_tokens
'gpt-4o-2024-08-06': 128000, # 128K, but may be limited by config.max_model_tokens
'gpt-4o-2024-11-20': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-mini': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-mini-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-preview': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-preview-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-2024-12-17': 204800, # 200K, but may be limited by config.max_model_tokens
'o1': 204800, # 200K, but may be limited by config.max_model_tokens
'o3-mini': 204800, # 200K, but may be limited by config.max_model_tokens
'o3-mini-2025-01-31': 204800, # 200K, but may be limited by config.max_model_tokens
'claude-instant-1': 100000,
'claude-2': 100000,
'command-nightly': 4096,
'deepseek/deepseek-chat': 128000, # 128K, but may be limited by config.max_model_tokens
'deepseek/deepseek-reasoner': 64000, # 64K, but may be limited by config.max_model_tokens
'replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1': 4096,
'meta-llama/Llama-2-7b-chat-hf': 4096,
'vertex_ai/codechat-bison': 6144,
'vertex_ai/codechat-bison-32k': 32000,
'vertex_ai/claude-3-haiku@20240307': 100000,
'vertex_ai/claude-3-5-haiku@20241022': 100000,
'vertex_ai/claude-3-sonnet@20240229': 100000,
'vertex_ai/claude-3-opus@20240229': 100000,
'vertex_ai/claude-3-5-sonnet@20240620': 100000,
'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
'vertex_ai/gemini-1.5-pro': 1048576,
'vertex_ai/gemini-1.5-flash': 1048576,
'vertex_ai/gemini-2.0-flash-exp': 1048576,
'vertex_ai/gemma2': 8200,
'gemini/gemini-1.5-pro': 1048576,
'gemini/gemini-1.5-flash': 1048576,
'gemini/gemini-2.0-flash-exp': 1048576,
'codechat-bison': 6144,
'codechat-bison-32k': 32000,
'anthropic.claude-instant-v1': 100000,
@ -58,23 +46,20 @@ MAX_TOKENS = {
'anthropic/claude-3-opus-20240229': 100000,
'anthropic/claude-3-5-sonnet-20240620': 100000,
'anthropic/claude-3-5-sonnet-20241022': 100000,
'anthropic/claude-3-5-haiku-20241022': 100000,
'bedrock/anthropic.claude-instant-v1': 100000,
'bedrock/anthropic.claude-v2': 100000,
'bedrock/anthropic.claude-v2:1': 100000,
'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000,
'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000,
'bedrock/anthropic.claude-3-5-haiku-20241022-v1:0': 100000,
'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000,
'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000,
"bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0": 100000,
'claude-3-5-sonnet': 100000,
'groq/llama3-8b-8192': 8192,
'groq/llama3-70b-8192': 8192,
'groq/llama-3.1-8b-instant': 8192,
'groq/llama-3.3-70b-versatile': 128000,
'groq/mixtral-8x7b-32768': 32768,
'groq/gemma2-9b-it': 8192,
'groq/llama-3.1-8b-instant': 131072,
'groq/llama-3.1-70b-versatile': 131072,
'groq/llama-3.1-405b-reasoning': 131072,
'ollama/llama3': 4096,
'watsonx/meta-llama/llama-3-8b-instruct': 4096,
"watsonx/meta-llama/llama-3-70b-instruct": 4096,
@ -83,13 +68,3 @@ MAX_TOKENS = {
"watsonx/ibm/granite-34b-code-instruct": 8191,
"watsonx/mistralai/mistral-large": 32768,
}
USER_MESSAGE_ONLY_MODELS = [
"deepseek/deepseek-reasoner",
"o1-mini",
"o1-mini-2024-09-12",
"o1",
"o1-2024-12-17",
"o3-mini",
"o3-mini-2025-01-31"
]

View File

@ -1,18 +1,17 @@
try:
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from langchain_openai import ChatOpenAI, AzureChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage
except: # we don't enforce langchain as a dependency, so if it's not installed, just move on
pass
import functools
from openai import APIError, RateLimitError, Timeout
from retry import retry
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger
from openai import APIError, RateLimitError, Timeout
from retry import retry
import functools
OPENAI_RETRIES = 5
@ -74,3 +73,4 @@ class LangChainOpenAIHandler(BaseAiHandler):
raise ValueError(f"OpenAI {e.name} is required") from e
else:
raise e

View File

@ -1,14 +1,11 @@
import os
import requests
import litellm
import openai
import requests
from litellm import acompletion
from tenacity import retry, retry_if_exception_type, stop_after_attempt
from pr_agent.algo import USER_MESSAGE_ONLY_MODELS
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.utils import get_version
from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger
@ -86,18 +83,6 @@ class LiteLLMAIHandler(BaseAiHandler):
litellm.vertex_location = get_settings().get(
"VERTEXAI.VERTEX_LOCATION", None
)
# Google AI Studio
# SEE https://docs.litellm.ai/docs/providers/gemini
if get_settings().get("GOOGLE_AI_STUDIO.GEMINI_API_KEY", None):
os.environ["GEMINI_API_KEY"] = get_settings().google_ai_studio.gemini_api_key
# Support deepseek models
if get_settings().get("DEEPSEEK.KEY", None):
os.environ['DEEPSEEK_API_KEY'] = get_settings().get("DEEPSEEK.KEY")
# Models that only use a user message
self.user_message_only_models = USER_MESSAGE_ONLY_MODELS
def prepare_logs(self, response, system, user, resp, finish_reason):
response_log = response.dict().copy()
response_log['system'] = system
@ -141,7 +126,7 @@ class LiteLLMAIHandler(BaseAiHandler):
if "langfuse" in callbacks:
metadata.update({
"trace_name": command,
"tags": [git_provider, command, f'version:{get_version()}'],
"tags": [git_provider, command],
"trace_metadata": {
"command": command,
"pr_url": pr_url,
@ -150,7 +135,7 @@ class LiteLLMAIHandler(BaseAiHandler):
if "langsmith" in callbacks:
metadata.update({
"run_name": command,
"tags": [git_provider, command, f'version:{get_version()}'],
"tags": [git_provider, command],
"extra": {
"metadata": {
"command": command,
@ -201,11 +186,13 @@ class LiteLLMAIHandler(BaseAiHandler):
messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
{"type": "image_url", "image_url": {"url": img_path}}]
# Currently, some models do not support separate system and user prompts
if self.user_message_only_models and any(entry.lower() in model.lower() for entry in self.user_message_only_models):
# Currently O1 does not support separate system and user prompts
O1_MODEL_PREFIX = 'o1-'
model_type = model.split('/')[-1] if '/' in model else model
if model_type.startswith(O1_MODEL_PREFIX):
user = f"{system}\n\n\n{user}"
system = ""
get_logger().info(f"Using model {model}, combining system and user prompts")
get_logger().info(f"Using O1 model, combining system and user prompts")
messages = [{"role": "user", "content": user}]
kwargs = {
"model": model,

View File

@ -1,10 +1,8 @@
from os import environ
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
import openai
from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
from openai.error import APIError, RateLimitError, Timeout, TryAgain
from retry import retry
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger
@ -16,7 +14,7 @@ class OpenAIHandler(BaseAiHandler):
# Initialize OpenAIHandler specific attributes here
try:
super().__init__()
environ["OPENAI_API_KEY"] = get_settings().openai.key
openai.api_key = get_settings().openai.key
if get_settings().get("OPENAI.ORG", None):
openai.organization = get_settings().openai.org
if get_settings().get("OPENAI.API_TYPE", None):
@ -26,7 +24,7 @@ class OpenAIHandler(BaseAiHandler):
if get_settings().get("OPENAI.API_VERSION", None):
openai.api_version = get_settings().openai.api_version
if get_settings().get("OPENAI.API_BASE", None):
environ["OPENAI_BASE_URL"] = get_settings().openai.api_base
openai.api_base = get_settings().openai.api_base
except AttributeError as e:
raise ValueError("OpenAI key is required") from e
@ -38,26 +36,28 @@ class OpenAIHandler(BaseAiHandler):
"""
return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
@retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
@retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
try:
deployment_id = self.deployment_id
get_logger().info("System: ", system)
get_logger().info("User: ", user)
messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
client = AsyncOpenAI()
chat_completion = await client.chat.completions.create(
chat_completion = await openai.ChatCompletion.acreate(
model=model,
deployment_id=deployment_id,
messages=messages,
temperature=temperature,
)
resp = chat_completion.choices[0].message.content
finish_reason = chat_completion.choices[0].finish_reason
usage = chat_completion.usage
resp = chat_completion["choices"][0]['message']['content']
finish_reason = chat_completion["choices"][0]["finish_reason"]
usage = chat_completion.get("usage")
get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
model=model, usage=usage)
return resp, finish_reason
except (APIError, Timeout) as e:
except (APIError, Timeout, TryAgain) as e:
get_logger().error("Error during OpenAI inference: ", e)
raise
except (RateLimitError) as e:
@ -65,4 +65,4 @@ class OpenAIHandler(BaseAiHandler):
raise
except (Exception) as e:
get_logger().error("Unknown error during OpenAI inference: ", e)
raise
raise TryAgain from e

View File

@ -3,8 +3,8 @@ from __future__ import annotations
import re
import traceback
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.config_loader import get_settings
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.log import get_logger
@ -31,7 +31,7 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
def decode_if_bytes(original_file_str):
if isinstance(original_file_str, (bytes, bytearray)):
if isinstance(original_file_str, bytes):
try:
return original_file_str.decode('utf-8')
except UnicodeDecodeError:
@ -61,26 +61,23 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
patch_lines = patch_str.splitlines()
extended_patch_lines = []
is_valid_hunk = True
start1, size1, start2, size2 = -1, -1, -1, -1
RE_HUNK_HEADER = re.compile(
r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
try:
for i,line in enumerate(patch_lines):
for line in patch_lines:
if line.startswith('@@'):
match = RE_HUNK_HEADER.match(line)
# identify hunk header
if match:
# finish processing previous hunk
if is_valid_hunk and (start1 != -1 and patch_extra_lines_after > 0):
if start1 != -1 and patch_extra_lines_after > 0:
delta_lines = [f' {line}' for line in original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]]
extended_patch_lines.extend(delta_lines)
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
is_valid_hunk = check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1)
if is_valid_hunk and (patch_extra_lines_before > 0 or patch_extra_lines_after > 0):
if patch_extra_lines_before > 0 or patch_extra_lines_after > 0:
def _calc_context_limits(patch_lines_before):
extended_start1 = max(1, start1 - patch_lines_before)
extended_size1 = size1 + (start1 - extended_start1) + patch_extra_lines_after
@ -141,7 +138,7 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
return patch_str
# finish processing last hunk
if start1 != -1 and patch_extra_lines_after > 0 and is_valid_hunk:
if start1 != -1 and patch_extra_lines_after > 0:
delta_lines = original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]
# add space at the beginning of each extra line
delta_lines = [f' {line}' for line in delta_lines]
@ -151,23 +148,6 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
return extended_patch_str
def check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1):
"""
Check if the hunk lines match the original file content. We saw cases where the hunk header line doesn't match the original file content, and then
extending the hunk with extra lines before the hunk header can cause the hunk to be invalid.
"""
is_valid_hunk = True
try:
if i + 1 < len(patch_lines) and patch_lines[i + 1][0] == ' ': # an existing line in the file
if patch_lines[i + 1].strip() != original_lines[start1 - 1].strip():
is_valid_hunk = False
get_logger().error(
f"Invalid hunk in PR, line {start1} in hunk header doesn't match the original file content")
except:
pass
return is_valid_hunk
def extract_hunk_headers(match):
res = list(match.groups())
for i in range(len(res)):
@ -364,7 +344,7 @@ __old hunk__
def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, side) -> tuple[str, str]:
try:
patch_with_lines_str = f"\n\n## File: '{file_name.strip()}'\n\n"
selected_lines = ""
patch_lines = patch.splitlines()
@ -407,8 +387,5 @@ def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, s
patch_with_lines_str += line + '\n'
if not line.startswith('-'): # currently we don't support /ask line for deleted lines
selected_lines_num += 1
except Exception as e:
get_logger().error(f"Failed to extract hunk lines from patch: {e}", artifact={"traceback": traceback.format_exc()})
return "", ""
return patch_with_lines_str.rstrip(), selected_lines.rstrip()
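Both the patch-extension logic and the `check_if_hunk_lines_matches_to_file` helper above hinge on parsing unified-diff hunk headers with the `RE_HUNK_HEADER` pattern. A standalone sketch of that parsing step (the sample patch is invented for illustration):

import re

# Same shape as RE_HUNK_HEADER in the hunks above:
# "@@ -start1,size1 +start2,size2 @@ optional section header"
RE_HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

def parse_hunk_headers(patch_str: str) -> list:
    headers = []
    for line in patch_str.splitlines():
        match = RE_HUNK_HEADER.match(line)
        if match:
            start1, size1, start2, size2, section = match.groups()
            headers.append({"start1": int(start1), "size1": int(size1 or 1),
                            "start2": int(start2), "size2": int(size2 or 1),
                            "section_header": section})
    return headers

sample_patch = "@@ -3,4 +3,5 @@ def foo():\n context\n-old\n+new\n+added\n context"
print(parse_hunk_headers(sample_patch))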

View File

@ -4,6 +4,8 @@ from typing import Dict
from pr_agent.config_loader import get_settings
def filter_bad_extensions(files):
# Bad Extensions, source: https://github.com/EleutherAI/github-downloader/blob/345e7c4cbb9e0dc8a0615fd995a08bf9d73b3fe6/download_repo_text.py # noqa: E501
bad_extensions = get_settings().bad_extensions.default
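The context above shows `filter_bad_extensions` reading its extension blacklist from settings. A minimal, settings-free sketch of the same filtering idea (the extension set here is shortened and illustrative):

BAD_EXTENSIONS = {"png", "jpg", "pdf", "lock"}  # the real list lives in settings

def filter_bad_extensions(filenames: list) -> list:
    kept = []
    for name in filenames:
        extension = name.rsplit(".", 1)[-1].lower() if "." in name else ""
        if extension not in BAD_EXTENSIONS:
            kept.append(name)
    return kept

print(filter_bad_extensions(["app.py", "logo.png", "poetry.lock", "README.md"]))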

View File

@ -5,15 +5,14 @@ from typing import Callable, List, Tuple
from github import RateLimitExceededException
from pr_agent.algo.file_filter import filter_ignored
from pr_agent.algo.git_patch_processing import (
convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions)
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
from pr_agent.algo.language_handler import sort_files_by_main_languages
from pr_agent.algo.file_filter import filter_ignored
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.algo.utils import ModelType, clip_tokens, get_max_tokens, get_weak_model
from pr_agent.algo.utils import get_max_tokens, clip_tokens, ModelType
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.log import get_logger
DELETED_FILES_ = "Deleted files:\n"
@ -205,11 +204,10 @@ def pr_generate_extended_diff(pr_languages: list,
if not extended_patch:
get_logger().warning(f"Failed to extend patch for file: {file.filename}")
continue
full_extended_patch = f"\n\n## {file.filename}\n{extended_patch.rstrip()}\n"
if add_line_numbers_to_hunks:
full_extended_patch = convert_to_hunks_with_lines_numbers(extended_patch, file)
else:
full_extended_patch = f"\n\n## File: '{file.filename.strip()}'\n{extended_patch.rstrip()}\n"
# add AI-summary metadata to the patch
if file.ai_file_summary and get_settings().get("config.enable_ai_metadata", False):
@ -317,13 +315,13 @@ def generate_full_patch(convert_hunks_to_line_numbers, file_dict, max_tokens_mod
# TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
# until we meet the requirements
if get_settings().config.verbosity_level >= 2:
get_logger().warning(f"Patch too large, skipping it: '{filename}'")
get_logger().warning(f"Patch too large, skipping it, {filename}")
remaining_files_list_new.append(filename)
continue
if patch:
if not convert_hunks_to_line_numbers:
patch_final = f"\n\n## File: '{filename.strip()}'\n\n{patch.strip()}\n"
patch_final = f"\n\n## File: '{filename.strip()}\n\n{patch.strip()}\n'"
else:
patch_final = "\n\n" + patch.strip()
patches.append(patch_final)
@ -355,8 +353,8 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT
def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]:
if model_type == ModelType.WEAK:
model = get_weak_model()
if model_type == ModelType.TURBO:
model = get_settings().config.model_turbo
else:
model = get_settings().config.model
fallback_models = get_settings().config.fallback_models
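The last hunk above switches model selection between a `TURBO`/`model_turbo` pair and `WEAK`/`get_weak_model()`. A minimal sketch of that selection step, with a plain dict standing in for the settings object (the model names are placeholders):

from enum import Enum

class ModelType(str, Enum):
    REGULAR = "regular"
    WEAK = "weak"

CONFIG = {"model": "main-model", "model_weak": "cheaper-model", "fallback_models": ["backup-model"]}

def get_all_models(model_type: ModelType = ModelType.REGULAR) -> list:
    if model_type == ModelType.WEAK and CONFIG.get("model_weak"):
        model = CONFIG["model_weak"]      # lighter model for cheaper tasks
    else:
        model = CONFIG["model"]           # default model
    fallbacks = CONFIG.get("fallback_models", [])
    if isinstance(fallbacks, str):
        fallbacks = [m.strip() for m in fallbacks.split(",")]
    return [model] + fallbacks

print(get_all_models(ModelType.WEAK))     # -> ['cheaper-model', 'backup-model']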

View File

@ -1,9 +1,8 @@
from threading import Lock
from jinja2 import Environment, StrictUndefined
from tiktoken import encoding_for_model, get_encoding
from pr_agent.config_loader import get_settings
from threading import Lock
from pr_agent.log import get_logger

View File

@ -1,6 +1,5 @@
from dataclasses import dataclass
from enum import Enum
from typing import Optional
class EDIT_TYPE(Enum):
@ -22,5 +21,4 @@ class FilePatchInfo:
old_filename: str = None
num_plus_lines: int = -1
num_minus_lines: int = -1
language: Optional[str] = None
ai_file_summary: str = None

View File

@ -7,15 +7,14 @@ import html
import json
import os
import re
import sys
import textwrap
import time
import traceback
from datetime import datetime
from enum import Enum
from importlib.metadata import PackageNotFoundError, version
from typing import Any, List, Tuple
import html2text
import requests
import yaml
@ -23,19 +22,11 @@ from pydantic import BaseModel
from starlette_context import context
from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.git_patch_processing import extract_hunk_lines_from_patch
from pr_agent.algo.token_handler import TokenEncoder
from pr_agent.algo.types import FilePatchInfo
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.algo.types import FilePatchInfo
from pr_agent.log import get_logger
def get_weak_model() -> str:
if get_settings().get("config.model_weak"):
return get_settings().config.model_weak
return get_settings().config.model
class Range(BaseModel):
line_start: int # should be 0-indexed
line_end: int
@ -44,17 +35,14 @@ class Range(BaseModel):
class ModelType(str, Enum):
REGULAR = "regular"
WEAK = "weak"
TURBO = "turbo"
class PRReviewHeader(str, Enum):
REGULAR = "## PR Reviewer Guide"
INCREMENTAL = "## Incremental PR Reviewer Guide"
class PRDescriptionHeader(str, Enum):
CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"
def get_setting(key: str) -> Any:
try:
key = key.upper()
@ -105,8 +93,7 @@ def unique_strings(input_list: List[str]) -> List[str]:
def convert_to_markdown_v2(output_data: dict,
gfm_supported: bool = True,
incremental_review=None,
git_provider=None,
files=None) -> str:
git_provider=None) -> str:
"""
Convert a dictionary of data into markdown format.
Args:
@ -182,7 +169,7 @@ def convert_to_markdown_v2(output_data: dict,
if is_value_no(value):
markdown_text += f'### {emoji} No relevant tests\n\n'
else:
markdown_text += f"### {emoji} PR contains tests\n\n"
markdown_text += f"### PR contains tests\n\n"
elif 'ticket compliance check' in key_nice.lower():
markdown_text = ticket_markdown_logic(emoji, markdown_text, value, gfm_supported)
elif 'security concerns' in key_nice.lower():
@ -230,31 +217,15 @@ def convert_to_markdown_v2(output_data: dict,
continue
relevant_file = issue.get('relevant_file', '').strip()
issue_header = issue.get('issue_header', '').strip()
if issue_header.lower() == 'possible bug':
issue_header = 'Possible Issue' # Make the header less frightening
issue_content = issue.get('issue_content', '').strip()
start_line = int(str(issue.get('start_line', 0)).strip())
end_line = int(str(issue.get('end_line', 0)).strip())
relevant_lines_str = extract_relevant_lines_str(end_line, files, relevant_file, start_line, dedent=True)
if git_provider:
reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
else:
reference_link = None
if gfm_supported:
if reference_link is not None and len(reference_link) > 0:
if relevant_lines_str:
issue_str = f"<details><summary><a href='{reference_link}'><strong>{issue_header}</strong></a>\n\n{issue_content}</summary>\n\n{relevant_lines_str}\n\n</details>"
else:
issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
else:
issue_str = f"<strong>{issue_header}</strong><br>{issue_content}"
else:
if reference_link is not None and len(reference_link) > 0:
issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
else:
issue_str = f"**{issue_header}**\n\n{issue_content}\n\n"
markdown_text += f"{issue_str}\n\n"
except Exception as e:
get_logger().exception(f"Failed to process 'Recommended focus areas for review': {e}")
@ -271,143 +242,68 @@ def convert_to_markdown_v2(output_data: dict,
if gfm_supported:
markdown_text += "</table>\n"
return markdown_text
def extract_relevant_lines_str(end_line, files, relevant_file, start_line, dedent=False) -> str:
"""
Finds 'relevant_file' in 'files', and extracts the lines from 'start_line' to 'end_line' string from the file content.
"""
try:
relevant_lines_str = ""
if files:
files = set_file_languages(files)
for file in files:
if file.filename.strip() == relevant_file:
if not file.head_file:
# as a fallback, extract relevant lines directly from patch
patch = file.patch
get_logger().info(f"No content found in file: '{file.filename}' for 'extract_relevant_lines_str'. Using patch instead")
_, selected_lines = extract_hunk_lines_from_patch(patch, file.filename, start_line, end_line,side='right')
if not selected_lines:
get_logger().error(f"Failed to extract relevant lines from patch: {file.filename}")
return ""
# filter out '-' lines
relevant_lines_str = ""
for line in selected_lines.splitlines():
if line.startswith('-'):
continue
relevant_lines_str += line[1:] + '\n'
if 'code_feedback' in output_data:
if gfm_supported:
markdown_text += f"\n\n"
markdown_text += f"<details><summary> <strong>Code feedback:</strong></summary>\n\n"
markdown_text += "<hr>"
else:
relevant_file_lines = file.head_file.splitlines()
relevant_lines_str = "\n".join(relevant_file_lines[start_line - 1:end_line])
markdown_text += f"\n\n### Code feedback:\n\n"
for i, value in enumerate(output_data['code_feedback']):
if value is None or value == '' or value == {} or value == []:
continue
markdown_text += parse_code_suggestion(value, i, gfm_supported)+"\n\n"
if markdown_text.endswith('<hr>'):
markdown_text= markdown_text[:-4]
if gfm_supported:
markdown_text += f"</details>"
if dedent and relevant_lines_str:
# Remove the longest leading string of spaces and tabs common to all lines.
relevant_lines_str = textwrap.dedent(relevant_lines_str)
relevant_lines_str = f"```{file.language}\n{relevant_lines_str}\n```"
break
return relevant_lines_str
except Exception as e:
get_logger().exception(f"Failed to extract relevant lines: {e}")
return ""
return markdown_text
def ticket_markdown_logic(emoji, markdown_text, value, gfm_supported) -> str:
ticket_compliance_str = ""
compliance_emoji = ''
# Track compliance levels across all tickets
all_compliance_levels = []
final_compliance_level = -1
if isinstance(value, list):
for ticket_analysis in value:
try:
ticket_url = ticket_analysis.get('ticket_url', '').strip()
for v in value:
ticket_url = v.get('ticket_url', '').strip()
compliance_level = v.get('overall_compliance_level', '').strip()
# add emojis, if 'Fully compliant' ✅, 'Partially compliant' 🔶, or 'Not compliant' ❌
if compliance_level.lower() == 'fully compliant':
# compliance_level = '✅ Fully compliant'
final_compliance_level = 2 if final_compliance_level == -1 else 1
elif compliance_level.lower() == 'partially compliant':
# compliance_level = '🔶 Partially compliant'
final_compliance_level = 1
elif compliance_level.lower() == 'not compliant':
# compliance_level = '❌ Not compliant'
final_compliance_level = 0 if final_compliance_level < 1 else 1
# explanation = v.get('compliance_analysis', '').strip()
explanation = ''
ticket_compliance_level = '' # Individual ticket compliance
fully_compliant_str = ticket_analysis.get('fully_compliant_requirements', '').strip()
not_compliant_str = ticket_analysis.get('not_compliant_requirements', '').strip()
requires_further_human_verification = ticket_analysis.get('requires_further_human_verification',
'').strip()
if not fully_compliant_str and not not_compliant_str:
get_logger().debug(f"Ticket compliance has no requirements",
artifact={'ticket_url': ticket_url})
continue
# Calculate individual ticket compliance level
fully_compliant_str = v.get('fully_compliant_requirements', '').strip()
not_compliant_str = v.get('not_compliant_requirements', '').strip()
if fully_compliant_str:
explanation += f"Fully compliant requirements:\n{fully_compliant_str}\n\n"
if not_compliant_str:
ticket_compliance_level = 'Partially compliant'
explanation += f"Not compliant requirements:\n{not_compliant_str}\n\n"
ticket_compliance_str += f"\n\n**[{ticket_url.split('/')[-1]}]({ticket_url}) - {compliance_level}**\n\n{explanation}\n\n"
if final_compliance_level == 2:
compliance_level = ''
elif final_compliance_level == 1:
compliance_level = '🔶'
else:
if not requires_further_human_verification:
ticket_compliance_level = 'Fully compliant'
else:
ticket_compliance_level = 'PR Code Verified'
elif not_compliant_str:
ticket_compliance_level = 'Not compliant'
compliance_level = ''
# Store the compliance level for aggregation
if ticket_compliance_level:
all_compliance_levels.append(ticket_compliance_level)
# build compliance string
if fully_compliant_str:
explanation += f"Compliant requirements:\n\n{fully_compliant_str}\n\n"
if not_compliant_str:
explanation += f"Non-compliant requirements:\n\n{not_compliant_str}\n\n"
if requires_further_human_verification:
explanation += f"Requires further human verification:\n\n{requires_further_human_verification}\n\n"
ticket_compliance_str += f"\n\n**[{ticket_url.split('/')[-1]}]({ticket_url}) - {ticket_compliance_level}**\n\n{explanation}\n\n"
# for debugging
if requires_further_human_verification:
get_logger().debug(f"Ticket compliance requires further human verification",
artifact={'ticket_url': ticket_url,
'requires_further_human_verification': requires_further_human_verification,
'compliance_level': ticket_compliance_level})
except Exception as e:
get_logger().exception(f"Failed to process ticket compliance: {e}")
continue
# Calculate overall compliance level and emoji
if all_compliance_levels:
if all(level == 'Fully compliant' for level in all_compliance_levels):
compliance_level = 'Fully compliant'
compliance_emoji = ''
elif all(level == 'PR Code Verified' for level in all_compliance_levels):
compliance_level = 'PR Code Verified'
compliance_emoji = ''
elif any(level == 'Not compliant' for level in all_compliance_levels):
# If there's a mix of compliant and non-compliant tickets
if any(level in ['Fully compliant', 'PR Code Verified'] for level in all_compliance_levels):
compliance_level = 'Partially compliant'
compliance_emoji = '🔶'
else:
compliance_level = 'Not compliant'
compliance_emoji = ''
elif any(level == 'Partially compliant' for level in all_compliance_levels):
compliance_level = 'Partially compliant'
compliance_emoji = '🔶'
else:
compliance_level = 'PR Code Verified'
compliance_emoji = ''
# Set extra statistics outside the ticket loop
get_settings().set('config.extra_statistics', {'compliance_level': compliance_level})
# editing table row for ticket compliance analysis
if gfm_supported:
markdown_text += f"<tr><td>\n\n"
markdown_text += f"**{emoji} Ticket compliance analysis {compliance_emoji}**\n\n"
markdown_text += f"**{emoji} Ticket compliance analysis {compliance_level}**\n\n"
markdown_text += ticket_compliance_str
markdown_text += f"</td></tr>\n"
else:
markdown_text += f"### {emoji} Ticket compliance analysis {compliance_emoji}\n\n"
markdown_text += ticket_compliance_str + "\n\n"
markdown_text += f"### {emoji} Ticket compliance analysis {compliance_level}\n\n"
markdown_text += ticket_compliance_str+"\n\n"
return markdown_text
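The ticket-compliance logic above records one level per ticket and then collapses them into a single overall level. A standalone sketch of just that aggregation step (the level strings follow the hunk; the example input is invented):

def aggregate_compliance(levels: list) -> str:
    if not levels:
        return ""
    if all(level == "Fully compliant" for level in levels):
        return "Fully compliant"
    if all(level == "PR Code Verified" for level in levels):
        return "PR Code Verified"
    if any(level == "Not compliant" for level in levels):
        # a mix of compliant and non-compliant tickets counts as partial
        if any(level in ("Fully compliant", "PR Code Verified") for level in levels):
            return "Partially compliant"
        return "Not compliant"
    if any(level == "Partially compliant" for level in levels):
        return "Partially compliant"
    return "PR Code Verified"

print(aggregate_compliance(["Fully compliant", "Not compliant"]))  # -> Partially compliant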
@ -634,22 +530,27 @@ def load_large_diff(filename, new_file_content_str: str, original_file_content_s
"""
Generate a patch for a modified file by comparing the original content of the file with the new content provided as
input.
"""
if not original_file_content_str and not new_file_content_str:
return ""
Args:
new_file_content_str: The new content of the file as a string.
original_file_content_str: The original content of the file as a string.
Returns:
The generated or provided patch string.
Raises:
None.
"""
patch = ""
try:
original_file_content_str = (original_file_content_str or "").rstrip() + "\n"
new_file_content_str = (new_file_content_str or "").rstrip() + "\n"
diff = difflib.unified_diff(original_file_content_str.splitlines(keepends=True),
new_file_content_str.splitlines(keepends=True))
if get_settings().config.verbosity_level >= 2 and show_warning:
get_logger().info(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
get_logger().warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
patch = ''.join(diff)
except Exception:
pass
return patch
except Exception as e:
get_logger().exception(f"Failed to generate patch for file: {filename}")
return ""
def update_settings_from_args(args: List[str]) -> List[str]:
@ -735,7 +636,7 @@ def try_fix_yaml(response_text: str,
get_logger().info(f"Successfully parsed AI prediction after adding |-\n")
return data
except:
pass
get_logger().info(f"Failed to parse AI prediction after adding |-\n")
# second fallback - try to extract only range from first ```yaml to ````
snippet_pattern = r'```(yaml)?[\s\S]*?```'
@ -779,18 +680,9 @@ def try_fix_yaml(response_text: str,
except:
pass
# fifth fallback - try to remove leading '+' (sometimes added by AI for 'existing code' and 'improved code')
response_text_lines_copy = response_text_lines.copy()
for i in range(0, len(response_text_lines_copy)):
response_text_lines_copy[i] = ' ' + response_text_lines_copy[i][1:]
try:
data = yaml.safe_load('\n'.join(response_text_lines_copy))
get_logger().info(f"Successfully parsed AI prediction after removing leading '+'")
return data
except:
pass
# sixth fallback - try to remove last lines
# fifth fallback - try to remove last lines
data = {}
for i in range(1, len(response_text_lines)):
response_text_lines_tmp = '\n'.join(response_text_lines[:-i])
try:
@ -1132,7 +1024,8 @@ def process_description(description_full: str) -> Tuple[str, List]:
if not description_full:
return "", []
description_split = description_full.split(PRDescriptionHeader.CHANGES_WALKTHROUGH.value)
split_str = "### **Changes walkthrough** 📝"
description_split = description_full.split(split_str)
base_description_str = description_split[0]
changes_walkthrough_str = ""
files = []
@ -1167,9 +1060,6 @@ def process_description(description_full: str) -> Tuple[str, List]:
if not res or res.lastindex != 4:
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>'
res = re.search(pattern_back, file_data, re.DOTALL)
if not res or res.lastindex != 4:
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>' # looking for hypen ('- ')
res = re.search(pattern_back, file_data, re.DOTALL)
if res and res.lastindex == 4:
short_filename = res.group(1).strip()
short_summary = res.group(2).strip()
@ -1201,48 +1091,3 @@ def process_description(description_full: str) -> Tuple[str, List]:
get_logger().exception(f"Failed to process description: {e}")
return base_description_str, files
def get_version() -> str:
# First check pyproject.toml if running directly out of repository
if os.path.exists("pyproject.toml"):
if sys.version_info >= (3, 11):
import tomllib
with open("pyproject.toml", "rb") as f:
data = tomllib.load(f)
if "project" in data and "version" in data["project"]:
return data["project"]["version"]
else:
get_logger().warning("Version not found in pyproject.toml")
else:
get_logger().warning("Unable to determine local version from pyproject.toml")
# Otherwise get the installed pip package version
try:
return version('pr-agent')
except PackageNotFoundError:
get_logger().warning("Unable to find package named 'pr-agent'")
return "unknown"
def set_file_languages(diff_files) -> List[FilePatchInfo]:
try:
# if the language is already set, do not change it
if hasattr(diff_files[0], 'language') and diff_files[0].language:
return diff_files
# map file extensions to programming languages
language_extension_map_org = get_settings().language_extension_map_org
extension_to_language = {}
for language, extensions in language_extension_map_org.items():
for ext in extensions:
extension_to_language[ext] = language
for file in diff_files:
extension_s = '.' + file.filename.rsplit('.')[-1]
language_name = "txt"
if extension_s and (extension_s in extension_to_language):
language_name = extension_to_language[extension_s]
file.language = language_name.lower()
except Exception as e:
get_logger().exception(f"Failed to set file languages: {e}")
return diff_files
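The `set_file_languages` helper above inverts the settings' language-to-extensions map so each changed file can be tagged by extension. A minimal sketch of that inversion, with a hard-coded map standing in for `language_extension_map_org` (contents are illustrative):

LANGUAGE_EXTENSION_MAP = {"Python": [".py"], "JavaScript": [".js", ".jsx"], "Go": [".go"]}

def detect_language(filename: str) -> str:
    extension_to_language = {}
    for language, extensions in LANGUAGE_EXTENSION_MAP.items():
        for ext in extensions:
            extension_to_language[ext] = language
    extension = "." + filename.rsplit(".", 1)[-1] if "." in filename else ""
    return extension_to_language.get(extension, "txt").lower()

print(detect_language("pr_agent/algo/utils.py"))  # -> python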

View File

@ -3,9 +3,8 @@ import asyncio
import os
from pr_agent.agent.pr_agent import PRAgent, commands
from pr_agent.algo.utils import get_version
from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger, setup_logger
from pr_agent.log import setup_logger, get_logger
log_level = os.environ.get("LOG_LEVEL", "INFO")
setup_logger(log_level)
@ -46,7 +45,6 @@ def set_parser():
To edit any configuration parameter from 'configuration.toml', just add -config_path=<value>.
For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions="focus on the file: ..."'
""")
parser.add_argument('--version', action='version', version=f'pr-agent {get_version()}')
parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', default=None)
parser.add_argument('--issue_url', type=str, help='The URL of the Issue to review', default=None)
parser.add_argument('command', type=str, help='The', choices=commands, default='review')
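The cli diff above wires a `--version` flag to the `get_version()` helper through argparse's built-in `version` action. A tiny standalone sketch of that pattern (program name and version string are placeholders):

import argparse

def build_parser(version_str: str = "pr-agent 0.0.0") -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="cli.py")
    # the 'version' action prints the string and exits immediately
    parser.add_argument("--version", action="version", version=version_str)
    parser.add_argument("--pr_url", type=str, default=None, help="The URL of the PR to review")
    return parser

# build_parser().parse_args(["--version"])  # prints "pr-agent 0.0.0" and exits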

View File

@ -12,6 +12,7 @@ global_settings = Dynaconf(
envvar_prefix=False,
merge_enabled=True,
settings_files=[join(current_dir, f) for f in [
"settings/.secrets.toml",
"settings/configuration.toml",
"settings/ignore.toml",
"settings/language_extensions.toml",
@ -28,7 +29,6 @@ global_settings = Dynaconf(
"settings/pr_add_docs.toml",
"settings/custom_labels.toml",
"settings/pr_help_prompts.toml",
"settings/.secrets.toml",
"settings_prod/.secrets.toml",
]]
)
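The config_loader hunk above only changes where `settings/.secrets.toml` sits in the Dynaconf `settings_files` list. Position matters because files later in the list are loaded last and their values take precedence. A minimal sketch of that ordering behavior with two throwaway TOML files (file names and keys are invented):

import os, tempfile
from dynaconf import Dynaconf

with tempfile.TemporaryDirectory() as d:
    first = os.path.join(d, "defaults.toml")
    second = os.path.join(d, "overrides.toml")
    with open(first, "w") as f:
        f.write('[config]\nmodel = "default-model"\n')
    with open(second, "w") as f:
        f.write('[config]\nmodel = "override-model"\n')
    settings = Dynaconf(merge_enabled=True, settings_files=[first, second])
    print(settings.config.model)  # -> override-model (the later file wins)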

View File

@ -1,16 +1,14 @@
from starlette_context import context
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
from pr_agent.git_providers.bitbucket_server_provider import \
BitbucketServerProvider
from pr_agent.git_providers.bitbucket_server_provider import BitbucketServerProvider
from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
from pr_agent.git_providers.gerrit_provider import GerritProvider
from pr_agent.git_providers.git_provider import GitProvider
from pr_agent.git_providers.github_provider import GithubProvider
from pr_agent.git_providers.gitlab_provider import GitLabProvider
from pr_agent.git_providers.local_git_provider import LocalGitProvider
from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
from pr_agent.git_providers.gerrit_provider import GerritProvider
from starlette_context import context
_GIT_PROVIDERS = {
'github': GithubProvider,

View File

@ -2,16 +2,13 @@ import os
from typing import Optional, Tuple
from urllib.parse import urlparse
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..algo.file_filter import filter_ignored
from ..algo.language_handler import is_valid_file
from ..algo.utils import (PRDescriptionHeader, clip_tokens,
find_line_number_of_relevant_line_in_file,
load_large_diff)
from ..config_loader import get_settings
from ..log import get_logger
from ..algo.language_handler import is_valid_file
from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff
from ..config_loader import get_settings
from .git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
AZURE_DEVOPS_AVAILABLE = True
ADO_APP_CLIENT_DEFAULT_ID = "499b84ac-1321-427f-aa17-267ca6975798/.default"
@ -19,16 +16,19 @@ MAX_PR_DESCRIPTION_AZURE_LENGTH = 4000-1
try:
# noinspection PyUnresolvedReferences
from msrest.authentication import BasicAuthentication
# noinspection PyUnresolvedReferences
from azure.devops.connection import Connection
# noinspection PyUnresolvedReferences
from azure.devops.v7_1.git.models import (Comment, CommentThread,
from azure.identity import DefaultAzureCredential
# noinspection PyUnresolvedReferences
from azure.devops.v7_1.git.models import (
Comment,
CommentThread,
GitVersionDescriptor,
GitPullRequest,
GitPullRequestIterationChanges,
GitVersionDescriptor)
# noinspection PyUnresolvedReferences
from azure.identity import DefaultAzureCredential
from msrest.authentication import BasicAuthentication
)
except ImportError:
AZURE_DEVOPS_AVAILABLE = False
@ -67,12 +67,14 @@ class AzureDevopsProvider(GitProvider):
relevant_lines_end = suggestion['relevant_lines_end']
if not relevant_lines_start or relevant_lines_start == -1:
get_logger().warning(
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
continue
if relevant_lines_end < relevant_lines_start:
get_logger().warning(f"Failed to publish code suggestion, "
if get_settings().config.verbosity_level >= 2:
get_logger().exception(f"Failed to publish code suggestion, "
f"relevant_lines_end is {relevant_lines_end} and "
f"relevant_lines_start is {relevant_lines_start}")
continue
@ -93,11 +95,9 @@ class AzureDevopsProvider(GitProvider):
"side": "RIGHT",
}
post_parameters_list.append(post_parameters)
if not post_parameters_list:
return False
for post_parameters in post_parameters_list:
try:
for post_parameters in post_parameters_list:
comment = Comment(content=post_parameters["body"], comment_type=1)
thread = CommentThread(comments=[comment],
thread_context={
@ -117,11 +117,15 @@ class AzureDevopsProvider(GitProvider):
repository_id=self.repo_slug,
pull_request_id=self.pr_num
)
except Exception as e:
get_logger().warning(f"Azure failed to publish code suggestion, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().info(
f"Published code suggestion on {self.pr_num} at {post_parameters['path']}"
)
return True
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish code suggestion, error: {e}")
return False
def get_pr_description_full(self) -> str:
return self.pr.description
@ -326,13 +330,13 @@ class AzureDevopsProvider(GitProvider):
edit_type = EDIT_TYPE.ADDED
elif diff_types[file] == "delete":
edit_type = EDIT_TYPE.DELETED
elif "rename" in diff_types[file]: # diff_type can be `rename` | `edit, rename`
elif diff_types[file] == "rename":
edit_type = EDIT_TYPE.RENAMED
version = GitVersionDescriptor(
version=base_sha.commit_id, version_type="commit"
)
if edit_type == EDIT_TYPE.ADDED or edit_type == EDIT_TYPE.RENAMED:
if edit_type == EDIT_TYPE.ADDED:
original_file_content_str = ""
else:
try:
@ -378,9 +382,6 @@ class AzureDevopsProvider(GitProvider):
return []
def publish_comment(self, pr_comment: str, is_temporary: bool = False, thread_context=None):
if is_temporary and not get_settings().config.publish_output_progress:
get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
return None
comment = Comment(content=pr_comment)
thread = CommentThread(comments=[comment], thread_context=thread_context, status=5)
thread_response = self.azure_devops_client.create_thread(
@ -403,7 +404,7 @@ class AzureDevopsProvider(GitProvider):
pr_body = pr_body[:ind]
if len(pr_body) > MAX_PR_DESCRIPTION_AZURE_LENGTH:
changes_walkthrough_text = PRDescriptionHeader.CHANGES_WALKTHROUGH.value
changes_walkthrough_text = '## **Changes walkthrough**'
ind = pr_body.find(changes_walkthrough_text)
if ind != -1:
pr_body = pr_body[:ind]
@ -619,3 +620,4 @@ class AzureDevopsProvider(GitProvider):
def publish_file_comments(self, file_comments: list) -> bool:
pass
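Among the Azure DevOps changes above is the truncation of an over-long PR description at the changes-walkthrough header, to stay under the provider's length limit. A standalone sketch of that clamping step (the limit mirrors `MAX_PR_DESCRIPTION_AZURE_LENGTH`, the header string follows `PRDescriptionHeader.CHANGES_WALKTHROUGH`, and the sample body is invented):

MAX_PR_DESCRIPTION_AZURE_LENGTH = 4000 - 1
CHANGES_WALKTHROUGH_HEADER = "### **Changes walkthrough** 📝"

def clamp_description(pr_body: str) -> str:
    if len(pr_body) > MAX_PR_DESCRIPTION_AZURE_LENGTH:
        ind = pr_body.find(CHANGES_WALKTHROUGH_HEADER)
        if ind != -1:
            pr_body = pr_body[:ind]       # drop the walkthrough section first
    if len(pr_body) > MAX_PR_DESCRIPTION_AZURE_LENGTH:
        pr_body = pr_body[:MAX_PR_DESCRIPTION_AZURE_LENGTH]  # hard cut as a last resort
    return pr_body

print(len(clamp_description("x" * 5000)))  # -> 3999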

View File

@ -1,6 +1,4 @@
import difflib
import json
import re
from typing import Optional, Tuple
from urllib.parse import urlparse
@ -8,14 +6,13 @@ import requests
from atlassian.bitbucket import Cloud
from starlette_context import context
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.algo.types import FilePatchInfo, EDIT_TYPE
from ..algo.file_filter import filter_ignored
from ..algo.language_handler import is_valid_file
from ..algo.utils import find_line_number_of_relevant_line_in_file
from ..config_loader import get_settings
from ..log import get_logger
from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider
from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
def _gef_filename(diff):
@ -74,33 +71,19 @@ class BitbucketProvider(GitProvider):
post_parameters_list = []
for suggestion in code_suggestions:
body = suggestion["body"]
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
if original_suggestion:
try:
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
diff = difflib.unified_diff(existing_code.split('\n'),
improved_code.split('\n'), n=999)
patch_orig = "\n".join(diff)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
# replace ```suggestion ... ``` with diff_code, using regex:
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
except Exception as e:
get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
continue
relevant_file = suggestion["relevant_file"]
relevant_lines_start = suggestion["relevant_lines_start"]
relevant_lines_end = suggestion["relevant_lines_end"]
if not relevant_lines_start or relevant_lines_start == -1:
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
)
continue
if relevant_lines_end < relevant_lines_start:
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, "
f"relevant_lines_end is {relevant_lines_end} and "
@ -129,7 +112,8 @@ class BitbucketProvider(GitProvider):
self.publish_inline_comments(post_parameters_list)
return True
except Exception as e:
get_logger().error(f"Bitbucket failed to publish code suggestion, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish code suggestion, error: {e}")
return False
def publish_file_comments(self, file_comments: list) -> bool:
@ -325,9 +309,6 @@ class BitbucketProvider(GitProvider):
self.publish_comment(pr_comment)
def publish_comment(self, pr_comment: str, is_temporary: bool = False):
if is_temporary and not get_settings().config.publish_output_progress:
get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
return None
pr_comment = self.limit_output_characters(pr_comment, self.max_comment_length)
comment = self.pr.comment(pr_comment)
if is_temporary:

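One side of the Bitbucket hunk above renders each code suggestion as a unified diff: it diffs `existing_code` against `improved_code`, drops the `---`/`+++`/`@@` header lines, and swaps the committable suggestion block for a plain diff block. A standalone sketch of that transformation (sample body and snippets are invented):

import difflib
import re

def suggestion_to_diff_block(body: str, existing_code: str, improved_code: str) -> str:
    existing_code = existing_code.rstrip() + "\n"
    improved_code = improved_code.rstrip() + "\n"
    diff = difflib.unified_diff(existing_code.split("\n"), improved_code.split("\n"), n=999)
    patch_orig = "\n".join(diff)
    patch = "\n".join(patch_orig.splitlines()[5:]).strip("\n")  # drop the ---/+++/@@ header lines
    diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
    # replace the committable ```suggestion ... ``` block with the plain diff block
    return re.sub(r"```suggestion.*?```", diff_code, body, flags=re.DOTALL)

body = "Use a safer lookup.\n```suggestion\nvalue = cfg.get('x', 0)\n```"
print(suggestion_to_diff_block(body, "value = cfg['x']", "value = cfg.get('x', 0)"))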
View File

@ -1,21 +1,16 @@
import difflib
import re
from packaging.version import parse as parse_version
from distutils.version import LooseVersion
from requests.exceptions import HTTPError
from typing import Optional, Tuple
from urllib.parse import quote_plus, urlparse
from atlassian.bitbucket import Bitbucket
from requests.exceptions import HTTPError
from ..algo.git_patch_processing import decode_if_bytes
from ..algo.language_handler import is_valid_file
from .git_provider import GitProvider
from ..algo.types import EDIT_TYPE, FilePatchInfo
from ..algo.utils import (find_line_number_of_relevant_line_in_file,
load_large_diff)
from ..algo.language_handler import is_valid_file
from ..algo.utils import load_large_diff, find_line_number_of_relevant_line_in_file
from ..config_loader import get_settings
from ..log import get_logger
from .git_provider import GitProvider
class BitbucketServerProvider(GitProvider):
@ -40,7 +35,7 @@ class BitbucketServerProvider(GitProvider):
token=get_settings().get("BITBUCKET_SERVER.BEARER_TOKEN",
None))
try:
self.bitbucket_api_version = parse_version(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
self.bitbucket_api_version = LooseVersion(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
except Exception:
self.bitbucket_api_version = None
@ -70,33 +65,20 @@ class BitbucketServerProvider(GitProvider):
post_parameters_list = []
for suggestion in code_suggestions:
body = suggestion["body"]
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
if original_suggestion:
try:
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
diff = difflib.unified_diff(existing_code.split('\n'),
improved_code.split('\n'), n=999)
patch_orig = "\n".join(diff)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
# replace ```suggestion ... ``` with diff_code, using regex:
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
except Exception as e:
get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
continue
relevant_file = suggestion["relevant_file"]
relevant_lines_start = suggestion["relevant_lines_start"]
relevant_lines_end = suggestion["relevant_lines_end"]
if not relevant_lines_start or relevant_lines_start == -1:
get_logger().warning(
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
)
continue
if relevant_lines_end < relevant_lines_start:
get_logger().warning(
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, "
f"relevant_lines_end is {relevant_lines_end} and "
f"relevant_lines_start is {relevant_lines_start}"
@ -177,7 +159,7 @@ class BitbucketServerProvider(GitProvider):
head_sha = self.pr.fromRef['latestCommit']
# if Bitbucket api version is >= 8.16 then use the merge-base api for 2-way diff calculation
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("8.16"):
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("8.16"):
try:
base_sha = self.bitbucket_client.get(self._get_merge_base())['id']
except Exception as e:
@ -192,7 +174,7 @@ class BitbucketServerProvider(GitProvider):
# if Bitbucket api version is None or < 7.0 then do a simple diff with a guaranteed common ancestor
base_sha = source_commits_list[-1]['parents'][0]['id']
# if Bitbucket api version is 7.0-8.15 then use 2-way diff functionality for the base_sha
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("7.0"):
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("7.0"):
try:
destination_commits = list(
self.bitbucket_client.get_commits(self.workspace_slug, self.repo_slug, base_sha,
@ -218,23 +200,27 @@ class BitbucketServerProvider(GitProvider):
case 'ADD':
edit_type = EDIT_TYPE.ADDED
new_file_content_str = self.get_file(file_path, head_sha)
new_file_content_str = decode_if_bytes(new_file_content_str)
if isinstance(new_file_content_str, (bytes, bytearray)):
new_file_content_str = new_file_content_str.decode("utf-8")
original_file_content_str = ""
case 'DELETE':
edit_type = EDIT_TYPE.DELETED
new_file_content_str = ""
original_file_content_str = self.get_file(file_path, base_sha)
original_file_content_str = decode_if_bytes(original_file_content_str)
if isinstance(original_file_content_str, (bytes, bytearray)):
original_file_content_str = original_file_content_str.decode("utf-8")
case 'RENAME':
edit_type = EDIT_TYPE.RENAMED
case _:
edit_type = EDIT_TYPE.MODIFIED
original_file_content_str = self.get_file(file_path, base_sha)
original_file_content_str = decode_if_bytes(original_file_content_str)
if isinstance(original_file_content_str, (bytes, bytearray)):
original_file_content_str = original_file_content_str.decode("utf-8")
new_file_content_str = self.get_file(file_path, head_sha)
new_file_content_str = decode_if_bytes(new_file_content_str)
if isinstance(new_file_content_str, (bytes, bytearray)):
new_file_content_str = new_file_content_str.decode("utf-8")
patch = load_large_diff(file_path, new_file_content_str, original_file_content_str, show_warning=False)
patch = load_large_diff(file_path, new_file_content_str, original_file_content_str)
diff_files.append(
FilePatchInfo(
@ -402,21 +388,10 @@ class BitbucketServerProvider(GitProvider):
try:
projects_index = path_parts.index("projects")
except ValueError:
projects_index = -1
try:
users_index = path_parts.index("users")
except ValueError:
users_index = -1
if projects_index == -1 and users_index == -1:
except ValueError as e:
raise ValueError(f"The provided URL '{pr_url}' does not appear to be a Bitbucket PR URL")
if projects_index != -1:
path_parts = path_parts[projects_index:]
else:
path_parts = path_parts[users_index:]
if len(path_parts) < 6 or path_parts[2] != "repos" or path_parts[4] != "pull-requests":
raise ValueError(
@ -424,8 +399,6 @@ class BitbucketServerProvider(GitProvider):
)
workspace_slug = path_parts[1]
if users_index != -1:
workspace_slug = f"~{workspace_slug}"
repo_slug = path_parts[3]
try:
pr_number = int(path_parts[5])
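The Bitbucket Server hunks above compare the server's reported API version against feature thresholds (merge-base diff from 8.16, two-way diff from 7.0), with one side using `distutils.LooseVersion` and the other `packaging.version.parse`. A minimal sketch of that comparison with the `packaging` variant (version strings are examples):

from packaging.version import parse as parse_version

def pick_diff_strategy(server_version: str) -> str:
    version = parse_version(server_version)
    if version >= parse_version("8.16"):
        return "merge-base API"        # dedicated merge-base endpoint
    if version >= parse_version("7.0"):
        return "two-way diff"          # 2-way diff against a computed base
    return "simple ancestor diff"      # oldest supported fallback

print(pick_diff_strategy("8.19.0"))    # -> merge-base API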

View File

@ -4,15 +4,13 @@ from collections import Counter
from typing import List, Optional, Tuple
from urllib.parse import urlparse
from pr_agent.algo.language_handler import is_valid_file
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.git_providers.codecommit_client import CodeCommitClient
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..algo.utils import load_large_diff
from .git_provider import GitProvider
from ..config_loader import get_settings
from ..log import get_logger
from .git_provider import GitProvider
from pr_agent.algo.language_handler import is_valid_file
class PullRequestCCMimic:
"""

View File

@ -12,9 +12,9 @@ import requests
import urllib3.util
from git import Repo
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.git_providers.local_git_provider import PullRequestMimic
from pr_agent.log import get_logger

View File

@ -1,12 +1,12 @@
from abc import ABC, abstractmethod
# enum EDIT_TYPE (ADDED, DELETED, MODIFIED, RENAMED)
from typing import Optional
from pr_agent.algo.types import FilePatchInfo
from pr_agent.algo.utils import Range, process_description
from pr_agent.config_loader import get_settings
from pr_agent.algo.types import FilePatchInfo
from pr_agent.log import get_logger
MAX_FILES_ALLOWED_FULL = 50
class GitProvider(ABC):
@ -62,8 +62,8 @@ class GitProvider(ABC):
pass
def get_pr_description(self, full: bool = True, split_changes_walkthrough=False) -> str or tuple:
from pr_agent.algo.utils import clip_tokens
from pr_agent.config_loader import get_settings
from pr_agent.algo.utils import clip_tokens
max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
description = self.get_pr_description_full() if full else self.get_user_description()
if split_changes_walkthrough:

View File

@ -1,30 +1,22 @@
import copy
import difflib
import hashlib
import itertools
import re
import time
import hashlib
import traceback
from datetime import datetime
from typing import Optional, Tuple
from urllib.parse import urlparse
from github import AppAuthentication, Auth, Github, GithubException
from github import AppAuthentication, Auth, Github
from retry import retry
from starlette_context import context
from ..algo.file_filter import filter_ignored
from ..algo.git_patch_processing import extract_hunk_headers
from ..algo.language_handler import is_valid_file
from ..algo.types import EDIT_TYPE
from ..algo.utils import (PRReviewHeader, Range, clip_tokens,
find_line_number_of_relevant_line_in_file,
load_large_diff, set_file_languages)
from ..algo.utils import PRReviewHeader, load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file, Range
from ..config_loader import get_settings
from ..log import get_logger
from ..servers.utils import RateLimitExceeded
from .git_provider import (MAX_FILES_ALLOWED_FULL, FilePatchInfo, GitProvider,
IncrementalPR)
from .git_provider import FilePatchInfo, GitProvider, IncrementalPR, MAX_FILES_ALLOWED_FULL
class GithubProvider(GitProvider):
@ -174,24 +166,6 @@ class GithubProvider(GitProvider):
diff_files = []
invalid_files_names = []
is_close_to_rate_limit = False
# The base.sha will point to the current state of the base branch (including parallel merges), not the original base commit when the PR was created
# We can fix this by finding the merge base commit between the PR head and base branches
# Note that The pr.head.sha is actually correct as is - it points to the latest commit in your PR branch.
# This SHA isn't affected by parallel merges to the base branch since it's specific to your PR's branch.
repo = self.repo_obj
pr = self.pr
try:
compare = repo.compare(pr.base.sha, pr.head.sha) # communication with GitHub
merge_base_commit = compare.merge_base_commit
except Exception as e:
get_logger().error(f"Failed to get merge base commit: {e}")
merge_base_commit = pr.base
if merge_base_commit.sha != pr.base.sha:
get_logger().info(
f"Using merge base commit {merge_base_commit.sha} instead of base commit ")
counter_valid = 0
for file in files:
if not is_valid_file(file.filename):
@ -199,10 +173,7 @@ class GithubProvider(GitProvider):
continue
patch = file.patch
if is_close_to_rate_limit:
new_file_content_str = ""
original_file_content_str = ""
else:
# allow only a limited number of files to be fully loaded. We can manage the rest with diffs only
counter_valid += 1
avoid_load = False
@ -224,12 +195,10 @@ class GithubProvider(GitProvider):
if avoid_load:
original_file_content_str = ""
else:
original_file_content_str = self._get_pr_file_content(file, merge_base_commit.sha)
# original_file_content_str = self._get_pr_file_content(file, self.pr.base.sha)
original_file_content_str = self._get_pr_file_content(file, self.pr.base.sha)
if not patch:
patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str)
if file.status == 'added':
edit_type = EDIT_TYPE.ADDED
elif file.status == 'removed':
@ -243,14 +212,9 @@ class GithubProvider(GitProvider):
edit_type = EDIT_TYPE.UNKNOWN
# count number of lines added and removed
if hasattr(file, 'additions') and hasattr(file, 'deletions'):
num_plus_lines = file.additions
num_minus_lines = file.deletions
else:
patch_lines = patch.splitlines(keepends=True)
num_plus_lines = len([line for line in patch_lines if line.startswith('+')])
num_minus_lines = len([line for line in patch_lines if line.startswith('-')])
file_patch_canonical_structure = FilePatchInfo(original_file_content_str, new_file_content_str, patch,
file.filename, edit_type=edit_type,
num_plus_lines=num_plus_lines,
@ -315,6 +279,7 @@ class GithubProvider(GitProvider):
relevant_line_in_file,
absolute_position)
if position == -1:
if get_settings().config.verbosity_level >= 2:
get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
subject_type = "FILE"
else:
@ -327,9 +292,11 @@ class GithubProvider(GitProvider):
# publish all comments in a single message
self.pr.create_review(commit=self.last_commit_id, comments=comments)
except Exception as e:
get_logger().info(f"Initially failed to publish inline comments as committable")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish inline comments")
if (getattr(e, "status", None) == 422 and not disable_fallback):
if (getattr(e, "status", None) == 422
and get_settings().github.publish_inline_comments_fallback_with_verification and not disable_fallback):
pass # continue to try _publish_inline_comments_fallback_with_verification
else:
raise e # will end up with publishing the comments one by one
@ -337,6 +304,7 @@ class GithubProvider(GitProvider):
try:
self._publish_inline_comments_fallback_with_verification(comments)
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish inline code comments fallback, error: {e}")
raise e
@ -362,8 +330,10 @@ class GithubProvider(GitProvider):
for comment in fixed_comments_as_one_liner:
try:
self.publish_inline_comments([comment], disable_fallback=True)
if get_settings().config.verbosity_level >= 2:
get_logger().info(f"Published invalid comment as a single line comment: {comment}")
except:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish invalid comment as a single line comment: {comment}")
def _verify_code_comment(self, comment: dict):
@ -422,6 +392,7 @@ class GithubProvider(GitProvider):
if fixed_comment != comment:
fixed_comments.append(fixed_comment)
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to fix inline comment, error: {e}")
return fixed_comments
@ -430,21 +401,20 @@ class GithubProvider(GitProvider):
Publishes code suggestions as comments on the PR.
"""
post_parameters_list = []
code_suggestions_validated = self.validate_comments_inside_hunks(code_suggestions)
for suggestion in code_suggestions_validated:
for suggestion in code_suggestions:
body = suggestion['body']
relevant_file = suggestion['relevant_file']
relevant_lines_start = suggestion['relevant_lines_start']
relevant_lines_end = suggestion['relevant_lines_end']
if not relevant_lines_start or relevant_lines_start == -1:
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
continue
if relevant_lines_end < relevant_lines_start:
if get_settings().config.verbosity_level >= 2:
get_logger().exception(f"Failed to publish code suggestion, "
f"relevant_lines_end is {relevant_lines_end} and "
f"relevant_lines_start is {relevant_lines_start}")
@ -471,21 +441,13 @@ class GithubProvider(GitProvider):
self.publish_inline_comments(post_parameters_list)
return True
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish code suggestion, error: {e}")
return False
def edit_comment(self, comment, body: str):
try:
body = self.limit_output_characters(body, self.max_comment_chars)
comment.edit(body=body)
except GithubException as e:
if hasattr(e, "status") and e.status == 403:
# Log as warning for permission-related issues (usually due to polling)
get_logger().warning(
"Failed to edit github comment due to permission restrictions",
artifact={"error": e})
else:
get_logger().exception(f"Failed to edit github comment", artifact={"error": e})
def edit_comment_from_comment_id(self, comment_id: int, body: str):
try:
@ -539,7 +501,6 @@ class GithubProvider(GitProvider):
elif self.deployment_type == 'user':
same_comment_creator = self.github_user_id == existing_comment['user']['login']
if existing_comment['subject_type'] == 'file' and comment['path'] == existing_comment['path'] and same_comment_creator:
headers, data_patch = self.pr._requester.requestJsonAndCheck(
"PATCH", f"{self.base_url}/repos/{self.repo}/pulls/comments/{existing_comment['id']}", input={"body":comment['body']}
)
@ -551,6 +512,7 @@ class GithubProvider(GitProvider):
)
return True
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish diffview file summary, error: {e}")
return False
@ -839,6 +801,7 @@ class GithubProvider(GitProvider):
link = f"{self.base_url_html}/{self.repo}/pull/{self.pr_num}/files#diff-{sha_file}R{absolute_position}"
return link
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().info(f"Failed adding line link, error: {e}")
return ""
@ -899,89 +862,3 @@ class GithubProvider(GitProvider):
def calc_pr_statistics(self, pull_request_data: dict):
return {}
def validate_comments_inside_hunks(self, code_suggestions):
"""
validate that all committable comments are inside PR hunks - this is a must for committable comments in GitHub
"""
code_suggestions_copy = copy.deepcopy(code_suggestions)
diff_files = self.get_diff_files()
RE_HUNK_HEADER = re.compile(
r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
diff_files = set_file_languages(diff_files)
for suggestion in code_suggestions_copy:
try:
relevant_file_path = suggestion['relevant_file']
for file in diff_files:
if file.filename == relevant_file_path:
# generate on-demand the patches range for the relevant file
patch_str = file.patch
if not hasattr(file, 'patches_range'):
file.patches_range = []
patch_lines = patch_str.splitlines()
for i, line in enumerate(patch_lines):
if line.startswith('@@'):
match = RE_HUNK_HEADER.match(line)
# identify hunk header
if match:
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
file.patches_range.append({'start': start2, 'end': start2 + size2 - 1})
patches_range = file.patches_range
comment_start_line = suggestion.get('relevant_lines_start', None)
comment_end_line = suggestion.get('relevant_lines_end', None)
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
if not comment_start_line or not comment_end_line or not original_suggestion:
continue
# check if the comment is inside a valid hunk
is_valid_hunk = False
min_distance = float('inf')
patch_range_min = None
# find the hunk that contains the comment, or the closest one
for i, patch_range in enumerate(patches_range):
d1 = comment_start_line - patch_range['start']
d2 = patch_range['end'] - comment_end_line
if d1 >= 0 and d2 >= 0: # found a valid hunk
is_valid_hunk = True
min_distance = 0
patch_range_min = patch_range
break
elif d1 * d2 <= 0: # comment is possibly inside the hunk
d1_clip = abs(min(0, d1))
d2_clip = abs(min(0, d2))
d = max(d1_clip, d2_clip)
if d < min_distance:
patch_range_min = patch_range
min_distance = min(min_distance, d)
if not is_valid_hunk:
if min_distance < 10: # 10 lines - a reasonable distance to consider the comment inside the hunk
# make the suggestion non-committable, yet multi line
suggestion['relevant_lines_start'] = max(suggestion['relevant_lines_start'], patch_range_min['start'])
suggestion['relevant_lines_end'] = min(suggestion['relevant_lines_end'], patch_range_min['end'])
body = suggestion['body'].strip()
# present new diff code in collapsible
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
diff = difflib.unified_diff(existing_code.split('\n'),
improved_code.split('\n'), n=999)
patch_orig = "\n".join(diff)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
diff_code = f"\n\n<details><summary>New proposed code:</summary>\n\n```diff\n{patch.rstrip()}\n```"
# replace ```suggestion ... ``` with diff_code, using regex:
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
body += "\n\n</details>"
suggestion['body'] = body
get_logger().info(f"Comment was moved to a valid hunk, "
f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
else:
get_logger().error(f"Comment is not inside a valid hunk, "
f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
except Exception as e:
get_logger().error(f"Failed to process patch for committable comment, error: {e}")
return code_suggestions_copy
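The comment block in the GitHub provider hunks above explains why the diff is computed against the merge-base commit rather than `pr.base.sha`: the base branch may have received other merges since the PR was opened. A minimal sketch of that lookup with PyGithub, falling back to the recorded base commit on failure (token, repository name, and PR number are placeholders):

from github import Github

def get_merge_base_sha(token: str, repo_full_name: str, pr_number: int) -> str:
    repo = Github(token).get_repo(repo_full_name)
    pr = repo.get_pull(pr_number)
    try:
        comparison = repo.compare(pr.base.sha, pr.head.sha)   # one extra GitHub API call
        return comparison.merge_base_commit.sha
    except Exception:
        return pr.base.sha   # possibly stale if the base branch moved

# get_merge_base_sha("<token>", "owner/repo", 123)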

View File

@ -1,4 +1,3 @@
import difflib
import hashlib
import re
from typing import Optional, Tuple
@ -8,16 +7,13 @@ import gitlab
import requests
from gitlab import GitlabGetError
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..algo.file_filter import filter_ignored
from ..algo.language_handler import is_valid_file
from ..algo.utils import (clip_tokens,
find_line_number_of_relevant_line_in_file,
load_large_diff)
from ..algo.utils import load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file
from ..config_loader import get_settings
from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..log import get_logger
from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider
class DiffNotFoundError(Exception):
@ -194,9 +190,6 @@ class GitLabProvider(GitProvider):
self.publish_persistent_comment_full(pr_comment, initial_header, update_header, name, final_update_message)
def publish_comment(self, mr_comment: str, is_temporary: bool = False):
if is_temporary and not get_settings().config.publish_output_progress:
get_logger().debug(f"Skipping publish_comment for temporary comment: {mr_comment}")
return None
mr_comment = self.limit_output_characters(mr_comment, self.max_comment_chars)
comment = self.mr.notes.create({'body': mr_comment})
if is_temporary:
@ -282,23 +275,20 @@ class GitLabProvider(GitProvider):
new_code_snippet = original_suggestion['improved_code']
content = original_suggestion['suggestion_content']
label = original_suggestion['label']
score = original_suggestion.get('score', 7)
if 'score' in original_suggestion:
score = original_suggestion['score']
else:
score = 7
if hasattr(self, 'main_language'):
language = self.main_language
else:
language = ''
link = self.get_line_link(relevant_file, line_start, line_end)
body_fallback =f"**Suggestion:** {content} [{label}, importance: {score}]\n\n"
body_fallback +=f"\n\n<details><summary>[{target_file.filename} [{line_start}-{line_end}]]({link}):</summary>\n\n"
body_fallback += f"\n\n___\n\n`(Cannot implement directly - GitLab API allows committable suggestions strictly on MR diff lines)`"
body_fallback+="</details>\n\n"
diff_patch = difflib.unified_diff(old_code_snippet.split('\n'),
new_code_snippet.split('\n'), n=999)
patch_orig = "\n".join(diff_patch)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
body_fallback += diff_code
body_fallback =f"**Suggestion:** {content} [{label}, importance: {score}]\n___\n"
body_fallback +=f"\n\nReplace lines ([{line_start}-{line_end}]({link}))\n\n```{language}\n{old_code_snippet}\n````\n\n"
body_fallback +=f"with\n\n```{language}\n{new_code_snippet}\n````"
body_fallback += f"\n\n___\n\n`(Cannot implement this suggestion directly, as gitlab API does not enable committing to a non -+ line in a PR)`"
# Create a general note on the file in the MR
self.mr.notes.create({
@ -311,7 +301,6 @@ class GitLabProvider(GitProvider):
'file_path': f'{target_file.filename}',
}
})
get_logger().debug(f"Created fallback comment in MR {self.id_mr} with position {pos_obj}")
# get_logger().debug(
# f"Failed to create comment in MR {self.id_mr} with position {pos_obj} (probably not a '+' line)")

View File

@ -4,9 +4,9 @@ from typing import List
from git import Repo
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.config_loader import _find_repository_root, get_settings
from pr_agent.git_providers.git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.log import get_logger
@ -141,18 +141,6 @@ class LocalGitProvider(GitProvider):
def remove_comment(self, comment):
pass # Not applicable to the local git provider, but required by the interface
def add_eyes_reaction(self, comment):
pass # Not applicable to the local git provider, but required by the interface
def get_commit_messages(self):
pass # Not applicable to the local git provider, but required by the interface
def get_repo_settings(self):
pass # Not applicable to the local git provider, but required by the interface
def remove_reaction(self, comment):
pass # Not applicable to the local git provider, but required by the interface
def get_languages(self):
"""
Calculate percentage of languages in repository. Used for hunk prioritisation.

View File

@ -3,12 +3,11 @@ import os
import tempfile
from dynaconf import Dynaconf
from starlette_context import context
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import (get_git_provider,
get_git_provider_with_context)
from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
from pr_agent.log import get_logger
from starlette_context import context
def apply_repo_settings(pr_url):
@ -99,5 +98,5 @@ def set_claude_model():
"""
model_claude = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
get_settings().set('config.model', model_claude)
get_settings().set('config.model_weak', model_claude)
get_settings().set('config.model_turbo', model_claude)
get_settings().set('config.fallback_models', [model_claude])

View File

@ -1,6 +1,5 @@
from pr_agent.config_loader import get_settings
from pr_agent.identity_providers.default_identity_provider import \
DefaultIdentityProvider
from pr_agent.identity_providers.default_identity_provider import DefaultIdentityProvider
_IDENTITY_PROVIDERS = {
'default': DefaultIdentityProvider

View File

@ -1,5 +1,4 @@
from pr_agent.identity_providers.identity_provider import (Eligibility,
IdentityProvider)
from pr_agent.identity_providers.identity_provider import Eligibility, IdentityProvider
class DefaultIdentityProvider(IdentityProvider):

View File

@ -8,10 +8,12 @@ def get_secret_provider():
provider_id = get_settings().config.secret_provider
if provider_id == 'google_cloud_storage':
try:
from pr_agent.secret_providers.google_cloud_storage_secret_provider import \
GoogleCloudStorageSecretProvider
from pr_agent.secret_providers.google_cloud_storage_secret_provider import GoogleCloudStorageSecretProvider
return GoogleCloudStorageSecretProvider()
except Exception as e:
raise ValueError(f"Failed to initialize google_cloud_storage secret provider {provider_id}") from e
else:
raise ValueError("Unknown SECRET_PROVIDER")

View File

@ -9,9 +9,9 @@ import secrets
from urllib.parse import unquote
import uvicorn
from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
from fastapi.encoders import jsonable_encoder
from fastapi import APIRouter, Depends, FastAPI, HTTPException
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.encoders import jsonable_encoder
from starlette import status
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware
@ -23,6 +23,9 @@ from pr_agent.agent.pr_agent import PRAgent, command2class
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import get_logger
from fastapi import Request, Depends
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from pr_agent.log import LoggingFormat, get_logger, setup_logger
setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
@ -64,9 +67,6 @@ def authorize(credentials: HTTPBasicCredentials = Depends(security)):
async def _perform_commands_azure(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}", **log_context)
return
commands = get_settings().get(f"azure_devops_server.{commands_conf}")
get_settings().set("config.is_auto_command", True)
for command in commands:

View File

@ -24,6 +24,10 @@ from pr_agent.identity_providers import get_identity_provider
from pr_agent.identity_providers.identity_provider import Eligibility
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.secret_providers import get_secret_provider
from pr_agent.servers.github_action_runner import get_setting_or_env, is_true
from pr_agent.tools.pr_code_suggestions import PRCodeSuggestions
from pr_agent.tools.pr_description import PRDescription
from pr_agent.tools.pr_reviewer import PRReviewer
setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
router = APIRouter()
@ -71,23 +75,8 @@ async def handle_manifest(request: Request, response: Response):
return JSONResponse(manifest_obj)
def _get_username(data):
actor = data.get("data", {}).get("actor", {})
if actor:
if "username" in actor:
return actor["username"]
elif "display_name" in actor:
return actor["display_name"]
elif "nickname" in actor:
return actor["nickname"]
return ""
async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict, data: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}")
return
if data.get("event", "") == "pullrequest:created":
if not should_process_pr_logic(data):
return
@ -109,14 +98,11 @@ async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_ur
def is_bot_user(data) -> bool:
try:
actor = data.get("data", {}).get("actor", {})
# allow actor type: user . if it's "AppUser" or "team" then it is a bot user
allowed_actor_types = {"user"}
if actor and actor["type"].lower() not in allowed_actor_types:
get_logger().info(f"BitBucket actor type is not 'user', skipping: {actor}")
if data["data"]["actor"]["type"] != "user":
get_logger().info(f"BitBucket actor type is not 'user': {data['data']['actor']['type']}")
return True
except Exception as e:
get_logger().error(f"Failed 'is_bot_user' logic: {e}")
get_logger().error("Failed 'is_bot_user' logic: {e}")
return False
@ -126,14 +112,6 @@ def should_process_pr_logic(data) -> bool:
title = pr_data.get("title", "")
source_branch = pr_data.get("source", {}).get("branch", {}).get("name", "")
target_branch = pr_data.get("destination", {}).get("branch", {}).get("name", "")
sender = _get_username(data)
# logic to ignore PRs from specific users
ignore_pr_users = get_settings().get("CONFIG.IGNORE_PR_AUTHORS", [])
if ignore_pr_users and sender:
if sender in ignore_pr_users:
get_logger().info(f"Ignoring PR from user '{sender}' due to 'config.ignore_pr_authors' setting")
return False
# logic to ignore PRs with specific titles
if title:
@ -183,9 +161,16 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
return "OK"
# Get the username of the sender
log_context["sender"] = _get_username(data)
try:
username = data["data"]["actor"]["username"]
except KeyError:
try:
username = data["data"]["actor"]["display_name"]
except KeyError:
username = data["data"]["actor"]["nickname"]
log_context["sender"] = username
sender_id = data.get("data", {}).get("actor", {}).get("account_id", "")
sender_id = data["data"]["actor"]["account_id"]
log_context["sender_id"] = sender_id
jwt_parts = input_jwt.split(".")
claim_part = jwt_parts[1]
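
Both variants above resolve the sender's name from the Bitbucket payload; a standalone sketch of the .get-based approach (the payload shape is inferred from the handler code, and the sample data is made up):

```python
def get_username(data: dict) -> str:
    # Prefer 'username', then 'display_name', then 'nickname'; never raise KeyError.
    actor = data.get("data", {}).get("actor", {})
    for key in ("username", "display_name", "nickname"):
        if key in actor:
            return actor[key]
    return ""

payload = {"data": {"actor": {"display_name": "Jane Doe", "account_id": "123"}}}
print(get_username(payload))                                            # Jane Doe
print(payload.get("data", {}).get("actor", {}).get("account_id", ""))   # 123
```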

View File

@ -6,20 +6,20 @@ from typing import List
import uvicorn
from fastapi import APIRouter, FastAPI
from fastapi.encoders import jsonable_encoder
from fastapi.responses import RedirectResponse
from starlette import status
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.servers.utils import verify_signature
from fastapi.responses import RedirectResponse
setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
router = APIRouter()
@ -72,11 +72,6 @@ async def handle_webhook(background_tasks: BackgroundTasks, request: Request):
commands_to_run = []
if data["eventKey"] == "pr:opened":
apply_repo_settings(pr_url)
if get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {pr_url}", **log_context)
return
get_settings().set("config.is_auto_command", True)
commands_to_run.extend(_get_commands_list_from_settings('BITBUCKET_SERVER.PR_COMMANDS'))
elif data["eventKey"] == "pr:comment:added":
commands_to_run.append(data["comment"]["text"])

View File

@ -15,8 +15,7 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.git_providers import (get_git_provider,
get_git_provider_with_context)
from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
from pr_agent.git_providers.git_provider import IncrementalPR
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.identity_providers import get_identity_provider
@ -257,14 +256,6 @@ def should_process_pr_logic(body) -> bool:
pr_labels = pull_request.get("labels", [])
source_branch = pull_request.get("head", {}).get("ref", "")
target_branch = pull_request.get("base", {}).get("ref", "")
sender = body.get("sender", {}).get("login")
# logic to ignore PRs from specific users
ignore_pr_users = get_settings().get("CONFIG.IGNORE_PR_AUTHORS", [])
if ignore_pr_users and sender:
if sender in ignore_pr_users:
get_logger().info(f"Ignoring PR from user '{sender}' due to 'config.ignore_pr_authors' setting")
return False
# logic to ignore PRs with specific titles
if title:
@ -284,7 +275,6 @@ def should_process_pr_logic(body) -> bool:
get_logger().info(f"Ignoring PR with labels '{labels_str}' due to config.ignore_pr_labels settings")
return False
# logic to ignore PRs with specific source or target branches
ignore_pr_source_branches = get_settings().get("CONFIG.IGNORE_PR_SOURCE_BRANCHES", [])
ignore_pr_target_branches = get_settings().get("CONFIG.IGNORE_PR_TARGET_BRANCHES", [])
if pull_request and (ignore_pr_source_branches or ignore_pr_target_branches):
@ -383,9 +373,6 @@ def _check_pull_request_event(action: str, body: dict, log_context: dict) -> Tup
async def _perform_auto_commands_github(commands_conf: str, agent: PRAgent, body: dict, api_url: str,
log_context: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}")
return
if not should_process_pr_logic(body): # Here we already updated the configuration with the repo settings
return {}
commands = get_settings().get(f"github_app.{commands_conf}")

View File

@ -1,12 +1,11 @@
import asyncio
import multiprocessing
import time
import traceback
from collections import deque
import traceback
from datetime import datetime, timezone
import aiohttp
import time
import requests
import aiohttp
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings
@ -84,7 +83,6 @@ async def is_valid_notification(notification, headers, handled_ids, session, use
return False, handled_ids
async with session.get(latest_comment, headers=headers) as comment_response:
check_prev_comments = False
user_tag = "@" + user_id
if comment_response.status == 200:
comment = await comment_response.json()
if 'id' in comment:
@ -102,6 +100,7 @@ async def is_valid_notification(notification, headers, handled_ids, session, use
get_logger().debug(f"no comment_body")
check_prev_comments = True
else:
user_tag = "@" + user_id
if user_tag not in comment_body:
get_logger().debug(f"user_tag not in comment_body")
check_prev_comments = True
@ -130,14 +129,12 @@ async def is_valid_notification(notification, headers, handled_ids, session, use
artifact={"comment": comment_body})
return True, handled_ids, comment, comment_body, pr_url, user_tag
get_logger().warning(f"Failed to fetch comments for PR: {pr_url}",
artifact={"comments": comments})
get_logger().error(f"Failed to fetch comments for PR: {pr_url}")
return False, handled_ids
return False, handled_ids
except Exception as e:
get_logger().exception(f"Error processing polling notification",
artifact={"notification": notification, "error": e})
get_logger().error(f"Error processing notification: {e}", artifact={"traceback": traceback.format_exc()})
return False, handled_ids

View File

@ -1,6 +1,6 @@
import copy
import json
import re
import json
from datetime import datetime
import uvicorn
@ -61,9 +61,6 @@ async def handle_request(api_url: str, body: str, log_context: dict, sender_id:
async def _perform_commands_gitlab(commands_conf: str, agent: PRAgent, api_url: str,
log_context: dict, data: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}", **log_context)
return
if not should_process_pr_logic(data): # Here we already updated the configurations
return
commands = get_settings().get(f"gitlab.{commands_conf}", {})
@ -100,14 +97,6 @@ def should_process_pr_logic(data) -> bool:
if not data.get('object_attributes', {}):
return False
title = data['object_attributes'].get('title')
sender = data.get("user", {}).get("username", "")
# logic to ignore PRs from specific users
ignore_pr_users = get_settings().get("CONFIG.IGNORE_PR_AUTHORS", [])
if ignore_pr_users and sender:
if sender in ignore_pr_users:
get_logger().info(f"Ignoring PR from user '{sender}' due to 'config.ignore_pr_authors' settings")
return False
# logic to ignore MRs for titles, labels and source, target branches.
ignore_mr_title = get_settings().get("CONFIG.IGNORE_PR_TITLE", [])
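
The ignore_pr_authors filter that appears in the webhook handler diffs above has the same shape across the GitHub, Bitbucket and GitLab servers; a dependency-free sketch with settings and logging stubbed out for illustration:

```python
import logging

logger = logging.getLogger("webhook")
settings = {"CONFIG.IGNORE_PR_AUTHORS": ["dependabot[bot]", "renovate[bot]"]}  # sample values

def should_process_pr(sender: str) -> bool:
    # Skip PRs opened by authors listed in config.ignore_pr_authors.
    ignore_pr_users = settings.get("CONFIG.IGNORE_PR_AUTHORS", [])
    if ignore_pr_users and sender and sender in ignore_pr_users:
        logger.info("Ignoring PR from user '%s' due to 'config.ignore_pr_authors' setting", sender)
        return False
    return True

print(should_process_pr("dependabot[bot]"))  # False
print(should_process_pr("alice"))            # True
```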

View File

@ -5,6 +5,7 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.servers.github_app import router
middleware = [Middleware(RawContextMiddleware)]
app = FastAPI(middleware=middleware)
app.include_router(router)

View File

@ -2,7 +2,7 @@ import hashlib
import hmac
import time
from collections import defaultdict
from typing import Any, Callable
from typing import Callable, Any
from fastapi import HTTPException

View File

@ -43,9 +43,6 @@ api_base = "" # the base url for your local Llama 2, Code Llama, and other model
vertex_project = "" # the google cloud platform project name for your vertexai deployment
vertex_location = "" # the google cloud platform location for your vertexai deployment
[google_ai_studio]
gemini_api_key = "" # the google AI Studio API key
[github]
# ---- Set the following only for deployment type == "user"
user_token = "" # A GitHub personal access token with 'repo' scope.
@ -63,7 +60,6 @@ webhook_secret = "<WEBHOOK SECRET>" # Optional, may be commented out.
[gitlab]
# Gitlab personal access token
personal_access_token = ""
shared_secret = "" # webhook secret
[bitbucket]
# For Bitbucket personal/repository bearer token
@ -91,6 +87,3 @@ pat = ""
# Optional, uncomment if you want to use Azure devops webhooks. Value assinged when you create the webhook
# webhook_username = "<basic auth user>"
# webhook_password = "<basic auth password>"
[deepseek]
key = ""

View File

@ -1,20 +1,18 @@
[config]
# models
model="gpt-4o-2024-11-20"
fallback_models=["gpt-4o-2024-08-06"]
#model_weak="gpt-4o-mini-2024-07-18" # optional, a weaker model to use for some easier tasks
model="gpt-4-turbo-2024-04-09"
model_turbo="gpt-4o-2024-08-06"
fallback_models=["gpt-4o-2024-05-13"]
# CLI
git_provider="github"
publish_output=true
publish_output_progress=true
publish_output_no_suggestions=true
verbosity_level=0 # 0,1,2
use_extra_bad_extensions=false
# Configurations
use_wiki_settings_file=true
use_repo_settings_file=true
use_global_settings_file=true
disable_auto_feedback = false
ai_timeout=120 # 2minutes
skip_keys = []
# token limits
@ -34,7 +32,6 @@ ai_disclaimer_title="" # Pro feature, title for a collapsible disclaimer to AI
ai_disclaimer="" # Pro feature, full text for the AI disclaimer
output_relevant_configurations=false
large_patch_policy = "clip" # "clip", "skip"
duplicate_prompt_examples = false
# seed
seed=-1 # set positive value to fix the seed (and ensure temperature=0)
temperature=0.2
@ -43,7 +40,6 @@ ignore_pr_title = ["^\\[Auto\\]", "^Auto"] # a list of regular expressions to ma
ignore_pr_target_branches = [] # a list of regular expressions of target branches to ignore from PR agent when an PR is created
ignore_pr_source_branches = [] # a list of regular expressions of source branches to ignore from PR agent when an PR is created
ignore_pr_labels = [] # labels to ignore from PR agent when an PR is created
ignore_pr_authors = [] # authors to ignore from PR agent when an PR is created
#
is_auto_command = false # will be auto-set to true if the command is triggered by an automation
enable_ai_metadata = false # will enable adding ai metadata
@ -57,6 +53,10 @@ require_can_be_split_review=false
require_security_review=true
require_ticket_analysis_review=true
# general options
num_code_suggestions=0
inline_code_comments = false
ask_and_reflect=false
#automatic_review=true
persistent_comment=true
extra_instructions = ""
final_update_message = true
@ -71,6 +71,7 @@ enable_intro_text=true
enable_help_text=false # Determines whether to include help text in the PR review. Enabled by default.
# auto approval
enable_auto_approval=false
maximal_review_effort=5
[pr_description] # /describe #
@ -89,7 +90,6 @@ publish_description_as_comment_persistent=true
## changes walkthrough section
enable_semantic_files_types=true
collapsible_file_list='adaptive' # true, false, 'adaptive'
collapsible_file_list_threshold=8
inline_file_summary=false # false, true, 'table'
# markers
use_description_markers=false
@ -98,6 +98,7 @@ include_generated_by_header=true
enable_large_pr_handling=true
max_ai_calls=4
async_ai_calls=true
mention_extra_files=true
#custom_labels = ['Bug fix', 'Tests', 'Bug fix with tests', 'Enhancement', 'Documentation', 'Other']
[pr_questions] # /ask #
@ -105,13 +106,13 @@ enable_help_text=false
[pr_code_suggestions] # /improve #
max_context_tokens=16000
max_context_tokens=14000
#
commitable_code_suggestions = false
dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
focus_only_on_problems=true
#
extra_instructions = ""
rank_suggestions = false
enable_help_text=false
enable_chat_text=false
enable_intro_text=true
@ -120,16 +121,14 @@ max_history_len=4
# enable to apply suggestion 💎
apply_suggestions_checkbox=true
# suggestions scoring
self_reflect_on_suggestions=true
suggestions_score_threshold=0 # [0-10]| recommend not to set this value above 8, since above it may clip highly relevant suggestions
new_score_mechanism=true
new_score_mechanism_th_high=9
new_score_mechanism_th_medium=7
# params for '/improve --extended' mode
auto_extended_mode=true
num_code_suggestions_per_chunk=4
max_number_of_calls = 3
parallel_calls = true
rank_extended_suggestions = false
final_clip_factor = 0.8
# self-review checkbox
demand_code_suggestions_self_review=false # add a checkbox for the author to self-review the code suggestions
@ -139,7 +138,6 @@ fold_suggestions_on_self_review=true # Pro feature. if true, the code suggestion
# Suggestion impact 💎
publish_post_process_suggestion_impact=true
wiki_page_accepted_suggestions=true
allow_thumbs_up_down=false
[pr_custom_prompt] # /custom_prompt #
prompt = """\
@ -163,7 +161,6 @@ class_name = "" # in case there are several methods with the same name in
[pr_update_changelog] # /update_changelog #
push_changelog_changes=false
extra_instructions = ""
add_pr_link=true
[pr_analyze] # /analyze #
enable_help_text=true
@ -220,7 +217,7 @@ override_deployment_type = true
handle_pr_actions = ['opened', 'reopened', 'ready_for_review']
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
# settings for "pull_request" event with "synchronize" action - used to detect and handle push triggers for new commits
@ -232,27 +229,27 @@ push_trigger_pending_tasks_backlog = true
push_trigger_pending_tasks_ttl = 300
push_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
]
[gitlab]
url = "https://gitlab.com"
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
handle_push_trigger = false
push_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
]
[bitbucket_app]
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/improve --pr_code_suggestions.commitable_code_suggestions=true",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]
avoid_full_files = false
@ -277,8 +274,8 @@ avoid_full_files = false
url = ""
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/improve --pr_code_suggestions.commitable_code_suggestions=true",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]
[litellm]
@ -310,16 +307,7 @@ number_of_results = 5
[lancedb]
uri = "./lancedb"
[best_practices]
content = ""
organization_name = ""
max_lines_allowed = 800
enable_global_best_practices = false
[auto_best_practices]
enable_auto_best_practices = true # public - general flag to disable all auto best practices usage
utilize_auto_best_practices = true # public - disable usage of auto best practices in the 'improve' tool
extra_instructions = "" # public - extra instructions to the auto best practices generation prompt
content = ""
max_patterns = 5 # max number of patterns to be detected
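
The [config] keys above are consumed elsewhere in this diff through Dynaconf-style dotted lookups (get_settings().get(...), get_settings().set(...)); a small sketch of that access pattern against a plain Dynaconf object rather than pr_agent's wrapper — the file path and values are illustrative:

```python
from dynaconf import Dynaconf

settings = Dynaconf(settings_files=["configuration.toml"])  # illustrative path

# Dotted lookups with defaults, as used by the webhook handlers above.
model = settings.get("config.model", "gpt-4o-2024-11-20")
ignore_authors = settings.get("config.ignore_pr_authors", [])

# Runtime overrides, mirroring set_claude_model() in git_providers/utils.py.
settings.set("config.model", "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0")
settings.set("config.fallback_models", [settings.get("config.model")])
```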

View File

@ -1,10 +1,7 @@
[pr_code_suggestions_prompt]
system="""You are PR-Reviewer, an AI specializing in Pull Request (PR) code analysis and suggestions.
{%- if not focus_only_on_problems %}
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality and performance.
{%- else %}
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix critical bugs and problems.
{%- endif %}
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality, readability, and performance.
The PR code diff will be in the following structured format:
======
@ -17,56 +14,42 @@ The PR code diff will be in the following structured format:
@@ ... @@ def func1():
__new hunk__
unchanged code line0
unchanged code line1
+new code line2 added
unchanged code line3
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
-old code line2 removed
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():
__new hunk__
unchanged code line4
+new code line5 added
+new code line5 removed in the PR
unchanged code line6
## File: 'src/file2.py'
...
======
Important notes about the structured diff format above:
1. Each PR code chunk is decoupled into separate '__new hunk__' and '__old hunk__' sections:
- The '__new hunk__' section shows the code chunk AFTER the PR changes.
- The '__old hunk__' section shows the code chunk BEFORE the PR changes. If no code was removed from the chunk, the '__old hunk__' section will be omitted.
2. The diff uses line prefixes to show changes:
'+' → new line code added (will appear only in '__new hunk__')
'-' → line code removed (will appear only in '__old hunk__')
' ' → unchanged context lines (will appear in both sections)
- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was removed in a specific chunk, the __old hunk__ section will be omitted.
- Line numbers were added for the '__new hunk__' sections to help referencing specific lines in the code suggestions. These numbers are for reference only and are not part of the actual code.
- Code lines are prefixed with symbols: '+' for new code added in the PR, '-' for code removed, and ' ' for unchanged code.
{%- if is_ai_metadata %}
3. When available, an AI-generated summary will precede each file's diff, with a high-level overview of the changes. Note that this summary may not be fully accurate or complete.
- When available, an AI-generated summary will precede each file's diff, with a high-level overview of the changes. Note that this summary may not be fully accurate or complete.
{%- endif %}
Specific guidelines for generating code suggestions:
{%- if not focus_only_on_problems %}
- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions.
{%- else %}
- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions. Return less suggestions if no pertinent ones are applicable.
{%- endif %}
- DO NOT suggest implementing changes that are already present in the '+' lines compared to the '-' lines.
- Focus your suggestions ONLY on new code introduced in the PR ('+' lines in '__new hunk__' sections).
{%- if not focus_only_on_problems %}
- Focus solely on enhancing new code introduced in the PR, identified by '+' prefixes in '__new hunk__' sections (after the line numbers).
- Prioritize suggestions that address potential issues, critical problems, and bugs in the PR code. Avoid repeating changes already implemented in the PR. If no pertinent suggestions are applicable, return an empty list.
- Don't suggest to add docstring, type hints, or comments, to remove unused imports, or to use more specific exception types.
{%- else %}
- Only give suggestions that address critical problems and bugs in the PR code. If no relevant suggestions are applicable, return an empty list.
- Do not suggest to change packages version, add missing import statement, or declare undefined variable.
{%- endif %}
- When mentioning code elements (variables, names, or files) in your response, surround them with backticks (`). For example: "verify that `user_id` is..."
- Note that you only see changed code segments (diff hunks in a PR), not the entire codebase. Avoid suggestions that might duplicate existing functionality or questioning code elements (like variables declarations or import statements) that may be defined elsewhere in the codebase.
- When referencing variables or names from the code, enclose them in backticks (`). Example: "ensure that `variable_name` is..."
- Be mindful you are viewing a partial PR code diff, not the full codebase. Avoid suggestions that might conflict with unseen code or alerting variables not declared in the visible scope, as the context is incomplete.
{%- if extra_instructions %}
@ -84,14 +67,12 @@ class CodeSuggestion(BaseModel):
relevant_file: str = Field(description="Full path of the relevant file")
language: str = Field(description="Programming language used by the relevant file")
suggestion_content: str = Field(description="An actionable suggestion to enhance, improve or fix the new code introduced in the PR. Don't present here actual code snippets, just the suggestion. Be short and concise")
existing_code: str = Field(description="A short code snippet, from a '__new hunk__' section after the PR changes, that the suggestion aims to enhance or fix. Include only complete code lines. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
existing_code: str = Field(description="A short code snippet from a '__new hunk__' section that the suggestion aims to enhance or fix. Include only complete code lines, without line numbers. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
improved_code: str = Field(description="A refined code snippet that replaces the 'existing_code' snippet after implementing the suggestion.")
one_sentence_summary: str = Field(description="A concise, single-sentence overview (up to 6 words) of the suggested improvement. Focus on the 'what'. Be general, and avoid method or variable names.")
{%- if not focus_only_on_problems %}
label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', 'typo'. Other relevant labels are also acceptable.")
{%- else %}
label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'critical bug', 'general'. The 'general' section should be used for suggestions that address a major issue, but are not necessarily on a critical level.")
{%- endif %}
one_sentence_summary: str = Field(description="A concise, single-sentence overview of the suggested improvement. Focus on the 'what'. Be general, and avoid method or variable names.")
relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the beginning of the 'existing code' snippet above")
relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the end of the 'existing code' snippet above")
label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability'. Other relevant labels are also acceptable.")
class PRCodeSuggestions(BaseModel):
@ -114,10 +95,13 @@ code_suggestions:
...
one_sentence_summary: |
...
relevant_lines_start: 12
relevant_lines_end: 13
label: |
...
```
Each YAML output MUST be after a newline, indented, with block scalar indicator ('|').
"""
@ -125,40 +109,12 @@ user="""--PR Info--
Title: '{{title}}'
{%- if date %}
Today's Date: {{date}}
{%- endif %}
The PR Diff:
======
{{ diff_no_line_numbers|trim }}
{{ diff|trim }}
======
{%- if duplicate_prompt_examples %}
Example output:
```yaml
code_suggestions:
- relevant_file: |
src/file1.py
language: |
python
suggestion_content: |
...
existing_code: |
...
improved_code: |
...
one_sentence_summary: |
...
label: |
...
```
(replace '...' with actual content)
{%- endif %}
Response (should be a valid YAML, and nothing else):
```yaml
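
The output schema embedded in the pr_code_suggestions prompt above can be written out as real Pydantic models; a hand-transcribed sketch of the variant without the relevant_lines_start/end fields, which could be used to validate the model's reply:

```python
from typing import List
from pydantic import BaseModel, Field

class CodeSuggestion(BaseModel):
    relevant_file: str = Field(description="Full path of the relevant file")
    language: str = Field(description="Programming language used by the relevant file")
    suggestion_content: str = Field(description="Actionable suggestion, without actual code snippets")
    existing_code: str = Field(description="Short snippet from a '__new hunk__' section targeted for improvement")
    improved_code: str = Field(description="Refined snippet that replaces 'existing_code'")
    one_sentence_summary: str = Field(description="Concise overview of the suggested improvement")
    label: str = Field(description="e.g. 'security', 'possible bug', 'performance', 'enhancement'")

class PRCodeSuggestions(BaseModel):
    code_suggestions: List[CodeSuggestion]
```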

View File

@ -15,8 +15,8 @@ Be particularly vigilant for suggestions that:
- Contradict or ignore parts of the PR's modifications
In such cases, assign the suggestion a score of 0.
Evaluate each valid suggestion by scoring its potential impact on the PR's correctness, quality and functionality.
In addition, you should also detect the line numbers in the '__new hunk__' section that correspond to the 'existing_code' snippet.
For valid suggestions, your role is to provide an impartial and precise score assessment that accurately reflects each suggestion's potential impact on the PR's correctness, quality and functionality.
Key guidelines for evaluation:
- Thoroughly examine both the suggestion content and the corresponding PR code diff. Be vigilant for potential errors in each suggestion, ensuring they are logically sound, accurate, and directly derived from the PR code diff.
@ -49,14 +49,14 @@ The PR code diff will be presented in the following structured format:
@@ ... @@ def func1():
__new hunk__
11 unchanged code line0
12 unchanged code line1
13 +new code line2 added
14 unchanged code line3
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
-old code line2 removed
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():
@ -82,8 +82,6 @@ The output must be a YAML object equivalent to type $PRCodeSuggestionsFeedback,
class CodeSuggestionFeedback(BaseModel):
suggestion_summary: str = Field(description="Repeated from the input")
relevant_file: str = Field(description="Repeated from the input")
relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the beginning of the relevant 'existing code' snippet")
relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the end of the relevant 'existing code' snippet")
suggestion_score: int = Field(description="Evaluate the suggestion and assign a score from 0 to 10. Give 0 if the suggestion is wrong. For valid suggestions, score from 1 (lowest impact/importance) to 10 (highest impact/importance).")
why: str = Field(description="Briefly explain the score given in 1-2 sentences, focusing on the suggestion's impact, relevance, and accuracy.")
@ -98,8 +96,6 @@ code_suggestions:
- suggestion_summary: |
Use a more descriptive variable name here
relevant_file: "src/file1.py"
relevant_lines_start: 13
relevant_lines_end: 14
suggestion_score: 6
why: |
The variable name 't' is not descriptive enough
@ -122,25 +118,6 @@ Below are {{ num_code_suggestions }} AI-generated code suggestions for enhancing
======
{%- if duplicate_prompt_examples %}
Example output:
```yaml
code_suggestions:
- suggestion_summary: |
...
relevant_file: "..."
relevant_lines_start: ...
relevant_lines_end: ...
suggestion_score: ...
why: |
...
- ...
```
(replace '...' with actual content)
{%- endif %}
Response (should be a valid YAML, and nothing else):
```yaml
"""

View File

@ -1,11 +1,15 @@
[pr_description_prompt]
system="""You are PR-Reviewer, a language model designed to review a Git Pull Request (PR).
Your task is to provide a full description for the PR content - type, description, title and files walkthrough.
- Focus on the new PR code (lines starting with '+' in the 'PR Git Diff' section).
{%- if enable_custom_labels %}
Your task is to provide a full description for the PR content - files walkthrough, title, type, description and labels.
{%- else %}
Your task is to provide a full description for the PR content - files walkthrough, title, type, and description.
{%- endif %}
- Focus on the new PR code (lines starting with '+').
- Keep in mind that the 'Previous title', 'Previous description' and 'Commit messages' sections may be partial, simplistic, non-informative or out of date. Hence, compare them to the PR diff code, and use them only as a reference.
- The generated title and description should prioritize the most significant changes.
- If needed, each YAML output should be in block scalar indicator ('|')
- When quoting variables, names or file paths from the code, use backticks (`) instead of single quote (').
- If needed, each YAML output should be in block scalar indicator ('|-')
- When quoting variables or names from the code, use backticks (`) instead of single quote (').
{%- if extra_instructions %}
@ -34,20 +38,22 @@ class PRType(str, Enum):
{%- if enable_semantic_files_types %}
class FileDescription(BaseModel):
filename: str = Field(description="The full file path of the relevant file")
{%- if include_file_summary_changes %}
filename: str = Field(description="The full file path of the relevant file.")
language: str = Field(description="The programming language of the relevant file.")
changes_summary: str = Field(description="concise summary of the changes in the relevant file, in bullet points (1-4 bullet points).")
{%- endif %}
changes_title: str = Field(description="one-line summary (5-10 words) capturing the main theme of changes in the file")
changes_title: str = Field(description="an informative title for the changes in the files, describing its main theme (5-10 words).")
label: str = Field(description="a single semantic label that represents a type of code changes that occurred in the File. Possible values (partial list): 'bug fix', 'tests', 'enhancement', 'documentation', 'error handling', 'configuration changes', 'dependencies', 'formatting', 'miscellaneous', ...")
{%- endif %}
class PRDescription(BaseModel):
type: List[PRType] = Field(description="one or more types that describe the PR content. Return the label member value (e.g. 'Bug fix', not 'bug_fix')")
description: str = Field(description="summarize the PR changes in up to four bullet points, each up to 8 words. For large PRs, add sub-bullets if needed. Order bullets by importance, with each bullet highlighting a key change group.")
title: str = Field(description="a concise and descriptive title that captures the PR's main theme")
{%- if enable_semantic_files_types %}
pr_files: List[FileDescription] = Field(max_items=20, description="a list of all the files that were changed in the PR, and summary of their changes. Each file must be analyzed regardless of change size.")
pr_files: List[FileDescription] = Field(max_items=15, description="a list of the files in the PR, and summary of their changes")
{%- endif %}
description: str = Field(description="an informative and concise description of the PR. Use bullet points. Display first the most significant changes.")
title: str = Field(description="an informative title for the PR, describing its main theme")
{%- if enable_custom_labels %}
labels: List[Label] = Field(min_items=0, description="choose the relevant custom labels that describe the PR content, and return their keys. Use the value field of the Label object to better understand the label meaning.")
{%- endif %}
=====
@ -58,24 +64,31 @@ Example output:
type:
- ...
- ...
description: |
...
title: |
...
{%- if enable_semantic_files_types %}
pr_files:
- filename: |
...
{%- if include_file_summary_changes %}
language: |
...
changes_summary: |
...
{%- endif %}
changes_title: |
...
label: |
label_key_1
...
...
{%- endif %}
description: |
...
title: |
...
{%- if enable_custom_labels %}
labels:
- |
...
- |
...
{%- endif %}
```
Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
@ -123,44 +136,13 @@ Commit messages:
{%- endif %}
The PR Git Diff:
The PR Diff:
=====
{{ diff|trim }}
=====
Note that lines in the diff body are prefixed with a symbol that represents the type of change: '-' for deletions, '+' for additions, and ' ' (a space) for unchanged lines.
{%- if duplicate_prompt_examples %}
Example output:
```yaml
type:
- Bug fix
- Refactoring
- ...
description: |
...
title: |
...
{%- if enable_semantic_files_types %}
pr_files:
- filename: |
...
{%- if include_file_summary_changes %}
changes_summary: |
...
{%- endif %}
changes_title: |
...
label: |
label_key_1
...
{%- endif %}
```
(replace '...' with the actual values)
{%- endif %}
Response (should be a valid YAML, and nothing else):
```yaml

View File

@ -1,6 +1,10 @@
[pr_review_prompt]
system="""You are PR-Reviewer, a language model designed to review a Git Pull Request (PR).
{%- if num_code_suggestions > 0 %}
Your task is to provide constructive and concise feedback for the PR, and also provide meaningful code suggestions.
{%- else %}
Your task is to provide constructive and concise feedback for the PR.
{%- endif %}
The review should focus on new code added in the PR code diff (lines starting with '+')
@ -16,20 +20,20 @@ The format we will use to present the PR code diff:
@@ ... @@ def func1():
__new hunk__
11 unchanged code line0
12 unchanged code line1
13 +new code line2 added
14 unchanged code line3
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
-old code line2 removed
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():
__new hunk__
unchanged code line4
+new code line5 removed
+new code line5 removed in the PR
unchanged code line6
## File: 'src/file2.py'
@ -43,8 +47,18 @@ __new hunk__
{%- if is_ai_metadata %}
- If available, an AI-generated summary will appear and provide a high-level overview of the file changes. Note that this summary may not be fully accurate or complete.
{%- endif %}
- When quoting variables, names or file paths from the code, use backticks (`) instead of single quote (').
- When quoting variables or names from the code, use backticks (`) instead of single quote (').
{%- if num_code_suggestions > 0 %}
Code suggestions guidelines:
- Provide up to {{ num_code_suggestions }} code suggestions. Try to provide diverse and insightful suggestions.
- Focus on important suggestions like fixing code problems, issues and bugs. As a second priority, provide suggestions for meaningful code improvements, like performance, vulnerability, modularity, and best practices.
- Avoid making suggestions that have already been implemented in the PR code. For example, if you want to add logs, or change a variable to const, or anything else, make sure it isn't already in the PR code.
- Don't suggest to add docstring, type hints, or comments.
- Suggestions should address the new code added in the PR diff (lines starting with '+')
{%- endif %}
{%- if extra_instructions %}
@ -66,8 +80,8 @@ class SubPR(BaseModel):
class KeyIssuesComponentLink(BaseModel):
relevant_file: str = Field(description="The full file path of the relevant file")
issue_header: str = Field(description="One or two word title for the the issue. For example: 'Possible Bug', etc.")
issue_content: str = Field(description="A short and concise summary of what should be further inspected and validated during the PR review process for this issue. Do not reference line numbers in this field.")
issue_header: str = Field(description="One or two word title for the the issue. For example: 'Possible Bug', 'Performance Issue', 'Code Smell', etc.")
issue_content: str = Field(description="A short and concise summary of what should be further inspected and validated during the PR review process for this issue. Don't state line numbers here")
start_line: int = Field(description="The start line that corresponds to this issue in the relevant file")
end_line: int = Field(description="The end line that corresponds to this issue in the relevant file")
@ -75,10 +89,10 @@ class KeyIssuesComponentLink(BaseModel):
class TicketCompliance(BaseModel):
ticket_url: str = Field(description="Ticket URL or ID")
ticket_requirements: str = Field(description="Repeat, in your own words (in bullet points), all the requirements, sub-tasks, DoD, and acceptance criteria raised by the ticket")
fully_compliant_requirements: str = Field(description="Bullet-point list of items from the 'ticket_requirements' section above that are fulfilled by the PR code. Don't explain how the requirements are met, just list them shortly. Can be empty")
not_compliant_requirements: str = Field(description="Bullet-point list of items from the 'ticket_requirements' section above that are not fulfilled by the PR code. Don't explain how the requirements are not met, just list them shortly. Can be empty")
requires_further_human_verification: str = Field(description="Bullet-point list of items from the 'ticket_requirements' section above that cannot be assessed through code review alone, are unclear, or need further human review (e.g., browser testing, UI checks). Leave empty if all 'ticket_requirements' were marked as fully compliant or not compliant")
ticket_requirements: str = Field(description="Repeat, in your own words, all ticket requirements, in bullet points")
fully_compliant_requirements: str = Field(description="A list, in bullet points, of which requirements are met by the PR code. Don't explain how the requirements are met, just list them shortly. Can be empty")
not_compliant_requirements: str = Field(description="A list, in bullet points, of which requirements are not met by the PR code. Don't explain how the requirements are not met, just list them shortly. Can be empty")
overall_compliance_level: str = Field(description="Overall give this PR one of these three values in relation to the ticket: 'Fully compliant', 'Partially compliant', or 'Not compliant'")
{%- endif %}
class Review(BaseModel):
@ -97,16 +111,32 @@ class Review(BaseModel):
{%- if question_str %}
insights_from_user_answers: str = Field(description="shortly summarize the insights you gained from the user's answers to the questions")
{%- endif %}
key_issues_to_review: List[KeyIssuesComponentLink] = Field("A short and diverse list (0-3 issues) of high-priority bugs, problems or performance concerns introduced in the PR code, which the PR reviewer should further focus on and validate during the review process.")
key_issues_to_review: List[KeyIssuesComponentLink] = Field("A diverse list of bugs, issue or major performance concerns introduced in this PR, which the PR reviewer should further investigate")
{%- if require_security_review %}
security_concerns: str = Field(description="Does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? Answer 'No' (without explaining why) if there are no possible issues. If there are security concerns or issues, start your answer with a short header, such as: 'Sensitive information exposure: ...', 'SQL injection: ...' etc. Explain your answer. Be specific and give examples if possible")
{%- endif %}
{%- if require_can_be_split_review %}
can_be_split: List[SubPR] = Field(min_items=0, max_items=3, description="Can this PR, which contains {{ num_pr_files }} changed files in total, be divided into smaller sub-PRs with distinct tasks that can be reviewed and merged independently, regardless of the order ? Make sure that the sub-PRs are indeed independent, with no code dependencies between them, and that each sub-PR represent a meaningful independent task. Output an empty list if the PR code does not need to be split.")
{%- endif %}
{%- if num_code_suggestions > 0 %}
class CodeSuggestion(BaseModel):
relevant_file: str = Field(description="The full file path of the relevant file")
language: str = Field(description="The programming language of the relevant file")
suggestion: str = Field(description="a concrete suggestion for meaningfully improving the new PR code. Also describe how, specifically, the suggestion can be applied to new PR code. Add tags with importance measure that matches each suggestion ('important' or 'medium'). Do not make suggestions for updating or adding docstrings, renaming PR title and description, or linter like.")
relevant_line: str = Field(description="a single code line taken from the relevant file, to which the suggestion applies. The code line should start with a '+'. Make sure to output the line exactly as it appears in the relevant file")
{%- endif %}
{%- if num_code_suggestions > 0 %}
class PRReview(BaseModel):
review: Review
code_feedback: List[CodeSuggestion]
{%- else %}
class PRReview(BaseModel):
review: Review
{%- endif %}
=====
@ -155,6 +185,18 @@ review:
title: ...
- ...
{%- endif %}
{%- if num_code_suggestions > 0 %}
code_feedback:
- relevant_file: |
directory/xxx.py
language: |
python
suggestion: |
xxx [important]
relevant_line: |
xxx
{%- endif %}
```
Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
@ -221,59 +263,6 @@ The PR code diff:
======
{%- if duplicate_prompt_examples %}
Example output:
```yaml
review:
{%- if related_tickets %}
ticket_compliance_check:
- ticket_url: |
...
ticket_requirements: |
...
fully_compliant_requirements: |
...
not_compliant_requirements: |
...
overall_compliance_level: |
...
{%- endif %}
{%- if require_estimate_effort_to_review %}
estimated_effort_to_review_[1-5]: |
3
{%- endif %}
{%- if require_score %}
score: 89
{%- endif %}
relevant_tests: |
No
key_issues_to_review:
- relevant_file: |
...
issue_header: |
...
issue_content: |
...
start_line: ...
end_line: ...
- ...
security_concerns: |
No
{%- if require_can_be_split_review %}
can_be_split:
- relevant_files:
- ...
- ...
title: ...
- ...
{%- endif %}
```
(replace '...' with the actual values)
{%- endif %}
Response (should be a valid YAML, and nothing else):
```yaml
"""

Some files were not shown because too many files have changed in this diff.