mirror of https://github.com/qodo-ai/pr-agent.git
synced 2025-07-04 21:00:40 +08:00

Compare commits — 1 commit
hl/detect_... → mrT23-patc

Author | SHA1 | Date
---|---|---
 | 5430e9ab5a | 
.github/workflows/build-and-test.yaml (2 changes, vendored)

@@ -37,3 +37,5 @@ jobs:
        name: Test dev docker
        run: |
          docker run --rm codiumai/pr-agent:test pytest -v tests/unittest
.github/workflows/code_coverage.yaml (4 changes, vendored)

@@ -37,7 +37,7 @@ jobs:
      - id: code_cov
        name: Test dev docker
        run: |
          docker run --name test_container codiumai/pr-agent:test pytest tests/unittest --cov=pr_agent --cov-report term --cov-report xml:coverage.xml
          docker cp test_container:/app/coverage.xml coverage.xml
          docker rm test_container
@@ -51,4 +51,4 @@ jobs:
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4.0.1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
.github/workflows/docs-ci.yaml (6 changes, vendored)

@@ -1,4 +1,4 @@
name: docs-ci
on:
  push:
    branches:
@@ -20,14 +20,14 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: 3.x
      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
      - uses: actions/cache@v4
        with:
          key: mkdocs-material-${{ env.cache_id }}
          path: .cache
          restore-keys: |
            mkdocs-material-
      - run: pip install mkdocs-material
      - run: pip install "mkdocs-material[imaging]"
      - run: pip install mkdocs-glightbox
      - run: mkdocs gh-deploy -f docs/mkdocs.yml --force
.github/workflows/e2e_tests.yaml (2 changes, vendored)

@@ -43,4 +43,4 @@ jobs:
      - id: test3
        name: E2E bitbucket app
        run: |
          docker run -e BITBUCKET.USERNAME=${{ secrets.BITBUCKET_USERNAME }} -e BITBUCKET.PASSWORD=${{ secrets.BITBUCKET_PASSWORD }} --rm codiumai/pr-agent:test pytest -v tests/e2e_tests/test_bitbucket_app.py
.github/workflows/pr-agent-review.yaml (5 changes, vendored)

@@ -1,4 +1,4 @@
# This workflow enables developers to call PR-Agents `/[actions]` in PR's comments and upon PR creation.
# Learn more at https://www.codium.ai/pr-agent/
# This is v0.2 of this workflow file
@@ -30,3 +30,6 @@ jobs:
          GITHUB_ACTION_CONFIG.AUTO_DESCRIBE: true
          GITHUB_ACTION_CONFIG.AUTO_REVIEW: true
          GITHUB_ACTION_CONFIG.AUTO_IMPROVE: true
.github/workflows/pre-commit.yml (17 changes, vendored)

@@ -1,17 +0,0 @@
# disabled. We might run it manually if needed.
name: pre-commit

on:
  workflow_dispatch:
#  pull_request:
#  push:
#    branches: [main]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v5
      # SEE https://github.com/pre-commit/action
      - uses: pre-commit/action@v3.0.1
.gitignore (2 changes, vendored)

@@ -8,4 +8,4 @@ dist/
*.egg-info/
build/
.DS_Store
docs/.cache/
@@ -1,46 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks

default_language_version:
  python: python3

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-added-large-files
      - id: check-toml
      - id: check-yaml
      - id: end-of-file-fixer
      - id: trailing-whitespace
#  - repo: https://github.com/rhysd/actionlint
#    rev: v1.7.3
#    hooks:
#      - id: actionlint
  - repo: https://github.com/pycqa/isort
    # rev must match what's in dev-requirements.txt
    rev: 5.13.2
    hooks:
      - id: isort
#  - repo: https://github.com/PyCQA/bandit
#    rev: 1.7.10
#    hooks:
#      - id: bandit
#        args: [
#          "-c", "pyproject.toml",
#        ]
#  - repo: https://github.com/astral-sh/ruff-pre-commit
#    rev: v0.7.1
#    hooks:
#      - id: ruff
#        args:
#          - --fix
#      - id: ruff-format
#  - repo: https://github.com/PyCQA/autoflake
#    rev: v2.3.1
#    hooks:
#      - id: autoflake
#        args:
#          - --in-place
#          - --remove-all-unused-imports
#          - --remove-unused-variables
LICENSE (2 changes)

@@ -199,4 +199,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -1,2 +1,2 @@
recursive-include pr_agent *.toml
recursive-exclude pr_agent *.secrets.toml
README.md (152 changes)

@@ -13,13 +13,15 @@
Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
</div>

[](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
[](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl)
[](https://github.com/apps/qodo-merge-pro/)
[](https://github.com/apps/qodo-merge-pro-for-open-source/)
[](https://pr-agent-docs.codium.ai/finetuning_benchmark/)
[](https://discord.com/channels/1057273017547378788/1126104260430528613)
<a href="https://github.com/Codium-ai/pr-agent/commits/main">
  <img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
</a>
[](https://twitter.com/codiumai)
[](https://www.codium.ai/images/pr_agent/cheat_sheet.pdf)
</div>

### [Documentation](https://pr-agent-docs.codium.ai/)
@@ -38,85 +40,98 @@ Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by
- [PR-Agent Pro 💎](https://pr-agent-docs.codium.ai/overview/pr_agent_pro/)
- [How it works](#how-it-works)
- [Why use PR-Agent?](#why-use-pr-agent)

## News and Updates

### December 2, 2024

Open-source repositories can now freely use Qodo Merge Pro, and enjoy easy one-click installation using a marketplace [app](https://github.com/apps/qodo-merge-pro-for-open-source).

<kbd><img src="https://github.com/user-attachments/assets/b0838724-87b9-43b0-ab62-73739a3a855c" width="512"></kbd>

See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details about installing Qodo Merge Pro for private repositories.

### November 18, 2024

A new mode was enabled by default for code suggestions - `--pr_code_suggestions.focus_only_on_problems=true`:

- This option reduces the number of code suggestions received
- The suggestions will focus more on identifying and fixing code problems, rather than style considerations like best practices, maintainability, or readability.
- The suggestions will be categorized into just two groups: "Possible Issues" and "General".

Still, if you prefer the previous mode, you can set `--pr_code_suggestions.focus_only_on_problems=false` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
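As an illustration, a minimal sketch of how that setting might look in a repository-level configuration file (assuming the standard `.pr_agent.toml` TOML layout; adjust the file name and location to your setup):

```toml
# hypothetical example restoring the previous, broader suggestion mode
[pr_code_suggestions]
focus_only_on_problems = false
```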
**Example results:**

Original mode

<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_original_mode.png" width="512"></kbd>

Focused mode

<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_focused_mode.png" width="512"></kbd>

### November 4, 2024

Qodo Merge PR Agent will now leverage context from Jira or GitHub tickets to enhance the PR Feedback. Read more about this feature
[here](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/)

### November 3, 2024

Meaningful improvement to the quality of code suggestions by separating the code suggestion generation from [line number detection](https://github.com/Codium-ai/pr-agent/pull/1338)

<kbd></kbd>

### October 27, 2024

Qodo Merge PR Agent will now automatically document accepted code suggestions in a dedicated wiki page (`.pr_agent_accepted_suggestions`), enabling users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.

This dedicated wiki page will also serve as a foundation for future AI model improvements, allowing it to learn from historically implemented suggestions and generate more targeted, contextually relevant recommendations.
Read more about this novel feature [here](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking).

<kbd><img href="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" src="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" width="768"></kbd>

### October 21, 2024
**Disable publishing labels by default:**

The default setting for `pr_description.publish_labels` has been updated to `false`. This means that labels generated by the `/describe` tool will no longer be published, unless this configuration is explicitly set to `true`.

We constantly strive to balance informative AI analysis with reducing unnecessary noise. User feedback indicated that in many cases, the original PR title alone provides sufficient information, making the generated labels (`enhancement`, `documentation`, `bug fix`, ...) redundant.
The [`review_effort`](https://qodo-merge-docs.qodo.ai/tools/review/#configuration-options) label, generated by the `review` tool, will still be published by default, as it provides valuable information enabling reviewers to prioritize small PRs first.

However, every user has different preferences. To still publish the `describe` labels, set `pr_description.publish_labels=true` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
For more tailored and relevant labeling, we recommend using the [`custom_labels 💎`](https://qodo-merge-docs.qodo.ai/tools/custom_labels/) tool, that allows generating labels specific to your project's needs.
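A minimal sketch of the corresponding configuration entry (assuming the same `.pr_agent.toml` layout as above):

```toml
# hypothetical example re-enabling labels generated by the /describe tool
[pr_description]
publish_labels = true
```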
<kbd></kbd>

→

<kbd></kbd>

### October 14, 2024
Improved support for GitHub enterprise server with [GitHub Actions](https://qodo-merge-docs.qodo.ai/installation/github/#action-for-github-enterprise-server)

### October 10, 2024
New ability for the `review` tool - **ticket compliance feedback**. If the PR contains a ticket number, PR-Agent will check if the PR code actually [complies](https://github.com/Codium-ai/pr-agent/pull/1279#issuecomment-2404042130) with the ticket requirements.

<kbd><img src="https://github.com/user-attachments/assets/4a2a728b-5f47-40fa-80cc-16efd296938c" width="768"></kbd>

## Overview
<div style="text-align:left;">

Supported commands per platform:

| | | GitHub | GitLab | Bitbucket | Azure DevOps |
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
| TOOLS | [Review](https://qodo-merge-docs.qodo.ai/tools/review/) | ✅ | ✅ | ✅ | ✅ |
| | [Describe](https://qodo-merge-docs.qodo.ai/tools/describe/) | ✅ | ✅ | ✅ | ✅ |
| | [Improve](https://qodo-merge-docs.qodo.ai/tools/improve/) | ✅ | ✅ | ✅ | ✅ |
| | [Ask](https://qodo-merge-docs.qodo.ai/tools/ask/) | ✅ | ✅ | ✅ | ✅ |
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline File Summary](https://pr-agent-docs.codium.ai/tools/describe#inline-file-summary) 💎 | ✅ | | | |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Ask on code lines](https://pr-agent-docs.codium.ai/tools/ask#ask-lines) | ✅ | ✅ | | |
| | [Update CHANGELOG](https://qodo-merge-docs.qodo.ai/tools/update_changelog/) | ✅ | ✅ | ✅ | ✅ |
| | [Ticket Context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/) 💎 | ✅ | ✅ | ✅ | |
| | [Utilizing Best Practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) 💎 | ✅ | ✅ | ✅ | |
| | [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) 💎 | ✅ | | | |
| | [Suggestion Tracking](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking) 💎 | ✅ | ✅ | | |
| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
| | [PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
| | [Custom Labels](https://pr-agent-docs.codium.ai/tools/custom_labels/) 💎 | ✅ | ✅ | | |
| | [Analyze](https://pr-agent-docs.codium.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
| | [Similar Code](https://pr-agent-docs.codium.ai/tools/similar_code/) 💎 | ✅ | | | |
| | [Custom Prompt](https://pr-agent-docs.codium.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | ✅ | |
| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | ✅ | | |
| | Reflect and Review | ✅ | ✅ | ✅ | ✅ |
| | Update CHANGELOG.md | ✅ | ✅ | ✅ | ✅ |
| | Find Similar Issue | ✅ | | | |
| | [Add PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
| | [Custom Labels](https://pr-agent-docs.codium.ai/tools/custom_labels/) 💎 | ✅ | ✅ | | |
| | [Analyze](https://pr-agent-docs.codium.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
| | [Similar Code](https://pr-agent-docs.codium.ai/tools/similar_code/) 💎 | ✅ | | | |
| | | | | | |
| USAGE | [CLI](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#local-repo-cli) | ✅ | ✅ | ✅ | ✅ |
| | [App / webhook](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-app) | ✅ | ✅ | ✅ | ✅ |
| | [Tagging bot](https://github.com/Codium-ai/pr-agent#try-it-now) | ✅ | | | |
| | [Actions](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) | ✅ |✅| ✅ |✅|
| USAGE | CLI | ✅ | ✅ | ✅ | ✅ |
| | App / webhook | ✅ | ✅ | ✅ | ✅ |
| | Tagging bot | ✅ | | | |
| | Actions | ✅ |✅| ✅ |✅|
| | | | | | |
| CORE | [PR compression](https://qodo-merge-docs.qodo.ai/core-abilities/compression_strategy/) | ✅ | ✅ | ✅ | ✅ |
| CORE | PR compression | ✅ | ✅ | ✅ | ✅ |
| | Repo language prioritization | ✅ | ✅ | ✅ | ✅ |
| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
| | [Multiple models support](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/) | ✅ | ✅ | ✅ | ✅ |
| | [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/) | ✅ | ✅ | ✅ | ✅ |
| | [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/) | ✅ | ✅ | ✅ | ✅ |
| | [Self reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/) | ✅ | ✅ | ✅ | ✅ |
| | [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/) 💎 | ✅ | ✅ | ✅ | |
| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
| | [Static code analysis](https://pr-agent-docs.codium.ai/core-abilities/#static-code-analysis) 💎 | ✅ | ✅ | ✅ | |
| | [Global and wiki configurations](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) 💎 | ✅ | ✅ | ✅ | |
| | [PR interactive actions](https://www.codium.ai/images/pr_agent/pr-actions.mp4) 💎 | ✅ | ✅ | | |
| | [Impact Evaluation](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) 💎 | ✅ | ✅ | | |
- 💎 means this feature is available only in [PR-Agent Pro](https://www.codium.ai/pricing/)

[//]: # (- Support for additional git providers is described in [here](./docs/Full_environments.md))
@@ -177,9 +192,14 @@ ___
</kbd>
</p>
</div>
<hr>

<h4><a href="https://github.com/Codium-ai/pr-agent/pull/530">/generate_labels</a></h4>
<div align="center">
<p float="center">
<kbd><img src="https://www.codium.ai/images/pr_agent/geneare_custom_labels_main_short.png" width="300"></kbd>
</p>
</div>

[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/78#issuecomment-1639739496">/reflect_and_review:</a></h4>)

@@ -250,7 +270,7 @@ Note that when you set your own PR-Agent or use CodiumAI hosted PR-Agent, there
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the PR-Agent app to your GitHub\GitLab\BitBucket repo.
2. **Improved privacy** - No data will be stored or used to train models. PR-Agent Pro will employ zero data retention, and will use an OpenAI account with zero data retention.
3. **Improved support** - PR-Agent Pro users will receive priority support, and will be able to request new features and capabilities.
4. **Extra features** - In addition to the benefits listed above, PR-Agent Pro will emphasize more customization, and the usage of static code analysis, in addition to LLM logic, to improve results.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for a list of features available in PR-Agent Pro.
@@ -88,7 +88,7 @@ Significant documentation updates (see [Installation Guide](https://github.com/C
- codiumai/pr-agent:0.7-gitlab_webhook
- codiumai/pr-agent:0.7-github_polling
- codiumai/pr-agent:0.7-github_action

### Added::Algo
- New tool /similar_issue - Currently on GitHub app and CLI: indexes the issues in the repo, finds the most similar issues to the target issue.
- Describe markers: Empower the /describe tool with a templating capability (see more details in https://github.com/Codium-ai/pr-agent/pull/273).
@@ -1 +1 @@
<?xml version="1.0" encoding="UTF-8"?><svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 109.77 81.94"><defs><style>.cls-1{fill:#7968fa;}.cls-1,.cls-2{stroke-width:0px;}.cls-2{fill:#5ae3ae;}</style></defs><path class="cls-2" d="m109.77,40.98c0,22.62-7.11,40.96-15.89,40.96-3.6,0-6.89-3.09-9.58-8.31,6.82-7.46,11.22-19.3,11.22-32.64s-4.4-25.21-11.22-32.67C86.99,3.09,90.29,0,93.89,0c8.78,0,15.89,18.33,15.89,40.97"/><path class="cls-1" d="m95.53,40.99c0,13.35-4.4,25.19-11.23,32.64-3.81-7.46-6.28-19.3-6.28-32.64s2.47-25.21,6.28-32.67c6.83,7.46,11.23,19.32,11.23,32.67"/><path class="cls-2" d="m55.38,78.15c-4.99,2.42-10.52,3.79-16.38,3.79C17.46,81.93,0,63.6,0,40.98S17.46,0,39,0C44.86,0,50.39,1.37,55.38,3.79c-9.69,6.47-16.43,20.69-16.43,37.19s6.73,30.7,16.43,37.17"/><path class="cls-1" d="m78.02,40.99c0,16.48-9.27,30.7-22.65,37.17-9.69-6.47-16.43-20.69-16.43-37.17S45.68,10.28,55.38,3.81c13.37,6.49,22.65,20.69,22.65,37.19"/><path class="cls-2" d="m84.31,73.63c-4.73,5.22-10.64,8.31-17.06,8.31-4.24,0-8.27-1.35-11.87-3.79,13.37-6.48,22.65-20.7,22.65-37.17,0,13.35,2.47,25.19,6.28,32.64"/><path class="cls-2" d="m84.31,8.31c-3.81,7.46-6.28,19.32-6.28,32.67,0-16.5-9.27-30.7-22.65-37.19,3.6-2.45,7.63-3.8,11.87-3.8,6.43,0,12.33,3.09,17.06,8.31"/></svg>
Before: 1.2 KiB | After: 1.2 KiB
@@ -2,3 +2,4 @@ We take your code's security and privacy seriously:

- The Chrome extension will not send your code to any external servers.
- For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge Pro integration.
@@ -2,9 +2,9 @@

With a single-click installation you will gain access to a context-aware chat on your pull requests code, a toolbar extension with multiple AI feedbacks, Qodo Merge filters, and additional abilities.

The extension is powered by top code models like Claude 3.5 Sonnet and GPT4. All the extension's features are free to use on public repositories.

For private repositories, you will need to install [Qodo Merge Pro](https://github.com/apps/qodo-merge-pro) in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
For private repositories, you will need to install [Qodo Merge Pro](https://github.com/apps/codiumai-pr-agent-pro) in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
For a demonstration of how to install Qodo Merge Pro and use it with the Chrome extension, please refer to the tutorial video at the provided [link](https://codium.ai/images/pr_agent/private_repos.mp4).

<img src="https://codium.ai/images/pr_agent/PR-AgentChat.gif" width="768">
@@ -1,2 +1,2 @@
## Overview
TBD
@@ -12,9 +12,9 @@ We prioritize the languages of the repo based on the following criteria:

1. Exclude binary files and non code files (e.g. images, pdfs, etc)
2. Given the main languages used in the repo
3. We sort the PR files by the most common languages in the repo (in descending order):
   * ```[[file.py, file2.py],[file3.js, file4.jsx],[readme.md]]```

### Small PR
In this case, we can fit the entire PR in a single prompt:
@@ -1,7 +1,7 @@
## TL;DR

Qodo Merge uses an **asymmetric and dynamic context strategy** to improve AI analysis of code changes in pull requests.
It provides more context before changes than after, and dynamically adjusts the context based on code structure (e.g., enclosing functions or classes).
This approach balances providing sufficient context for accurate analysis, while avoiding needle-in-the-haystack information overload that could degrade AI performance or exceed token limits.

## Introduction
@@ -17,12 +17,12 @@ Pull request code changes are retrieved in a unified diff format, showing three
code line that already existed in the file...
code line that already existed in the file...
code line that already existed in the file...

@@ -26,2 +26,4 @@ def func2():
...
```

This unified diff format can be challenging for AI models to interpret accurately, as it provides limited context for understanding the full scope of code changes.
The presentation of code using '+', '-', and ' ' symbols to indicate additions, deletions, and unchanged lines respectively also differs from the standard code formatting typically used to train AI models.

@@ -37,7 +37,7 @@ Pros:
Cons:

- Excessive context may overwhelm the model with extraneous information, creating a "needle in a haystack" scenario where focusing on the relevant details (the code that actually changed) becomes challenging.
LLM quality is known to degrade when the context gets larger.
Pull requests often encompass multiple changes across many files, potentially spanning hundreds of lines of modified code. This complexity presents a genuine risk of overwhelming the model with excessive context.

- Increased context expands the token count, increasing processing time and cost, and may prevent the model from processing the entire pull request in a single pass.
@@ -47,18 +47,18 @@ To address these challenges, Qodo Merge employs an **asymmetric** and **dynamic*

**Asymmetric:**

We start by recognizing that the context preceding a code change is typically more crucial for understanding the modification than the context following it.
Consequently, Qodo Merge implements an asymmetric context policy, decoupling the context window into two distinct segments: one for the code before the change and another for the code after.

By independently adjusting each context window, Qodo Merge can supply the model with a more tailored and pertinent context for individual code changes.

**Dynamic:**

We also employ a "dynamic" context strategy.
We start by recognizing that the optimal context for a code change often corresponds to its enclosing code component (e.g., function, class), rather than a fixed number of lines.
Consequently, we dynamically adjust the context window based on the code's structure, ensuring the model receives the most pertinent information for each modification.

To prevent overwhelming the model with excessive context, we impose a limit on the number of lines searched when identifying the enclosing component.
This balance allows for comprehensive understanding while maintaining efficiency and limiting context token usage.

## Appendix - relevant configuration options
@@ -69,4 +69,4 @@ allow_dynamic_context=true # Allow dynamic context extension
max_extra_lines_before_dynamic_context = 8 # will try to include up to X extra lines before the hunk in the patch, until we reach an enclosing function or class
patch_extra_lines_before = 3 # Number of extra lines (+3 default ones) to include before each hunk in the patch
patch_extra_lines_after = 1 # Number of extra lines (+3 default ones) to include after each hunk in the patch
```
@@ -1,29 +1,16 @@
# Fetching Ticket Context for PRs
`Supported Git Platforms : GitHub, GitLab, Bitbucket`

## Overview
Qodo Merge PR Agent streamlines code review workflows by seamlessly connecting with multiple ticket management systems.
This integration enriches the review process by automatically surfacing relevant ticket information and context alongside code changes.

## Ticket systems supported
- GitHub
- Jira (💎)

Ticket data fetched:

1. Ticket Title
2. Ticket Description
3. Custom Fields (Acceptance criteria)
4. Subtasks (linked tasks)
5. Labels
6. Attached Images/Screenshots

## Affected Tools

Ticket Recognition Requirements:

- The PR description should contain a link to the ticket, or the branch name should start with the ticket id / number.
- For Jira tickets, you should follow the instructions in [Jira Integration](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/#jira-integration) in order to authenticate with Jira.
1. The PR description should contain a link to the ticket.
2. For Jira tickets, you should follow the instructions in [Jira Integration](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/#jira-integration) in order to authenticate with Jira.

### Describe tool
Qodo Merge PR Agent will recognize the ticket and use the ticket content (title, description, labels) to provide additional context for the code changes.
@@ -62,18 +49,12 @@ Since Qodo Merge PR Agent is integrated with GitHub, it doesn't require any addi
### Jira Integration 💎

We support both Jira Cloud and Jira Server/Data Center.
To integrate with Jira, you can link your PR to a ticket using either of these methods:
To integrate with Jira, the PR description should contain a link to the Jira ticket.

**Method 1: Description Reference:**

Include a ticket reference in your PR description using either the complete URL format https://<JIRA_ORG>.atlassian.net/browse/ISSUE-123 or the shortened ticket ID ISSUE-123.

**Method 2: Branch Name Detection:**

Name your branch with the ticket ID as a prefix (e.g., `ISSUE-123-feature-description` or `ISSUE-123/feature-description`).
For Jira integration, include a ticket reference in your PR description using either the complete URL format `https://<JIRA_ORG>.atlassian.net/browse/ISSUE-123` or the shortened ticket ID `ISSUE-123`.

!!! note "Jira Base URL"
    For shortened ticket IDs or branch detection (method 2), you must configure the Jira base URL in your configuration file under the [jira] section:
    If using the shortened format, ensure your configuration file contains the Jira base URL under the [jira] section like this:

```toml
[jira]
@@ -131,4 +112,4 @@ Currently, we only support the Personal Access Token (PAT) Authentication method
[jira]
jira_base_url = "YOUR_JIRA_BASE_URL" # e.g. https://jira.example.com
jira_api_token = "YOUR_API_TOKEN"
```
@@ -41,4 +41,4 @@ Here are key metrics that the dashboard tracks:

#### Suggestion Score Distribution
{width=512}
> Explanation: The distribution of the suggestion score for the implemented suggestions, ensuring that higher-scored suggestions truly represent more significant improvements.
@@ -14,7 +14,7 @@ Qodo Merge utilizes a variety of core abilities to provide a comprehensive and e

## Blogs

Here are some additional technical blogs from Qodo, that delve deeper into the core capabilities and features of Large Language Models (LLMs) when applied to coding tasks.
These resources provide more comprehensive insights into leveraging LLMs for software development.

### Code Generation and LLMs
@@ -26,4 +26,4 @@ These resources provide more comprehensive insights into leveraging LLMs for sof
- [Introduction to Code Coverage Testing](https://www.qodo.ai/blog/introduction-to-code-coverage-testing/)

### Cost Optimization
- [Reduce Your Costs by 30% When Using GPT for Python Code](https://www.qodo.ai/blog/reduce-your-costs-by-30-when-using-gpt-3-for-python-code/)
@@ -1,2 +1,2 @@
## Interactive invocation 💎
TBD
@@ -53,4 +53,4 @@ __old hunk__

(4) All the metadata described above represents several levels of cumulative analysis - ranging from hunk level, to file level, to PR level, to organization level.
This comprehensive approach enables Qodo Merge AI models to generate more precise and contextually relevant suggestions and feedback.
@@ -1,7 +1,7 @@
## TL;DR

Qodo Merge implements a **self-reflection** process where the AI model reflects, scores, and re-ranks its own suggestions, eliminating irrelevant or incorrect ones.
This approach improves the quality and relevance of suggestions, saving users time and enhancing their experience.
Configuration options allow users to set a score threshold for further filtering out suggestions.

## Introduction - Efficient Review with Hierarchical Presentation
@@ -24,7 +24,7 @@ The AI model is initially tasked with generating suggestions, and outputting the
However, in practice we observe that models often struggle to simultaneously generate high-quality code suggestions and rank them well in a single pass.
Furthermore, the initial set of generated suggestions sometimes contains easily identifiable errors.

To address these issues, we implemented a "self-reflection" process that refines suggestion ranking and eliminates irrelevant or incorrect proposals.
This process consists of the following steps:

1. Presenting the generated suggestions to the model in a follow-up call.
@@ -47,4 +47,4 @@ This results in a more refined and valuable set of suggestions for the user, sav
```
[pr_code_suggestions]
suggestions_score_threshold = 0 # Filter out suggestions with a score below this threshold (0-10)
```
@@ -61,7 +61,7 @@ Or be triggered interactively by using the `analyze` tool.

### Find Similar Code

The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase or from open-source code, including details about the license associated with each repository.
The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.

For example:
@@ -31,11 +31,11 @@ ___

- The hierarchical structure of the suggestions is designed to help the user to _quickly_ understand them, and to decide which ones are relevant and which are not:

  - Only if the `Category` header is relevant, the user should move to the summarized suggestion description.
  - Only if the summarized suggestion description is relevant, the user should click on the collapsible, to read the full suggestion description with a code preview example.

- In addition, we recommend to use the [`extra_instructions`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) field to guide the model to suggestions that are more relevant to the specific needs of the project (see the sketch after this list).
- The interactive [PR chat](https://qodo-merge-docs.qodo.ai/chrome-extension/) also provides an easy way to get more tailored suggestions and feedback from the AI model.
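As an illustration, a hedged sketch of such an entry in the configuration file (the field name follows the `improve` tool documentation linked above; the instruction text itself is only an example):

```toml
# hypothetical example of steering the model toward project-specific suggestions
[pr_code_suggestions]
extra_instructions = "Focus on error handling and security issues; ignore cosmetic style changes."
```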
___
@@ -51,7 +51,7 @@ ___
#### Answer:<span style="display:none;">4</span>

No. Qodo Merge's strict privacy policy ensures that your code is not stored or used for training purposes.

For a detailed overview of our data privacy policy, please refer to [this link](https://qodo-merge-docs.qodo.ai/overview/data_privacy/)

___
@@ -59,9 +59,9 @@ ___
??? note "Question: Can I use my own LLM keys with Qodo Merge?"
#### Answer:<span style="display:none;">5</span>

When you self-host, you use your own keys.

Qodo Merge Pro with SaaS deployment is a hosted version of Qodo Merge, where Qodo manages the infrastructure and the keys.
For enterprise customers, on-prem deployment is also available. [Contact us](https://www.codium.ai/contact/#pricing) for more information.

___
@@ -84,10 +84,10 @@ command: improve
model1_score: 9,
model2_score: 6,
why: |
  Response 1 is better because it provides more actionable and specific suggestions that directly
  enhance the code's maintainability, performance, and best practices. For example, it suggests
  using a variable for reusable widget instances and using named routes for navigation, which
  are practical improvements. In contrast, Response 2 focuses more on general advice and less
  actionable suggestions, such as changing variable names and adding comments, which are less
  critical for immediate code improvement."
```
@@ -25,43 +25,36 @@ To search the documentation site using natural language:

Qodo Merge offers extensive pull request functionalities across various git providers.

| | | GitHub | GitLab | Bitbucket | Azure DevOps |
|-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
| TOOLS | [Review](https://qodo-merge-docs.qodo.ai/tools/review/) | ✅ | ✅ | ✅ | ✅ |
| | [Describe](https://qodo-merge-docs.qodo.ai/tools/describe/) | ✅ | ✅ | ✅ | ✅ |
| | [Improve](https://qodo-merge-docs.qodo.ai/tools/improve/) | ✅ | ✅ | ✅ | ✅ |
| | [Ask](https://qodo-merge-docs.qodo.ai/tools/ask/) | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Ask on code lines](https://pr-agent-docs.codium.ai/tools/ask#ask-lines) | ✅ | ✅ | | |
| | [Update CHANGELOG](https://qodo-merge-docs.qodo.ai/tools/update_changelog/) | ✅ | ✅ | ✅ | ✅ |
| | [Ticket Context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/) 💎 | ✅ | ✅ | ✅ | |
| | [Utilizing Best Practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) 💎 | ✅ | ✅ | ✅ | |
| | [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) 💎 | ✅ | | | |
| | [Suggestion Tracking](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking) 💎 | ✅ | ✅ | | |
| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
| | [PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
| | [Custom Labels](https://pr-agent-docs.codium.ai/tools/custom_labels/) 💎 | ✅ | ✅ | | |
| | [Analyze](https://pr-agent-docs.codium.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
| | [Similar Code](https://pr-agent-docs.codium.ai/tools/similar_code/) 💎 | ✅ | | | |
| | [Custom Prompt](https://pr-agent-docs.codium.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | ✅ | |
| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | ✅ | | |
| | | | | | |
| USAGE | [CLI](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#local-repo-cli) | ✅ | ✅ | ✅ | ✅ |
| | [App / webhook](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-app) | ✅ | ✅ | ✅ | ✅ |
| | [Tagging bot](https://github.com/Codium-ai/pr-agent#try-it-now) | ✅ | | | |
| | [Actions](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) | ✅ |✅| ✅ |✅|
| | | | | | |
| CORE | [PR compression](https://qodo-merge-docs.qodo.ai/core-abilities/compression_strategy/) | ✅ | ✅ | ✅ | ✅ |
| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
| | [Multiple models support](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/) | ✅ | ✅ | ✅ | ✅ |
| | [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/) | ✅ | ✅ | ✅ | ✅ |
| | [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/) | ✅ | ✅ | ✅ | ✅ |
| | [Self reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/) | ✅ | ✅ | ✅ | ✅ |
| | [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/) 💎 | ✅ | ✅ | ✅ | |
| | [Global and wiki configurations](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) 💎 | ✅ | ✅ | ✅ | |
| | [PR interactive actions](https://www.codium.ai/images/pr_agent/pr-actions.mp4) 💎 | ✅ | ✅ | | |
| | [Impact Evaluation](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) 💎 | ✅ | ✅ | | |
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | Reflect and Review | ✅ | ✅ | ✅ | |
| | Update CHANGELOG.md | ✅ | ✅ | ✅ | ️ |
| | Find Similar Issue | ✅ | | | ️ |
| | [Add PR Documentation](./tools/documentation.md){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Generate Custom Labels](./tools/describe.md#handle-custom-labels-from-the-repos-labels-page-💎){:target="_blank"} 💎 | ✅ | ✅ | | |
| | [Analyze PR Components](./tools/analyze.md){:target="_blank"} 💎 | ✅ | ✅ | | |
| | | | | | ️ |
| USAGE | CLI | ✅ | ✅ | ✅ | ✅ |
| | App / webhook | ✅ | ✅ | ✅ | ✅ |
| | Actions | ✅ | | | ️ |
| | | | | |
| CORE | PR compression | ✅ | ✅ | ✅ | ✅ |
| | Repo language prioritization | ✅ | ✅ | ✅ | ✅ |
| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
| | Incremental PR review | ✅ | | | |
| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |

💎 marks a feature available only in [Qodo Merge Pro](https://www.qodo.ai/pricing/){:target="_blank"}
💎 marks a feature available only in [Qodo Merge Pro](https://www.codium.ai/pricing/){:target="_blank"}

## Example Results
@@ -40,7 +40,7 @@ stages:

            export azure_devops__org="$ORG_URL"
            export config__git_provider="azure"

            pr-agent --pr_url="$PR_URL" describe
            pr-agent --pr_url="$PR_URL" review
            pr-agent --pr_url="$PR_URL" improve
@@ -51,12 +51,10 @@ stages:
```
This script will run Qodo Merge on every new merge request, with the `improve`, `review`, and `describe` commands.
Note that you need to export the `azure_devops__pat` and `OPENAI_KEY` variables in the Azure DevOps pipeline settings (Pipelines -> Library -> + Variable group):

{width=468}

Make sure to give pipeline permissions to the `pr_agent` variable group.

> Note that Azure Pipelines lacks support for triggering workflows from PR comments. If you find a viable solution, please contribute it to our [issue tracker](https://github.com/Codium-ai/pr-agent/issues)

## Azure DevOps from CLI

@@ -67,11 +65,11 @@ git_provider="azure"
```

Azure DevOps provider supports [PAT token](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows) or [DefaultAzureCredential](https://learn.microsoft.com/en-us/azure/developer/python/sdk/authentication-overview#authentication-in-server-environments) authentication.
PAT is faster to create, but has a built-in expiration date, and will use the user identity for API calls.
Using DefaultAzureCredential you can use managed identity or Service principle, which are more secure and will create a separate ADO user identity (via AAD) for the agent.

If PAT was chosen, you can assign the value in .secrets.toml.
If DefaultAzureCredential was chosen, you can assign the additional env vars like AZURE_CLIENT_SECRET directly,
or use managed identity/az cli (for local development) without any additional configuration.
In any case, 'org' value must be assigned in .secrets.toml:
```
@@ -82,7 +80,7 @@ org = "https://dev.azure.com/YOUR_ORGANIZATION/"

### Azure DevOps Webhook

To trigger from an Azure webhook, you need to manually [add a webhook](https://learn.microsoft.com/en-us/azure/devops/service-hooks/services/webhooks?view=azure-devops).
Use the "Pull request created" type to trigger a review, or "Pull request commented on" to trigger any supported comment with /<command> <args> comment on the relevant PR. Note that for the "Pull request commented on" trigger, only API v2.0 is supported.
@@ -60,7 +60,7 @@ See detailed usage instructions in the [USAGE GUIDE](https://qodo-merge-docs.qod
   uses: docker://codiumai/pr-agent:0.23-github_action
...
```

For enhanced security, you can also specify the Docker image by its [digest](https://hub.docker.com/repository/docker/codiumai/pr-agent/tags):
```yaml
...
@@ -71,17 +71,17 @@ See detailed usage instructions in the [USAGE GUIDE](https://qodo-merge-docs.qod
...
```

### Action for GitHub enterprise server
!!! tip ""
    To use the action with a GitHub enterprise server, add an environment variable `GITHUB.BASE_URL` with the API URL of your GitHub server.

    For example, if your GitHub server is at `https://github.mycompany.com`, add the following to your workflow file:
    ```yaml
    env:
      # ... previous environment values
      GITHUB.BASE_URL: "https://github.mycompany.com/api/v3"
    ```

---

@@ -118,7 +118,7 @@ git clone https://github.com/Codium-ai/pr-agent.git
```

5) Copy the secrets template file and fill in the following:

```
cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml
# Edit .secrets.toml file
@@ -147,7 +147,7 @@ cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml
        - mountPath: /app/pr_agent/settings_prod
          name: settings-volume
```

> Another option is to set the secrets as environment variables in your deployment environment, for example `OPENAI.KEY` and `GITHUB.USER_TOKEN`.

6) Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:
@@ -38,7 +38,6 @@ You can also modify the `script` section to run different Qodo Merge commands, o

Note that if your base branches are not protected, don't set the variables as `protected`, since the pipeline will not have access to them.

> **Note**: The `$CI_SERVER_FQDN` variable is available starting from GitLab version 16.10. If you're using an earlier version, this variable will not be available. However, you can combine `$CI_SERVER_HOST` and `$CI_SERVER_PORT` to achieve the same result. Please ensure you're using a compatible version or adjust your configuration.

## Run a GitLab webhook server
@@ -75,4 +74,4 @@ docker push codiumai/pr-agent:gitlab_webhook # Push to your Docker repository
6. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```, the secret token to the generated secret from step 2, and enable the triggers `push`, `comments` and `merge request events`.

7. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
boxes
@@ -15,7 +15,7 @@ There are several ways to use self-hosted Qodo Merge:
- [Azure DevOps](./azure.md)

## Qodo Merge Pro 💎
Qodo Merge Pro, an app hosted by CodiumAI for GitHub\GitLab\BitBucket, is also available.
<br>
With Qodo Merge Pro, installation is as simple as signing up and adding the Qodo Merge app to your relevant repo.
See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details.
@@ -45,7 +45,7 @@ To invoke a tool (for example `review`), you can run directly from the Docker im
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you are using GitHub enterprise server, you need to specify the custom url as variable.
For example, if your GitHub server is at `https://github.mycompany.com`, add the following to the command:
```
-e GITHUB.BASE_URL=https://github.mycompany.com/api/v3
@@ -58,7 +58,7 @@ To invoke a tool (for example `review`), you can run directly from the Docker im

If you have a dedicated GitLab instance, you need to specify the custom url as variable:
```
-e GITLAB.URL=<your gitlab instance url>
```

- For BitBucket:
@@ -110,4 +110,4 @@ python3 -m pr_agent.cli --issue_url <issue_url> similar_issue
[Optional] Add the pr_agent folder to your PYTHONPATH
```
export PYTHONPATH=$PYTHONPATH:<PATH to pr_agent folder>
```
@ -1,44 +1,31 @@
|
||||
Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by QodoAI.
|
||||
|
||||
## Getting Started with Qodo Merge Pro
|
||||
|
||||
Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI.
|
||||
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for more details about the benefits of using Qodo Merge Pro.
|
||||
|
||||
A complimentary two-week trial is provided to all new users. Following the trial period, user licenses (seats) are required for continued access.
|
||||
To purchase user licenses, please visit our [pricing page](https://www.qodo.ai/pricing/).
|
||||
Once subscribed, users can seamlessly deploy the application across any of their code repositories.
|
||||
|
||||
## Install Qodo Merge Pro for GitHub
|
||||
|
||||
### GitHub Cloud
|
||||
|
||||
Qodo Merge Pro for GitHub cloud is available for installation through the [GitHub Marketplace](https://github.com/apps/qodo-merge-pro).
|
||||
Interested parties can subscribe to Qodo Merge Pro through the following [link](https://www.codium.ai/pricing/).
|
||||
After subscribing, you are granted the ability to easily install the application across any of your repositories.
|
||||
|
||||
{width=468}
|
||||
|
||||
### GitHub Enterprise Server
|
||||
Each user who wants to use Qodo Merge Pro needs to buy a seat.
|
||||
Initially, CodiumAI offers a two-week trial period at no cost, after which continued access requires each user to secure a personal seat.
|
||||
Once a user acquires a seat, they gain the flexibility to use Qodo Merge Pro across any repository where it was enabled.
|
||||
|
||||
Users without a purchased seat who interact with a repository featuring Qodo Merge Pro are entitled to receive up to five complimentary feedbacks.
|
||||
Beyond this limit, Qodo Merge Pro will cease to respond to their inquiries unless a seat is purchased.
|
||||
|
||||
## Install Qodo Merge Pro for GitHub Enterprise Server
|
||||
|
||||
To use Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
|
||||
|
||||
### GitHub Open Source Projects
|
||||
|
||||
For open-source projects, Qodo Merge Pro is available for free usage. To install Qodo Merge Pro for your open-source repositories, use the following marketplace [link](https://github.com/apps/qodo-merge-pro-for-open-source).
|
||||
|
||||
## Install Qodo Merge Pro for Bitbucket
|
||||
|
||||
### Bitbucket Cloud
|
||||
|
||||
Qodo Merge Pro for Bitbucket Cloud is available for installation through the following [link](https://bitbucket.org/site/addons/authorize?addon_key=d6df813252c37258)
|
||||
|
||||
{width=468}
|
||||
|
||||
### Bitbucket Server
|
||||
|
||||
To use Qodo Merge Pro application on your private Bitbucket Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
|
||||
|
||||
|
||||
## Install Qodo Merge Pro for GitLab (Teams & Enterprise)
|
||||
|
||||
Since GitLab platform does not support apps, installing Qodo Merge Pro for GitLab is a bit more involved, and requires the following steps:
|
||||
|
||||
#### Step 1
|
||||
### Step 1
|
||||
|
||||
Acquire a personal, project or group level access token. Enable the “api” scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.
|
||||
|
||||
@ -48,14 +35,14 @@ Acquire a personal, project or group level access token. Enable the “api” sc
|
||||
|
||||
Store the token in a safe place, you won’t be able to access it again after it was generated.
|
||||
|
||||
#### Step 2
|
||||
### Step 2
|
||||
|
||||
Generate a shared secret and link it to the access token. Browse to [https://register.gitlab.pr-agent.codium.ai](https://register.gitlab.pr-agent.codium.ai).
|
||||
Fill in your generated GitLab token and your company or personal name in the appropriate fields and click "Submit".
|
||||
|
||||
You should see "Success!" displayed above the Submit button, and a shared secret will be generated. Store it in a safe place, you won’t be able to access it again after it was generated.
|
||||
|
||||
#### Step 3
|
||||
### Step 3
|
||||
|
||||
Install a webhook for your repository or groups, by clicking “webhooks” on the settings menu. Click the “Add new webhook” button.
|
||||
|
||||
@ -66,7 +53,7 @@ Install a webhook for your repository or groups, by clicking “webhooks” on t
|
||||
In the webhook definition form, fill in the following fields:
|
||||
URL: https://pro.gitlab.pr-agent.codium.ai/webhook
|
||||
|
||||
Secret token: Your QodoAI key
|
||||
Secret token: Your CodiumAI key
|
||||
Trigger: Check the ‘comments’ and ‘merge request events’ boxes.
|
||||
Enable SSL verification: Check the box.
|
||||
|
||||
@ -74,8 +61,8 @@ Enable SSL verification: Check the box.
|
||||
{width=750}
|
||||
</figure>
|
||||
|
||||
#### Step 4
|
||||
### Step 4
|
||||
|
||||
You’re all set!
|
||||
|
||||
Open a new merge request or add a MR comment with one of Qodo Merge’s commands such as /review, /describe or /improve.
|
@ -90,4 +90,4 @@ The following diagram illustrates Qodo Merge tools and their flow:
|
||||
|
||||

|
||||
|
||||
Check out the [PR Compression strategy](core-abilities/index.md) page for more details on how we convert a code diff to a manageable LLM prompt
|
@ -1,6 +1,6 @@
|
||||
### Overview
|
||||
|
||||
[Qodo Merge Pro](https://www.codium.ai/pricing/) is a hosted version of open-source [Qodo Merge (PR-Agent)](https://github.com/Codium-ai/pr-agent). A complimentary two-week trial is offered, followed by a monthly subscription fee.
|
||||
[Qodo Merge Pro](https://www.codium.ai/pricing/) is a hosted version of Qodo Merge, provided by Qodo. A complimentary two-week trial is offered, followed by a monthly subscription fee.
|
||||
Qodo Merge Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:
|
||||
|
||||
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub\GitLab\BitBucket repo.
|
||||
@ -19,7 +19,7 @@ Here are some of the additional features and capabilities that Qodo Merge Pro of
|
||||
|
||||
| Feature | Description |
|
||||
|----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [**Model selection**](https://qodo-merge-docs.qodo.ai/usage-guide/PR_agent_pro_models/) | Choose the model that best fits your needs, among top models like `GPT4` and `Claude-Sonnet-3.5` |
|
||||
| [**Global and wiki configuration**](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without committing code |
|
||||
| [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate committable code from the relevant suggestions interactively by clicking on a checkbox |
|
||||
| [**Suggestions impact**](https://qodo-merge-docs.qodo.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions |
|
||||
@ -45,7 +45,7 @@ Here are additional tools that are available only for Qodo Merge Pro users:
|
||||
|
||||
### Supported languages
|
||||
|
||||
Qodo Merge Pro leverages the world's leading code models - Claude 3.5 Sonnet and GPT-4.
|
||||
As a result, its primary tools such as `describe`, `review`, and `improve`, as well as the PR-chat feature, support virtually all programming languages.
|
||||
|
||||
For specialized commands that require static code analysis, Qodo Merge Pro offers support for specific languages. For more details about features that require static code analysis, please refer to the [documentation](https://qodo-merge-docs.qodo.ai/tools/analyze/#overview).
|
||||
|
@ -16,4 +16,4 @@ An example result:
|
||||
|
||||
**Notes**
|
||||
|
||||
- Languages that are currently supported: Python, Java, C++, JavaScript, TypeScript, C#.
|
@ -16,7 +16,7 @@ It can be invoked manually by commenting on any PR:
|
||||
|
||||
You can run `/ask` on specific lines of code in the PR from the PR's diff view. The tool will answer questions based on the code changes in the selected lines.
|
||||
- Click on the '+' sign next to the line number to select the line.
|
||||
- To select multiple lines, click on the '+' sign of the first line and then hold and drag to select the rest of the lines.
|
||||
- write `/ask "..."` in the comment box and press `Add single comment` button.
|
||||
|
||||
{width=512}
|
||||
@ -56,4 +56,4 @@ To get a direct link to an image, we recommend using the following scheme:
|
||||
{width=512}
|
||||
|
||||
|
||||
See a full video tutorial [here](https://codium.ai/images/pr_agent/ask_image_video.mov)
|
@ -28,7 +28,7 @@ When working from CLI, you need to apply the [configuration changes](#configurat
|
||||
To enable custom labels, you need to apply the [configuration changes](#configuration-options) to the local `.pr_agent.toml` file in your repository.
|
||||
|
||||
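As a rough sketch of what that local configuration usually looks like (the option and section names below are assumptions based on the configuration file referenced above, and the label itself is only an illustration):

```toml
[config]
# Assumed flag that turns on user-defined labels for the describe tool
enable_custom_labels = true

# Assumed per-label section; the description tells the model when to apply the label
[custom_labels."Bug fix"]
description = "Suggest this label when the PR fixes a bug in existing functionality"
```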
#### 3. Handle custom labels from the Repo's labels page 💎
|
||||
> This feature is available only in Qodo Merge Pro
|
||||
|
||||
* GitHub : `https://github.com/{owner}/{repo}/labels`, or click on the "Labels" tab in the issues or PRs page.
|
||||
* GitLab : `https://gitlab.com/{owner}/{repo}/-/labels`, or click on "Manage" -> "Labels" on the left menu.
|
||||
|
@ -41,7 +41,7 @@ The code suggestions should focus only on the following:
|
||||
- make sure every variable has a meaningful name
|
||||
- make sure the code is efficient
|
||||
"""
|
||||
```
|
||||
```
|
||||
|
||||
(The instructions above are just an example. We want to emphasize that the prompt should be specific and clear, and be tailored to the needs of your project)
|
||||
|
||||
@ -55,4 +55,4 @@ Results obtained with the prompt above:
|
||||
|
||||
`num_code_suggestions`: number of code suggestions provided by the 'custom_prompt' tool. Default is 4.
|
||||
|
||||
`enable_help_text`: if set to true, the tool will display a help text in the comment. Default is true.
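Put together in a configuration file, these options would look roughly like this (the `[pr_custom_prompt]` section name is an assumption for the 'custom_prompt' tool; the values shown are the documented defaults):

```toml
[pr_custom_prompt]  # assumed section name for the 'custom_prompt' tool
num_code_suggestions = 4
enable_help_text = true
```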
@ -30,4 +30,4 @@ You can state a name of a specific component in the PR to get documentation only
|
||||
**Notes**
|
||||
|
||||
- Languages that are currently fully supported: Python, Java, C++, JavaScript, TypeScript, C#.
|
||||
- This tool can also be triggered interactively by using the [`analyze`](./analyze.md) tool.
|
@ -83,124 +83,18 @@ This feature is controlled by a boolean configuration parameter: `pr_code_sugges
|
||||
|
||||
!!! note "Wiki must be enabled"
|
||||
While the aggregation process is automatic, GitHub repositories require a one-time manual wiki setup.
|
||||
|
||||
To initialize the wiki: navigate to `Wiki`, select `Create the first page`, then click `Save page`.
|
||||
|
||||
To initialize the wiki: navigate to `Wiki`, select `Create the first page`, then click `Save page`.
|
||||
|
||||
{width=768}
|
||||
|
||||
Once a wiki repo is created, the tool will automatically use this wiki for tracking suggestions.
|
||||
|
||||
!!! note "Why a wiki page?"
|
||||
Your code belongs to you, and we respect your privacy. Hence, we won't store any code suggestions in an external database.
|
||||
Your code belongs to you, and we respect your privacy. Hence, we won't store any code suggestions in an external database.
|
||||
|
||||
Instead, we leverage a dedicated private page, within your repository wiki, to track suggestions. This approach offers convenient secure suggestion tracking while avoiding pull requests or any noise to the main repository.
|
||||
|
||||
## `Extra instructions` and `best practices`
|
||||
|
||||
The `improve` tool can be further customized by providing additional instructions and best practices to the AI model.
|
||||
|
||||
### Extra instructions
|
||||
|
||||
>`Platforms supported: GitHub, GitLab, Bitbucket, Azure DevOps`
|
||||
|
||||
You can use the `extra_instructions` configuration option to give the AI model additional instructions for the `improve` tool.
|
||||
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter.
|
||||
|
||||
Examples for possible instructions:
|
||||
```toml
|
||||
[pr_code_suggestions]
|
||||
extra_instructions="""\
|
||||
(1) Answer in japanese
|
||||
(2) Don't suggest to add try-except block
|
||||
(3) Ignore changes in toml files
|
||||
...
|
||||
"""
|
||||
```
|
||||
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.
|
||||
|
||||
### Best practices 💎
|
||||
|
||||
>`Platforms supported: GitHub, GitLab, Bitbucket`
|
||||
|
||||
Another option to give additional guidance to the AI model is by creating a dedicated [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) called `best_practices.md`.
|
||||
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.
|
||||
|
||||
The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will create additional suggestions, with a dedicated label: `Organization
|
||||
best practice`.
|
||||
|
||||
Example for a python `best_practices.md` content:
|
||||
```markdown
|
||||
## Project best practices
|
||||
- Make sure that I/O operations are encapsulated in a try-except block
|
||||
- Use the `logging` module for logging instead of `print` statements
|
||||
- Use `is` and `is not` to compare with `None`
|
||||
- Use `if __name__ == '__main__':` to run the code only when the script is executed
|
||||
- Use `with` statement to open files
|
||||
...
|
||||
```
|
||||
|
||||
Tips for writing an effective `best_practices.md` file:
|
||||
|
||||
- Write clearly and concisely
|
||||
- Include brief code examples when helpful
|
||||
- Focus on project-specific guidelines, that will result in relevant suggestions you actually want to get
|
||||
- Keep the file relatively short, under 800 lines, since:
|
||||
- AI models may not process effectively very long documents
|
||||
- Long files tend to contain generic guidelines already known to AI
|
||||
|
||||
#### Local and global best practices
|
||||
By default, Qodo Merge will look for a local `best_practices.md` wiki file in the root of the relevant local repo.
|
||||
|
||||
If you want to enable also a global `best_practices.md` wiki file, set first in the global configuration file:
|
||||
|
||||
```toml
|
||||
[best_practices]
|
||||
enable_global_best_practices = true
|
||||
```
|
||||
|
||||
Then, create a `best_practices.md` wiki file in the root of [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
|
||||
|
||||
#### Best practices for multiple languages
|
||||
For a git organization working with multiple programming languages, you can maintain a centralized global `best_practices.md` file containing language-specific guidelines.
|
||||
When reviewing pull requests, Qodo Merge automatically identifies the programming language and applies the relevant best practices from this file.
|
||||
|
||||
To do this, structure your `best_practices.md` file using the following format:
|
||||
|
||||
```
|
||||
# [Python]
|
||||
...
|
||||
# [Java]
|
||||
...
|
||||
# [JavaScript]
|
||||
...
|
||||
```
|
||||
|
||||
#### Dedicated label for best practices suggestions
|
||||
Best practice suggestions are labeled as `Organization best practice` by default.
|
||||
To customize this label, modify it in your configuration file:
|
||||
|
||||
```toml
|
||||
[best_practices]
|
||||
organization_name = "..."
|
||||
```
|
||||
|
||||
And the label will be: `{organization_name} best practice`.
|
||||
|
||||
|
||||
#### Example results
|
||||
|
||||
{width=512}
|
||||
|
||||
|
||||
### How to combine `extra instructions` and `best practices`
|
||||
|
||||
The `extra instructions` configuration is more related to the `improve` tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest to add try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)
|
||||
|
||||
In contrast, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
|
||||
|
||||
Using a combination of both can help the AI model to provide relevant and tailored suggestions.
|
||||
|
||||
|
||||
## Usage Tips
|
||||
|
||||
### Implementing the proposed code suggestions
|
||||
@ -215,10 +109,10 @@ In addition to mistakes (which may happen, but are rare), sometimes the presente
|
||||
In such cases, we recommend prioritizing the suggestion's detailed description, using the diff snippet primarily as a supporting reference.
|
||||
|
||||
### Dual publishing mode
Our recommended approach for presenting code suggestions is through a [table](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) (`--pr_code_suggestions.commitable_code_suggestions=false`).
This method significantly reduces the PR footprint and allows for quick and easy digestion of multiple suggestions.

We also offer a complementary **dual publishing mode**. When enabled, suggestions exceeding a certain score threshold are not only displayed in the table, but also presented as commitable PR comments.
This mode helps highlight suggestions deemed more critical.

To activate dual publishing mode, use the following setting:
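A minimal sketch of that setting (the threshold value is only an example; `dual_publishing_score_threshold` is described in the configuration table below, where -1 means disabled):

```toml
[pr_code_suggestions]
# Suggestions scoring at or above this example threshold are also published
# as commitable PR comments, in addition to the summary table
dual_publishing_score_threshold = 7
```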
@ -271,9 +165,9 @@ code_suggestions_self_review_text = "... (your text here) ..."
|
||||
{width=512}
|
||||
|
||||
- If you keep the number of required reviewers for a PR to 1 and enable this configuration, this effectively means that the PR author can approve the PR by actively clicking the self-review checkbox.
|
||||
|
||||
|
||||
To prevent unauthorized approvals, this configuration defaults to false, and cannot be altered through online comments; enabling requires a direct update to the configuration file and a commit to the repository. This ensures that utilizing the feature demands a deliberate documented decision by the repository owner.
|
||||
|
||||
|
||||
|
||||
### How many code suggestions are generated?
|
||||
Qodo Merge uses a dynamic strategy to generate code suggestions based on the size of the pull request (PR). Here's how it works:
|
||||
@ -297,10 +191,77 @@ This approach has two main benefits:
|
||||
Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 lines of code), Qodo Merge will be able to process the entire code in a single call.
|
||||
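If you need to bound the cost of this chunked strategy, the number of AI calls can be capped via the parameter listed in the configuration tables below (a sketch; the value shown is the documented default):

```toml
[pr_code_suggestions]
# Maximum number of chunks (and therefore AI calls) used for a single large PR
max_number_of_calls = 3
```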
|
||||
|
||||
### 'Extra instructions' and 'best practices'
|
||||
|
||||
#### Extra instructions
|
||||
|
||||
>`Platforms supported: GitHub, GitLab, Bitbucket`
|
||||
|
||||
You can use the `extra_instructions` configuration option to give the AI model additional instructions for the `improve` tool.
|
||||
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify relevant aspects that you want the model to focus on.
|
||||
|
||||
Examples for possible instructions:
|
||||
```toml
|
||||
[pr_code_suggestions]
|
||||
extra_instructions="""\
|
||||
(1) Answer in japanese
|
||||
(2) Don't suggest to add try-except block
|
||||
(3) Ignore changes in toml files
|
||||
...
|
||||
"""
|
||||
```
|
||||
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.
|
||||
|
||||
#### Best practices 💎
|
||||
|
||||
>`Platforms supported: GitHub, GitLab`
|
||||
|
||||
Another option to give additional guidance to the AI model is by creating a dedicated [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) called `best_practices.md`.
|
||||
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.
|
||||
|
||||
The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will suggest improvements accordingly, with a dedicated label: `Organization
|
||||
best practice`.
|
||||
|
||||
Example for a `best_practices.md` content can be found [here](https://github.com/Codium-ai/pr-agent/blob/main/docs/docs/usage-guide/EXAMPLE_BEST_PRACTICE.md) (adapted from Google's [pyguide](https://google.github.io/styleguide/pyguide.html)).
|
||||
This file is only an example. Since it is used as a prompt for an AI model, we want to emphasize the following:
|
||||
|
||||
- It should be written in a clear and concise manner
|
||||
- If needed, it should give short relevant code snippets as examples
|
||||
- Recommended to limit the text to 800 lines or fewer. Here’s why:
|
||||
|
||||
1) Extremely long best practices documents may not be fully processed by the AI model.
|
||||
|
||||
2) A lengthy file probably represents a more "**generic**" set of guidelines, which the AI model is already familiar with. The objective is to focus on a more targeted set of guidelines tailored to the specific needs of this project.
|
||||
|
||||
##### Local and global best practices
|
||||
By default, Qodo Merge will look for a local `best_practices.md` wiki file in the root of the relevant local repo.
|
||||
|
||||
If you want to enable also a global `best_practices.md` wiki file, set first in the global configuration file:
|
||||
|
||||
```toml
|
||||
[best_practices]
|
||||
enable_global_best_practices = true
|
||||
```
|
||||
|
||||
Then, create a `best_practices.md` wiki file in the root of [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
|
||||
|
||||
##### Example results
|
||||
|
||||
{width=512}
|
||||
|
||||
|
||||
#### How to combine `extra instructions` and `best practices`
|
||||
|
||||
The `extra instructions` configuration is more related to the `improve` tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest to add try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)
|
||||
|
||||
In contrast, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
|
||||
|
||||
Using a combination of both can help the AI model to provide relevant and tailored suggestions.
|
||||
|
||||
## Configuration options
|
||||
|
||||
??? example "General options"
|
||||
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td><b>extra_instructions</b></td>
|
||||
@ -314,10 +275,6 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
|
||||
<td><b>dual_publishing_score_threshold</b></td>
|
||||
<td>Minimum score threshold for suggestions to be presented as commitable PR comments in addition to the table. Default is -1 (disabled).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>focus_only_on_problems</b></td>
|
||||
<td>If set to true, suggestions will focus primarily on identifying and fixing code problems, and less on style considerations like best practices, maintainability, or readability. Default is true.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>persistent_comment</b></td>
|
||||
<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
|
||||
@ -342,10 +299,6 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
|
||||
<td><b>wiki_page_accepted_suggestions</b></td>
|
||||
<td>If set to true, the tool will automatically track accepted suggestions in a dedicated wiki page called `.pr_agent_accepted_suggestions`. Default is true.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>allow_thumbs_up_down</b></td>
|
||||
<td>If set to true, all code suggestions will have thumbs up and thumbs down buttons, to encourage users to provide feedback on the suggestions. Default is false.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
??? example "Params for number of suggestions and AI calls"
|
||||
@ -363,6 +316,10 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
|
||||
<td><b>max_number_of_calls</b></td>
|
||||
<td>Maximum number of chunks. Default is 3.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>rank_extended_suggestions</b></td>
|
||||
<td>If set to true, the tool will rank the suggestions, based on importance. Default is true.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## A note on code suggestions quality
|
||||
@ -374,5 +331,5 @@ Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 line
|
||||
- Only if the `Category` header is relevant, the user should move to the summarized suggestion description
|
||||
- Only if the summarized suggestion description is relevant, the user should click on the collapsible, to read the full suggestion description with a code preview example.
|
||||
|
||||
- In addition, we recommend to use the [`extra_instructions`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) field to guide the model to suggestions that are more relevant to the specific needs of the project.
|
||||
- In addition, we recommend to use the [`extra_instructions`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) field to guide the model to suggestions that are more relevant to the specific needs of the project.
|
||||
- The interactive [PR chat](https://qodo-merge-docs.qodo.ai/chrome-extension/) also provides an easy way to get more tailored suggestions and feedback from the AI model.
|
||||
|
@ -26,4 +26,4 @@ The tool will generate code suggestions for the selected component (if no compon
|
||||
- `num_code_suggestions`: number of code suggestions to provide. Default is 4
|
||||
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on ...".
|
||||
- `file`: in case there are several components with the same name, you can specify the relevant file.
|
||||
- `class_name`: in case there are several methods with the same name in the same file, you can specify the relevant class name.
|
@ -19,4 +19,4 @@ Here is a list of Qodo Merge tools, each with a dedicated page that explains how
|
||||
| **💎 [Improve Component (`/improve_component component_name`](./improve_component.md))** | Generates code suggestions for a specific code component that changed in the PR |
|
||||
| **💎 [CI Feedback (`/checks ci_job`](./ci_feedback.md))** | Automatically generates feedback and analysis for a failed CI job |
|
||||
|
||||
Note that the tools marked with 💎 are available only for Qodo Merge Pro users.
|
@ -39,7 +39,7 @@ pr_commands = [
|
||||
]
|
||||
|
||||
[pr_reviewer]
|
||||
extra_instructions = "..."
|
||||
num_code_suggestions = ...
|
||||
...
|
||||
```
|
||||
|
||||
@ -95,7 +95,7 @@ extra_instructions = "..."
|
||||
<table>
|
||||
<tr>
|
||||
<td><b>num_code_suggestions</b></td>
|
||||
<td>Number of code suggestions provided by the 'review' tool. Default is 0, meaning no code suggestions will be provided by the `review` tool. Note that this is a legacy feature, that will be removed in future releases. Use the `improve` tool instead for code suggestions</td>
|
||||
<td>Number of code suggestions provided by the 'review' tool. Default is 0, meaning no code suggestions will be provided by the `review` tool.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><b>inline_code_comments</b></td>
|
||||
@ -180,10 +180,10 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
|
||||
|
||||
The `review` tool provides a collection of configurable feedbacks about a PR.
|
||||
It is recommended to review the [Configuration options](#configuration-options) section, and choose the relevant options for your use case.
|
||||
|
||||
Some of the features that are disabled by default are quite useful, and should be considered for enabling. For example:
|
||||
`require_score_review`, and more.
|
||||
|
||||
|
||||
On the other hand, if you find one of the enabled features to be irrelevant for your use case, disable it. No default configuration can fit all use cases.
|
||||
|
||||
!!! tip "Automation"
|
||||
@ -197,19 +197,19 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
|
||||
!!! tip "Possible labels from the review tool"
|
||||
|
||||
The `review` tool can auto-generate two specific types of labels for a PR:
|
||||
|
||||
|
||||
- a `possible security issue` label that detects if a possible [security issue](https://github.com/Codium-ai/pr-agent/blob/tr/user_description/pr_agent/settings/pr_reviewer_prompts.toml#L136) exists in the PR code (`enable_review_labels_security` flag)
|
||||
- a `Review effort [1-5]: x` label, where x is the estimated effort to review the PR (`enable_review_labels_effort` flag)
|
||||
|
||||
|
||||
Both modes are useful, and we recommend enabling them.
|
||||
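Enabling both flags explicitly would look like this (the flag names are the ones mentioned in the bullets above; the values are the recommended ones):

```toml
[pr_reviewer]
# Add a 'possible security issue' label when a potential security issue is detected
enable_review_labels_security = true
# Add a 'Review effort [1-5]: x' label with the estimated review effort
enable_review_labels_effort = true
```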
|
||||
!!! tip "Extra instructions"
|
||||
|
||||
Extra instructions are important.
|
||||
The `review` tool can be configured with extra instructions, which can be used to guide the model to a feedback tailored to the needs of your project.
|
||||
|
||||
|
||||
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify the relevant sub-tool, and the relevant aspects of the PR that you want to emphasize.
|
||||
|
||||
|
||||
Examples of extra instructions:
|
||||
```
|
||||
[pr_reviewer]
|
||||
@ -227,22 +227,22 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
|
||||
!!! tip "Auto-approval"
|
||||
|
||||
Qodo Merge can approve a PR when a specific comment is invoked.
|
||||
|
||||
|
||||
To ensure safety, the auto-approval feature is disabled by default. To enable auto-approval, you need to actively set in a pre-defined configuration file the following:
|
||||
```
|
||||
[pr_reviewer]
|
||||
enable_auto_approval = true
|
||||
```
|
||||
(this specific flag cannot be set with a command line argument, only in the configuration file, committed to the repository)
|
||||
|
||||
|
||||
|
||||
|
||||
After enabling, by commenting on a PR:
|
||||
```
|
||||
/review auto_approve
|
||||
```
|
||||
Qodo Merge will automatically approve the PR, and add a comment with the approval.
|
||||
|
||||
|
||||
|
||||
|
||||
You can also enable auto-approval only if the PR meets certain requirements, such as that the `estimated_review_effort` label is equal or below a certain threshold, by adjusting the flag:
|
||||
```
|
||||
[pr_reviewer]
|
||||
@ -258,3 +258,4 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
|
||||
[//]: # ( Notice If you are interested **only** in the code suggestions, it is recommended to use the [`improve`](./improve.md) feature instead, since it is a dedicated only to code suggestions, and usually gives better results.)
|
||||
|
||||
[//]: # ( Use the `review` tool if you want to get more comprehensive feedback, which includes code suggestions as well.)
|
||||
|
||||
|
@ -49,10 +49,9 @@ It can be invoked automatically from the analyze table, can be accessed by:
|
||||
/analyze
|
||||
```
|
||||
Choose the components you want to find similar code for, and click on the `similar` checkbox.
|
||||
|
||||
{width=768}
|
||||
|
||||
You can search for similar code either within the organization's codebase or globally, which includes open-source repositories. Each result will include the relevant code components along with their associated license details.
|
||||
If you are looking to search for similar code in the organization's codebase, you can click on the `Organization` checkbox, and it will invoke a new search command just for the organization's codebase.
|
||||
|
||||
{width=768}
|
||||
|
||||
|
@ -17,7 +17,7 @@ It can be invoked manually by commenting on any PR:
|
||||
Note that to perform retrieval, the `similar_issue` tool indexes all the repo previous issues (once).
|
||||
|
||||
|
||||
**Select VectorDBs** by changing the `pr_similar_issue` parameter in the `configuration.toml` file

Two VectorDBs are available to switch between:
1. LanceDB
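Whichever database you choose, the switch itself is a small configuration change (a sketch; the section and option names are assumptions based on the `pr_similar_issue` parameter mentioned above):

```toml
[pr_similar_issue]
# Assumed option name; switches between the two supported vector databases
vectordb = "lancedb"  # or "pinecone"
```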
@ -40,4 +40,4 @@ These parameters can be obtained by registering to [Pinecone](https://app.pineco
|
||||
- To invoke the 'similar' issue tool via online usage, [comment](https://github.com/Codium-ai/pr-agent/issues/178#issuecomment-1716934893) on a PR:
|
||||
`/similar_issue`
|
||||
|
||||
- You can also enable the 'similar issue' tool to run automatically when a new issue is opened, by adding it to the [pr_commands list in the github_app section](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L66)
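As a sketch, that addition would look like the following (the other entries in the list depend on which tools you already run automatically):

```toml
[github_app]
pr_commands = [
    "/describe",
    "/review",
    "/similar_issue",
]
```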
@ -29,4 +29,4 @@ The tool will generate tests for the selected component (if no component is stat
|
||||
- `extra_instructions`: Optional extra instructions to the tool. For example: "use the following mock injection scheme: ...".
|
||||
- `file`: in case there are several components with the same name, you can specify the relevant file.
|
||||
- `class_name`: in case there are several methods with the same name in the same file, you can specify the relevant class name.
|
||||
- `enable_help_text`: if set to true, the tool will add a help text to the PR comment. Default is true.
|
@ -16,4 +16,4 @@ It can be invoked manually by commenting on any PR:

Under the section `pr_update_changelog`, the [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L50) contains options to customize the 'update changelog' tool:

- `push_changelog_changes`: whether to push the changes to CHANGELOG.md, or just print them. Default is false (print only).
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...
|
||||
|
||||
While Python’s built-in data types such as dictionaries appear to have atomic operations, there are corner cases where they aren’t atomic (e.g. if `__hash__` or `__eq__` are implemented as Python methods) and their atomicity should not be relied upon. Neither should you rely on atomic variable assignment (since this in turn depends on dictionaries).
|
||||
|
||||
Use the `queue` module’s `Queue` data type as the preferred way to communicate data between threads. Otherwise, use the `threading` module and its locking primitives. Prefer condition variables and `threading.Condition` instead of using lower-level locks.
|
||||
Use the `queue` module’s `Queue` data type as the preferred way to communicate data between threads. Otherwise, use the `threading` module and its locking primitives. Prefer condition variables and `threading.Condition` instead of using lower-level locks.
|
@ -1,5 +1,4 @@
|
||||
## Local repo (CLI)
|
||||
|
||||
When running from your locally cloned Qodo Merge repo (CLI), your local configuration file will be used.
|
||||
Examples of invoking the different tools via the CLI:
|
||||
|
||||
@ -36,29 +35,9 @@ This is useful for debugging or experimenting with different tools.
|
||||
|
||||
Default is "github".
|
||||
|
||||
### CLI Health Check
|
||||
To verify that Qodo Merge has been configured correctly, you can run this health check command from the repository root:
|
||||
|
||||
```bash
|
||||
python -m tests.health_test.main
|
||||
```
|
||||
|
||||
If the health check passes, you will see the following output:
|
||||
|
||||
```
|
||||
========
|
||||
Health test passed successfully
|
||||
========
|
||||
```
|
||||
|
||||
At the end of the run.
|
||||
|
||||
Before running the health check, ensure you have:
|
||||
|
||||
- Configured your [LLM provider](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/)
|
||||
- Added a valid GitHub token to your configuration file
|
||||
|
||||
## Online usage
|
||||
### Online usage
|
||||
|
||||
Online usage means invoking Qodo Merge tools by [comments](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR.
|
||||
Commands for invoking the different tools via comments:
|
||||
@ -79,70 +58,49 @@ For example, if you want to edit the `review` tool configurations, you can run:
|
||||
Any configuration value in [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) file can be similarly edited. Comment `/config` to see the list of available configurations.
|
||||
|
||||
|
||||
## Qodo Merge Automatic Feedback
|
||||
|
||||
|
||||
### Disabling all automatic feedback
|
||||
|
||||
To easily disable all automatic feedback from Qodo Merge (GitHub App, GitLab Webhook, BitBucket App, Azure DevOps Webhook), set in a configuration file:
|
||||
|
||||
```toml
|
||||
[config]
|
||||
disable_auto_feedback = true
|
||||
```
|
||||
|
||||
When this parameter is set to `true`, Qodo Merge will not run any automatic tools (like `describe`, `review`, `improve`) when a new PR is opened, or when new code is pushed to an open PR.
|
||||
|
||||
### GitHub App
|
||||
## GitHub App
|
||||
|
||||
!!! note "Configurations for Qodo Merge Pro"
|
||||
Qodo Merge Pro for GitHub is an App, hosted by CodiumAI. So all the instructions below are relevant also for Qodo Merge Pro users.
|
||||
Same goes for [GitLab webhook](#gitlab-webhook) and [BitBucket App](#bitbucket-app) sections.
|
||||
|
||||
#### GitHub app automatic tools when a new PR is opened
|
||||
### GitHub app automatic tools when a new PR is opened
|
||||
|
||||
The [github_app](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L220) section defines GitHub app specific configurations.
|
||||
The [github_app](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L108) section defines GitHub app specific configurations.
|
||||
|
||||
The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when a new PR is opened:
|
||||
```toml
|
||||
The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when a new PR is opened.
|
||||
```
|
||||
[github_app]
|
||||
pr_commands = [
|
||||
"/describe",
|
||||
"/review",
|
||||
"/improve",
|
||||
"/improve --pr_code_suggestions.suggestions_score_threshold=5",
|
||||
]
|
||||
```
|
||||
|
||||
This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the `describe`, `review` and `improve` tools.
|
||||
For the `improve` tool, for example, the `suggestions_score_threshold` parameter will be set to 5 (suggestions below a score of 5 won't be presented)
|
||||
|
||||
You can override the default tool parameters by using one of the three options for a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/): **wiki**, **local**, or **global**.
|
||||
For example, if your configuration file contains:
|
||||
|
||||
```toml
|
||||
You can override the default tool parameters by using one of the three options for a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/): **wiki**, **local**, or **global**.
|
||||
For example, if your local `.pr_agent.toml` file contains:
|
||||
```
|
||||
[pr_description]
|
||||
generate_ai_title = true
|
||||
```
|
||||
Every time you run the `describe` tool, including automatic runs, the PR title will be generated by the AI.
|
||||
|
||||
Every time you run the `describe` tool (including automatic runs) the PR title will be generated by the AI.
|
||||
|
||||
You can customize configurations specifically for automated runs by using the `--config_path=<value>` parameter.
|
||||
For instance, to modify the `review` tool settings only for newly opened PRs, use:
|
||||
```toml
|
||||
To cancel the automatic run of all the tools, set:
|
||||
```
|
||||
[github_app]
|
||||
pr_commands = [
|
||||
"/describe",
|
||||
"/review --pr_reviewer.extra_instructions='focus on the file: ...'",
|
||||
"/improve",
|
||||
]
|
||||
pr_commands = []
|
||||
```
|
||||
|
||||
#### GitHub app automatic tools for push actions (commits to an open PR)
|
||||
### GitHub app automatic tools for push actions (commits to an open PR)
|
||||
|
||||
In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.
|
||||
|
||||
The configuration toggle `handle_push_trigger` can be used to enable this feature.
|
||||
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the PR.
|
||||
```toml
|
||||
```
|
||||
[github_app]
|
||||
handle_push_trigger = true
|
||||
push_commands = [
|
||||
@ -152,9 +110,9 @@ push_commands = [
|
||||
```
|
||||
This means that when new code is pushed to the PR, Qodo Merge will run the `describe` and `review` tools, with the specified parameters.
|
||||
|
||||
### GitHub Action
|
||||
## GitHub Action
|
||||
`GitHub Action` is a different way to trigger Qodo Merge tools, and uses a different configuration mechanism than `GitHub App`.<br>
|
||||
You can configure settings for `GitHub Action` by adding environment variables under the env section in `.github/workflows/pr_agent.yml` file.
|
||||
Specifically, start by setting the following environment variables:
|
||||
```yaml
|
||||
env:
|
||||
@ -163,7 +121,7 @@ Specifically, start by setting the following environment variables:
|
||||
github_action_config.auto_review: "true" # enable\disable auto review
|
||||
github_action_config.auto_describe: "true" # enable\disable auto describe
|
||||
github_action_config.auto_improve: "true" # enable\disable auto improve
|
||||
github_action_config.pr_actions: '["opened", "reopened", "ready_for_review", "review_requested"]'
|
||||
github_action_config.pr_actions: ["opened", "reopened", "ready_for_review", "review_requested"]
|
||||
```
|
||||
`github_action_config.auto_review`, `github_action_config.auto_describe` and `github_action_config.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
|
||||
If not set, the default configuration is for all three tools to run automatically when a new PR is opened.
|
||||
@ -171,25 +129,22 @@ If not set, the default configuration is for all three tools to run automaticall
|
||||
`github_action_config.pr_actions` is used to configure which `pull_requests` events will trigger the enabled auto flags
|
||||
If not set, the default configuration is `["opened", "reopened", "ready_for_review", "review_requested"]`
|
||||
|
||||
`github_action_config.enable_output` is used to enable/disable github actions [output parameter](https://docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#outputs-for-docker-container-and-javascript-actions) (default is `true`).
|
||||
Review result is output as JSON to `steps.{step-id}.outputs.review` property.
|
||||
The JSON structure is equivalent to the yaml data structure defined in [pr_reviewer_prompts.toml](https://github.com/idubnori/pr-agent/blob/main/pr_agent/settings/pr_reviewer_prompts.toml).
|
||||
|
||||
Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) in the root of your repo
|
||||
|
||||
For example, you can set an environment variable: `pr_description.publish_labels=false`, or add a `.pr_agent.toml` file with the following content:
|
||||
|
||||
```toml
|
||||
```
|
||||
[pr_description]
|
||||
publish_labels = false
|
||||
```
|
||||
|
||||
to prevent Qodo Merge from publishing labels when running the `describe` tool.
|
||||
|
||||
### GitLab Webhook
|
||||
## GitLab Webhook
|
||||
After setting up a GitLab webhook, to control which commands will run automatically when a new MR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
|
||||
|
||||
```toml
|
||||
```
|
||||
[gitlab]
|
||||
pr_commands = [
|
||||
"/describe",
|
||||
@ -198,10 +153,10 @@ pr_commands = [
|
||||
]
|
||||
```
|
||||
|
||||
The GitLab webhook can also respond to new code that is pushed to an open MR.
The configuration toggle `handle_push_trigger` can be used to enable this feature.
|
||||
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the MR.
|
||||
```toml
|
||||
```
|
||||
[gitlab]
|
||||
handle_push_trigger = true
|
||||
push_commands = [
|
||||
@ -212,13 +167,13 @@ push_commands = [
|
||||
|
||||
Note that to use the 'handle_push_trigger' feature, you need to give the gitlab webhook also the "Push events" scope.
|
||||
|
||||
### BitBucket App
|
||||
## BitBucket App
|
||||
Similar to GitHub app, when running Qodo Merge from BitBucket App, the default [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.
|
||||
|
||||
By uploading a local `.pr_agent.toml` file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload `.pr_agent.toml` prior to creating a PR, in order for the configuration to take effect.
|
||||
|
||||
For example, if your local `.pr_agent.toml` file contains:
|
||||
```toml
|
||||
```
|
||||
[pr_reviewer]
|
||||
extra_instructions = "Answer in japanese"
|
||||
```
|
||||
@ -231,12 +186,12 @@ If you experience a lack of responses from Qodo Merge, you might want to set: `b
|
||||
This will prevent Qodo Merge from acquiring the full file content, and will only use the diff content. This will reduce the number of requests made to BitBucket, at the cost of small decrease in accuracy, as dynamic context will not be applicable.
|
||||
|
||||
|
||||
#### BitBucket Self-Hosted App automatic tools
|
||||
### BitBucket Self-Hosted App automatic tools
|
||||
|
||||
To control which commands will run automatically when a new PR is opened, you can set the `pr_commands` parameter in the configuration file:
|
||||
Specifically, set the following values:
|
||||
|
||||
```toml
|
||||
```
|
||||
[bitbucket_app]
|
||||
pr_commands = [
|
||||
"/review",
|
||||
@ -247,7 +202,7 @@ Note that we set specifically for bitbucket, we recommend using: `--pr_code_sugg
|
||||
Since this platform only supports inline code suggestions, we want to limit the number of suggestions, and only present a limited number.
|
||||
|
||||
To enable BitBucket app to respond to each **push** to the PR, set (for example):
|
||||
```toml
|
||||
```
|
||||
[bitbucket_app]
|
||||
handle_push_trigger = true
|
||||
push_commands = [
|
||||
@ -256,20 +211,20 @@ push_commands = [
|
||||
]
|
||||
```
|
||||
|
||||
### Azure DevOps provider
|
||||
## Azure DevOps provider
|
||||
|
||||
To use Azure DevOps provider use the following settings in configuration.toml:
|
||||
```toml
|
||||
```
|
||||
[config]
|
||||
git_provider="azure"
|
||||
```
|
||||
|
||||
Azure DevOps provider supports [PAT token](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows) or [DefaultAzureCredential](https://learn.microsoft.com/en-us/azure/developer/python/sdk/authentication-overview#authentication-in-server-environments) authentication.
|
||||
PAT is faster to create, but has a built-in expiration date, and will use the user identity for API calls.
|
||||
Using DefaultAzureCredential you can use managed identity or Service principle, which are more secure and will create separate ADO user identity (via AAD) to the agent.
|
||||
|
||||
If PAT was chosen, you can assign the value in .secrets.toml.
If DefaultAzureCredential was chosen, you can assign the additional env vars like AZURE_CLIENT_SECRET directly,
or use managed identity/az cli (for local development) without any additional configuration.
In any case, the 'org' value must be assigned in .secrets.toml:
|
||||
```
|
||||
@ -278,10 +233,10 @@ org = "https://dev.azure.com/YOUR_ORGANIZATION/"
|
||||
# pat = "YOUR_PAT_TOKEN" needed only if using PAT for authentication
|
||||
```
|
||||
|
||||
#### Azure DevOps Webhook
|
||||
### Azure DevOps Webhook
|
||||
|
||||
To control which commands will run automatically when a new PR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
|
||||
```toml
|
||||
```
|
||||
[azure_devops_server]
|
||||
pr_commands = [
|
||||
"/describe",
|
||||
|
@ -5,10 +5,11 @@ To use a different model than the default (GPT-4), you need to edit in the [conf
|
||||
```
|
||||
[config]
|
||||
model = "..."
|
||||
model_turbo = "..."
|
||||
fallback_models = ["..."]
|
||||
```
|
||||
|
||||
For models and environments not from OpenAI, you might need to provide additional keys and other parameters.
|
||||
You can give parameters via a configuration file (see below for instructions), or from environment variables. See [litellm documentation](https://litellm.vercel.app/docs/proxy/quick_start#supported-llms) for the environment variables relevant per model.
|
||||
|
||||
### Azure
|
||||
@ -26,8 +27,9 @@ deployment_id = "" # The deployment name you chose when you deployed the engine
|
||||
and set in your configuration file:
|
||||
```
|
||||
[config]
|
||||
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-4o)
|
||||
fallback_models=["..."]
|
||||
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
|
||||
model_turbo="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
|
||||
fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
|
||||
```
|
||||
|
||||
### Hugging Face
|
||||
@ -50,6 +52,7 @@ MAX_TOKENS={
|
||||
|
||||
[config] # in configuration.toml
|
||||
model = "ollama/llama2"
|
||||
model_turbo = "ollama/llama2"
|
||||
fallback_models=["ollama/llama2"]
|
||||
|
||||
[ollama] # in .secrets.toml
|
||||
@ -73,6 +76,7 @@ MAX_TOKENS={
|
||||
}
|
||||
[config] # in configuration.toml
|
||||
model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
|
||||
model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
|
||||
fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
|
||||
|
||||
[huggingface] # in .secrets.toml
|
||||
@ -87,6 +91,7 @@ To use Llama2 model with Replicate, for example, set:
|
||||
```
|
||||
[config] # in configuration.toml
|
||||
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
|
||||
model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
|
||||
fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
|
||||
[replicate] # in .secrets.toml
|
||||
key = ...
|
||||
@ -102,7 +107,8 @@ To use Llama3 model with Groq, for example, set:
|
||||
```
|
||||
[config] # in configuration.toml
|
||||
model = "llama3-70b-8192"
|
||||
fallback_models = ["groq/llama3-70b-8192"]
|
||||
model_turbo = "llama3-70b-8192"
|
||||
fallback_models = ["groq/llama3-70b-8192"]
|
||||
[groq] # in .secrets.toml
|
||||
key = ... # your Groq api key
|
||||
```
|
||||
@ -112,9 +118,10 @@ key = ... # your Groq api key
|
||||
|
||||
To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:
|
||||
|
||||
```
|
||||
```
|
||||
[config] # in configuration.toml
|
||||
model = "vertex_ai/codechat-bison"
|
||||
model_turbo = "vertex_ai/codechat-bison"
|
||||
fallback_models="vertex_ai/codechat-bison"
|
||||
|
||||
[vertexai] # in .secrets.toml
|
||||
@ -133,6 +140,7 @@ To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant
|
||||
```toml
|
||||
[config] # in configuration.toml
|
||||
model="google_ai_studio/gemini-1.5-flash"
|
||||
model_turbo="google_ai_studio/gemini-1.5-flash"
|
||||
fallback_models=["google_ai_studio/gemini-1.5-flash"]
|
||||
|
||||
[google_ai_studio] # in .secrets.toml
|
||||
@ -148,6 +156,7 @@ To use Anthropic models, set the relevant models in the configuration section of
|
||||
```
|
||||
[config]
|
||||
model="anthropic/claude-3-opus-20240229"
|
||||
model_turbo="anthropic/claude-3-opus-20240229"
|
||||
fallback_models=["anthropic/claude-3-opus-20240229"]
|
||||
```
|
||||
|
||||
@ -161,9 +170,10 @@ KEY = "..."
|
||||
|
||||
To use Amazon Bedrock and its foundational models, add the below configuration:
|
||||
|
||||
```
|
||||
```
|
||||
[config] # in configuration.toml
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
||||
model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
||||
fallback_models=["bedrock/anthropic.claude-v2:1"]
|
||||
```
|
||||
|
||||
@ -185,6 +195,7 @@ If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agen
|
||||
```
|
||||
[config]
|
||||
model="custom_model_name"
|
||||
model_turbo="custom_model_name"
|
||||
fallback_models=["custom_model_name"]
|
||||
```
|
||||
(2) Set the maximal tokens for the model:
|
||||
|
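The continuation of this step is cut off in the diff above; in practice it is typically a single additional setting (the option name below is an assumption, shown only as a sketch):

```toml
[config]
# Assumed option name; sets the maximal token window for the custom model defined above
custom_model_max_tokens = 4096
```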
@ -20,7 +20,7 @@ In terms of precedence, wiki configurations will override local configurations,
|
||||
|
||||
`Platforms supported: GitHub, GitLab, Bitbucket`
|
||||
|
||||
With Qodo Merge Pro, you can set configurations by creating a page called `.pr_agent.toml` in the [wiki](https://github.com/Codium-ai/pr-agent/wiki/pr_agent.toml) of the repo.
The advantage of this method is that it allows you to set configurations without needing to commit new content to the repo - just edit the wiki page and **save**.
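For example, a wiki page named `.pr_agent.toml` with the following content would change the reviewer's instructions without committing anything to the repo (the section and option mirror the examples used elsewhere in this guide):

```toml
[pr_reviewer]
extra_instructions = "Answer in japanese"
```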
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Usage guide
|
||||
|
||||
This page provides a detailed guide on how to use Qodo Merge.
|
||||
This page provides a detailed guide on how to use Qodo Merge.
|
||||
It includes information on how to adjust Qodo Merge configurations, define which tools will run automatically, and apply other advanced configurations.
|
||||
|
||||
|
||||
@ -23,4 +23,4 @@ It includes information on how to adjust Qodo Merge configurations, define which
|
||||
- [Changing a model](./additional_configurations.md#changing-a-model)
|
||||
- [Patch Extra Lines](./additional_configurations.md#patch-extra-lines)
|
||||
- [Editing the prompts](./additional_configurations.md#editing-the-prompts)
|
||||
- [Qodo Merge Pro Models](./PR_agent_pro_models.md)
|
||||
- [Qodo Merge Pro Models](./PR_agent_pro_models.md)
|
@ -10,3 +10,4 @@ Specifically, CLI commands can be issued by invoking a pre-built [docker image](
|
||||
|
||||
For online usage, you will need to set up either a [GitHub App](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app) or a [GitHub Action](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) (GitHub), a [GitLab webhook](https://qodo-merge-docs.qodo.ai/installation/gitlab/#run-a-gitlab-webhook-server) (GitLab), or a [BitBucket App](https://qodo-merge-docs.qodo.ai/installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app) (BitBucket).
|
||||
These platforms also make it possible to run Qodo Merge tools automatically when a new PR is opened, or on each push to a branch.
@ -9,10 +9,10 @@ As an alternative, you can filter in your mail provider the notifications specif
|
||||
{width=512}
|
||||
|
||||
|
||||
Another option to reduce the mail overload, yet still receive notifications on Qodo Merge tools, is to disable the help collapsible section in Qodo Merge bot comments.
|
||||
Another option to reduce the mail overload, yet still receive notifications on Qodo Merge tools, is to disable the help collapsible section in Qodo Merge bot comments.
|
||||
This can be done by setting `enable_help_text=false` for the relevant tool in the configuration file.
|
||||
For example, to disable the help text for the `pr_reviewer` tool, set:
|
||||
```
|
||||
[pr_reviewer]
|
||||
enable_help_text = false
|
||||
```
|
||||
```
|
@ -2,9 +2,9 @@
|
||||
|
||||
{% block scripts %}
|
||||
{{ super() }}
|
||||
|
||||
|
||||
<!-- Google Tag Manager (noscript) -->
|
||||
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5C9KZBM3"
|
||||
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
|
||||
<!-- End Google Tag Manager (noscript) -->
|
||||
{% endblock %}
|
||||
{% endblock %}
|
@ -42,7 +42,7 @@
|
||||
}
|
||||
|
||||
.social-icons svg {
|
||||
width: 24px;
|
||||
width: 24px;
|
||||
height: auto;
|
||||
fill: white;
|
||||
}
|
||||
|
@ -3,5 +3,5 @@
|
||||
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
|
||||
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
|
||||
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
|
||||
})(window,document,'script','dataLayer','GTM-M6PJSFV');</script>
|
||||
<!-- End Google Tag Manager -->
|
||||
})(window,document,'script','dataLayer','GTM-5C9KZBM3');</script>
|
||||
<!-- End Google Tag Manager -->
|
@ -0,0 +1 @@
|
||||
|
||||
|
@ -3,6 +3,7 @@ from functools import partial
|
||||
|
||||
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
|
||||
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
|
||||
|
||||
from pr_agent.algo.utils import update_settings_from_args
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.git_providers.utils import apply_repo_settings
|
||||
|
@ -19,13 +19,10 @@ MAX_TOKENS = {
|
||||
'gpt-4o-mini': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'gpt-4o-mini-2024-07-18': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'gpt-4o-2024-08-06': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'gpt-4o-2024-11-20': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'o1-mini': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'o1-mini-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'o1-preview': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'o1-preview-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
|
||||
'o1-2024-12-17': 204800, # 200K, but may be limited by config.max_model_tokens
|
||||
'o1': 204800, # 200K, but may be limited by config.max_model_tokens
|
||||
'claude-instant-1': 100000,
|
||||
'claude-2': 100000,
|
||||
'command-nightly': 4096,
|
||||
@ -34,7 +31,6 @@ MAX_TOKENS = {
|
||||
'vertex_ai/codechat-bison': 6144,
|
||||
'vertex_ai/codechat-bison-32k': 32000,
|
||||
'vertex_ai/claude-3-haiku@20240307': 100000,
|
||||
'vertex_ai/claude-3-5-haiku@20241022': 100000,
|
||||
'vertex_ai/claude-3-sonnet@20240229': 100000,
|
||||
'vertex_ai/claude-3-opus@20240229': 100000,
|
||||
'vertex_ai/claude-3-5-sonnet@20240620': 100000,
|
||||
@ -44,7 +40,6 @@ MAX_TOKENS = {
|
||||
'vertex_ai/gemma2': 8200,
|
||||
'gemini/gemini-1.5-pro': 1048576,
|
||||
'gemini/gemini-1.5-flash': 1048576,
|
||||
'gemini/gemini-2.0-flash-exp': 1048576,
|
||||
'codechat-bison': 6144,
|
||||
'codechat-bison-32k': 32000,
|
||||
'anthropic.claude-instant-v1': 100000,
|
||||
@ -53,13 +48,11 @@ MAX_TOKENS = {
|
||||
'anthropic/claude-3-opus-20240229': 100000,
|
||||
'anthropic/claude-3-5-sonnet-20240620': 100000,
|
||||
'anthropic/claude-3-5-sonnet-20241022': 100000,
|
||||
'anthropic/claude-3-5-haiku-20241022': 100000,
|
||||
'bedrock/anthropic.claude-instant-v1': 100000,
|
||||
'bedrock/anthropic.claude-v2': 100000,
|
||||
'bedrock/anthropic.claude-v2:1': 100000,
|
||||
'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000,
|
||||
'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000,
|
||||
'bedrock/anthropic.claude-3-5-haiku-20241022-v1:0': 100000,
|
||||
'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000,
|
||||
'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000,
|
||||
'claude-3-5-sonnet': 100000,
|
||||
|
@ -3,7 +3,7 @@ from abc import ABC, abstractmethod
|
||||
|
||||
class BaseAiHandler(ABC):
|
||||
"""
|
||||
This class defines the interface for an AI handler to be used by the PR Agents.
|
||||
This class defines the interface for an AI handler to be used by the PR Agents.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
@ -23,6 +23,6 @@ class BaseAiHandler(ABC):
|
||||
model (str): the name of the model to use for the chat completion
|
||||
system (str): the system message string to use for the chat completion
|
||||
user (str): the user message string to use for the chat completion
|
||||
temperature (float): the temperature to use for the chat completion
|
||||
temperature (float): the temperature to use for the chat completion
|
||||
"""
|
||||
pass
|
||||
|
@ -1,18 +1,17 @@
|
||||
try:
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
from langchain_openai import AzureChatOpenAI, ChatOpenAI
|
||||
from langchain_openai import ChatOpenAI, AzureChatOpenAI
|
||||
from langchain_core.messages import SystemMessage, HumanMessage
|
||||
except: # we don't enforce langchain as a dependency, so if it's not installed, just move on
|
||||
pass
|
||||
|
||||
import functools
|
||||
|
||||
from openai import APIError, RateLimitError, Timeout
|
||||
from retry import retry
|
||||
|
||||
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
from openai import APIError, RateLimitError, Timeout
|
||||
from retry import retry
|
||||
import functools
|
||||
|
||||
OPENAI_RETRIES = 5
|
||||
|
||||
|
||||
@ -74,3 +73,4 @@ class LangChainOpenAIHandler(BaseAiHandler):
|
||||
raise ValueError(f"OpenAI {e.name} is required") from e
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
@ -1,13 +1,11 @@
|
||||
import os
|
||||
|
||||
import requests
|
||||
import litellm
|
||||
import openai
|
||||
import requests
|
||||
from litellm import acompletion
|
||||
from tenacity import retry, retry_if_exception_type, stop_after_attempt
|
||||
|
||||
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
|
||||
from pr_agent.algo.utils import get_version
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
@ -133,7 +131,7 @@ class LiteLLMAIHandler(BaseAiHandler):
|
||||
if "langfuse" in callbacks:
|
||||
metadata.update({
|
||||
"trace_name": command,
|
||||
"tags": [git_provider, command, f'version:{get_version()}'],
|
||||
"tags": [git_provider, command],
|
||||
"trace_metadata": {
|
||||
"command": command,
|
||||
"pr_url": pr_url,
|
||||
@ -142,7 +140,7 @@ class LiteLLMAIHandler(BaseAiHandler):
|
||||
if "langsmith" in callbacks:
|
||||
metadata.update({
|
||||
"run_name": command,
|
||||
"tags": [git_provider, command, f'version:{get_version()}'],
|
||||
"tags": [git_provider, command],
|
||||
"extra": {
|
||||
"metadata": {
|
||||
"command": command,
|
||||
@ -193,8 +191,8 @@ class LiteLLMAIHandler(BaseAiHandler):
|
||||
messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
|
||||
{"type": "image_url", "image_url": {"url": img_path}}]
|
||||
|
||||
# Currently, model OpenAI o1 series does not support a separate system and user prompts
|
||||
O1_MODEL_PREFIX = 'o1'
|
||||
# Currently O1 does not support separate system and user prompts
|
||||
O1_MODEL_PREFIX = 'o1-'
|
||||
model_type = model.split('/')[-1] if '/' in model else model
|
||||
if model_type.startswith(O1_MODEL_PREFIX):
|
||||
user = f"{system}\n\n\n{user}"
|
||||
|
@ -4,7 +4,6 @@ import openai
|
||||
from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
|
||||
from retry import retry
|
||||
|
||||
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
@ -42,6 +41,7 @@ class OpenAIHandler(BaseAiHandler):
|
||||
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
|
||||
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
|
||||
try:
|
||||
deployment_id = self.deployment_id
|
||||
get_logger().info("System: ", system)
|
||||
get_logger().info("User: ", user)
|
||||
messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
|
||||
@ -65,4 +65,4 @@ class OpenAIHandler(BaseAiHandler):
|
||||
raise
|
||||
except (Exception) as e:
|
||||
get_logger().error("Unknown error during OpenAI inference: ", e)
|
||||
raise
|
||||
raise
|
@ -3,8 +3,8 @@ from __future__ import annotations
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
|
||||
@ -31,7 +31,7 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
|
||||
|
||||
|
||||
def decode_if_bytes(original_file_str):
|
||||
if isinstance(original_file_str, (bytes, bytearray)):
|
||||
if isinstance(original_file_str, bytes):
|
||||
try:
|
||||
return original_file_str.decode('utf-8')
|
||||
except UnicodeDecodeError:
|
||||
@ -61,26 +61,23 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
|
||||
patch_lines = patch_str.splitlines()
|
||||
extended_patch_lines = []
|
||||
|
||||
is_valid_hunk = True
|
||||
start1, size1, start2, size2 = -1, -1, -1, -1
|
||||
RE_HUNK_HEADER = re.compile(
|
||||
r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
|
||||
try:
|
||||
for i,line in enumerate(patch_lines):
|
||||
for line in patch_lines:
|
||||
if line.startswith('@@'):
|
||||
match = RE_HUNK_HEADER.match(line)
|
||||
# identify hunk header
|
||||
if match:
|
||||
# finish processing previous hunk
|
||||
if is_valid_hunk and (start1 != -1 and patch_extra_lines_after > 0):
|
||||
if start1 != -1 and patch_extra_lines_after > 0:
|
||||
delta_lines = [f' {line}' for line in original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]]
|
||||
extended_patch_lines.extend(delta_lines)
|
||||
|
||||
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
|
||||
|
||||
is_valid_hunk = check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1)
|
||||
|
||||
if is_valid_hunk and (patch_extra_lines_before > 0 or patch_extra_lines_after > 0):
|
||||
if patch_extra_lines_before > 0 or patch_extra_lines_after > 0:
|
||||
def _calc_context_limits(patch_lines_before):
|
||||
extended_start1 = max(1, start1 - patch_lines_before)
|
||||
extended_size1 = size1 + (start1 - extended_start1) + patch_extra_lines_after
|
||||
@ -141,7 +138,7 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
|
||||
return patch_str
|
||||
|
||||
# finish processing last hunk
|
||||
if start1 != -1 and patch_extra_lines_after > 0 and is_valid_hunk:
|
||||
if start1 != -1 and patch_extra_lines_after > 0:
|
||||
delta_lines = original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]
|
||||
# add space at the beginning of each extra line
|
||||
delta_lines = [f' {line}' for line in delta_lines]
|
||||
@ -151,23 +148,6 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
|
||||
return extended_patch_str
|
||||
|
||||
|
||||
def check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1):
|
||||
"""
|
||||
Check if the hunk lines match the original file content. We saw cases where the hunk header line doesn't match the original file content, and then
|
||||
extending the hunk with extra lines before the hunk header can cause the hunk to be invalid.
|
||||
"""
|
||||
is_valid_hunk = True
|
||||
try:
|
||||
if i + 1 < len(patch_lines) and patch_lines[i + 1][0] == ' ': # an existing line in the file
|
||||
if patch_lines[i + 1].strip() != original_lines[start1 - 1].strip():
|
||||
is_valid_hunk = False
|
||||
get_logger().error(
|
||||
f"Invalid hunk in PR, line {start1} in hunk header doesn't match the original file content")
|
||||
except:
|
||||
pass
|
||||
return is_valid_hunk
|
||||
|
||||
|
||||
def extract_hunk_headers(match):
|
||||
res = list(match.groups())
|
||||
for i in range(len(res)):
|
||||
@ -408,4 +388,4 @@ def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, s
|
||||
if not line.startswith('-'): # currently we don't support /ask line for deleted lines
|
||||
selected_lines_num += 1
|
||||
|
||||
return patch_with_lines_str.rstrip(), selected_lines.rstrip()
|
||||
return patch_with_lines_str.rstrip(), selected_lines.rstrip()
|
@ -4,6 +4,8 @@ from typing import Dict
|
||||
from pr_agent.config_loader import get_settings
|
||||
|
||||
|
||||
|
||||
|
||||
def filter_bad_extensions(files):
|
||||
# Bad Extensions, source: https://github.com/EleutherAI/github-downloader/blob/345e7c4cbb9e0dc8a0615fd995a08bf9d73b3fe6/download_repo_text.py # noqa: E501
|
||||
bad_extensions = get_settings().bad_extensions.default
|
||||
|
@ -5,15 +5,14 @@ from typing import Callable, List, Tuple
|
||||
|
||||
from github import RateLimitExceededException
|
||||
|
||||
from pr_agent.algo.file_filter import filter_ignored
|
||||
from pr_agent.algo.git_patch_processing import (
|
||||
convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions)
|
||||
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
|
||||
from pr_agent.algo.language_handler import sort_files_by_main_languages
|
||||
from pr_agent.algo.file_filter import filter_ignored
|
||||
from pr_agent.algo.token_handler import TokenHandler
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.algo.utils import ModelType, clip_tokens, get_max_tokens, get_weak_model
|
||||
from pr_agent.algo.utils import get_max_tokens, clip_tokens, ModelType
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.git_providers.git_provider import GitProvider
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
DELETED_FILES_ = "Deleted files:\n"
|
||||
@ -354,8 +353,8 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT
|
||||
|
||||
|
||||
def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]:
|
||||
if model_type == ModelType.WEAK:
|
||||
model = get_weak_model()
|
||||
if model_type == ModelType.TURBO:
|
||||
model = get_settings().config.model_turbo
|
||||
else:
|
||||
model = get_settings().config.model
|
||||
fallback_models = get_settings().config.fallback_models
|
||||
|
@ -1,9 +1,8 @@
|
||||
from threading import Lock
|
||||
|
||||
from jinja2 import Environment, StrictUndefined
|
||||
from tiktoken import encoding_for_model, get_encoding
|
||||
|
||||
from pr_agent.config_loader import get_settings
|
||||
from threading import Lock
|
||||
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
|
||||
@ -86,4 +85,4 @@ class TokenHandler:
|
||||
Returns:
|
||||
The number of tokens in the patch string.
|
||||
"""
|
||||
return len(self.encoder.encode(patch, disallowed_special=()))
|
||||
return len(self.encoder.encode(patch, disallowed_special=()))
|
@ -7,15 +7,14 @@ import html
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import textwrap
|
||||
import time
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from importlib.metadata import PackageNotFoundError, version
|
||||
from typing import Any, List, Tuple
|
||||
|
||||
|
||||
import html2text
|
||||
import requests
|
||||
import yaml
|
||||
@ -24,17 +23,10 @@ from starlette_context import context
|
||||
|
||||
from pr_agent.algo import MAX_TOKENS
|
||||
from pr_agent.algo.token_handler import TokenEncoder
|
||||
from pr_agent.algo.types import FilePatchInfo
|
||||
from pr_agent.config_loader import get_settings, global_settings
|
||||
from pr_agent.algo.types import FilePatchInfo
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
|
||||
def get_weak_model() -> str:
|
||||
if get_settings().get("config.model_weak"):
|
||||
return get_settings().config.model_weak
|
||||
return get_settings().config.model
|
||||
|
||||
|
||||
class Range(BaseModel):
|
||||
line_start: int # should be 0-indexed
|
||||
line_end: int
|
||||
@ -43,7 +35,8 @@ class Range(BaseModel):
|
||||
|
||||
class ModelType(str, Enum):
|
||||
REGULAR = "regular"
|
||||
WEAK = "weak"
|
||||
TURBO = "turbo"
|
||||
|
||||
|
||||
class PRReviewHeader(str, Enum):
|
||||
REGULAR = "## PR Reviewer Guide"
|
||||
@ -104,8 +97,7 @@ def unique_strings(input_list: List[str]) -> List[str]:
|
||||
def convert_to_markdown_v2(output_data: dict,
|
||||
gfm_supported: bool = True,
|
||||
incremental_review=None,
|
||||
git_provider=None,
|
||||
files=None) -> str:
|
||||
git_provider=None) -> str:
|
||||
"""
|
||||
Convert a dictionary of data into markdown format.
|
||||
Args:
|
||||
@ -181,7 +173,7 @@ def convert_to_markdown_v2(output_data: dict,
|
||||
if is_value_no(value):
|
||||
markdown_text += f'### {emoji} No relevant tests\n\n'
|
||||
else:
|
||||
markdown_text += f"### {emoji} PR contains tests\n\n"
|
||||
markdown_text += f"### PR contains tests\n\n"
|
||||
elif 'ticket compliance check' in key_nice.lower():
|
||||
markdown_text = ticket_markdown_logic(emoji, markdown_text, value, gfm_supported)
|
||||
elif 'security concerns' in key_nice.lower():
|
||||
@ -229,31 +221,15 @@ def convert_to_markdown_v2(output_data: dict,
|
||||
continue
|
||||
relevant_file = issue.get('relevant_file', '').strip()
|
||||
issue_header = issue.get('issue_header', '').strip()
|
||||
if issue_header.lower() == 'possible bug':
|
||||
issue_header = 'Possible Issue' # Make the header less frightening
|
||||
issue_content = issue.get('issue_content', '').strip()
|
||||
start_line = int(str(issue.get('start_line', 0)).strip())
|
||||
end_line = int(str(issue.get('end_line', 0)).strip())
|
||||
|
||||
relevant_lines_str = extract_relevant_lines_str(end_line, files, relevant_file, start_line)
|
||||
if git_provider:
|
||||
reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
|
||||
else:
|
||||
reference_link = None
|
||||
reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
|
||||
|
||||
if gfm_supported:
|
||||
if reference_link is not None and len(reference_link) > 0:
|
||||
if relevant_lines_str:
|
||||
issue_str = f"<details><summary><a href='{reference_link}'><strong>{issue_header}</strong></a>\n\n{issue_content}</summary>\n\n{relevant_lines_str}\n\n</details>"
|
||||
else:
|
||||
issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
|
||||
else:
|
||||
issue_str = f"<strong>{issue_header}</strong><br>{issue_content}"
|
||||
issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
|
||||
else:
|
||||
if reference_link is not None and len(reference_link) > 0:
|
||||
issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
|
||||
else:
|
||||
issue_str = f"**{issue_header}**\n\n{issue_content}\n\n"
|
||||
issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
|
||||
markdown_text += f"{issue_str}\n\n"
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Failed to process 'Recommended focus areas for review': {e}")
|
||||
@ -288,25 +264,6 @@ def convert_to_markdown_v2(output_data: dict,
|
||||
|
||||
return markdown_text
|
||||
|
||||
def extract_relevant_lines_str(end_line, files, relevant_file, start_line):
|
||||
try:
|
||||
relevant_lines_str = ""
|
||||
if files:
|
||||
files = set_file_languages(files)
|
||||
for file in files:
|
||||
if file.filename.strip() == relevant_file:
|
||||
if not file.head_file:
|
||||
get_logger().warning(f"No content found in file: {file.filename}")
|
||||
return ""
|
||||
relevant_file_lines = file.head_file.splitlines()
|
||||
relevant_lines_str = "\n".join(relevant_file_lines[start_line - 1:end_line])
|
||||
relevant_lines_str = f"```{file.language}\n{relevant_lines_str}\n```"
|
||||
break
|
||||
return relevant_lines_str
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Failed to extract relevant lines: {e}")
|
||||
return ""
|
||||
|
||||
|
||||
def ticket_markdown_logic(emoji, markdown_text, value, gfm_supported) -> str:
|
||||
ticket_compliance_str = ""
|
||||
@ -1140,48 +1097,3 @@ def process_description(description_full: str) -> Tuple[str, List]:
|
||||
get_logger().exception(f"Failed to process description: {e}")
|
||||
|
||||
return base_description_str, files
|
||||
|
||||
def get_version() -> str:
|
||||
# First check pyproject.toml if running directly out of repository
|
||||
if os.path.exists("pyproject.toml"):
|
||||
if sys.version_info >= (3, 11):
|
||||
import tomllib
|
||||
with open("pyproject.toml", "rb") as f:
|
||||
data = tomllib.load(f)
|
||||
if "project" in data and "version" in data["project"]:
|
||||
return data["project"]["version"]
|
||||
else:
|
||||
get_logger().warning("Version not found in pyproject.toml")
|
||||
else:
|
||||
get_logger().warning("Unable to determine local version from pyproject.toml")
|
||||
|
||||
# Otherwise get the installed pip package version
|
||||
try:
|
||||
return version('pr-agent')
|
||||
except PackageNotFoundError:
|
||||
get_logger().warning("Unable to find package named 'pr-agent'")
|
||||
return "unknown"
|
||||
|
||||
|
||||
def set_file_languages(diff_files) -> List[FilePatchInfo]:
|
||||
try:
|
||||
# if the language is already set, do not change it
|
||||
if hasattr(diff_files[0], 'language') and diff_files[0].language:
|
||||
return diff_files
|
||||
|
||||
# map file extensions to programming languages
|
||||
language_extension_map_org = get_settings().language_extension_map_org
|
||||
extension_to_language = {}
|
||||
for language, extensions in language_extension_map_org.items():
|
||||
for ext in extensions:
|
||||
extension_to_language[ext] = language
|
||||
for file in diff_files:
|
||||
extension_s = '.' + file.filename.rsplit('.')[-1]
|
||||
language_name = "txt"
|
||||
if extension_s and (extension_s in extension_to_language):
|
||||
language_name = extension_to_language[extension_s]
|
||||
file.language = language_name.lower()
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Failed to set file languages: {e}")
|
||||
|
||||
return diff_files
|
||||
|
@ -3,9 +3,8 @@ import asyncio
|
||||
import os
|
||||
|
||||
from pr_agent.agent.pr_agent import PRAgent, commands
|
||||
from pr_agent.algo.utils import get_version
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.log import get_logger, setup_logger
|
||||
from pr_agent.log import setup_logger, get_logger
|
||||
|
||||
log_level = os.environ.get("LOG_LEVEL", "INFO")
|
||||
setup_logger(log_level)
|
||||
@ -46,7 +45,6 @@ def set_parser():
|
||||
To edit any configuration parameter from 'configuration.toml', just add -config_path=<value>.
|
||||
For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions="focus on the file: ..."'
|
||||
""")
|
||||
parser.add_argument('--version', action='version', version=f'pr-agent {get_version()}')
|
||||
parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', default=None)
|
||||
parser.add_argument('--issue_url', type=str, help='The URL of the Issue to review', default=None)
|
||||
parser.add_argument('command', type=str, help='The', choices=commands, default='review')
|
||||
|
@ -1,16 +1,14 @@
|
||||
from starlette_context import context
|
||||
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
|
||||
from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
|
||||
from pr_agent.git_providers.bitbucket_server_provider import \
|
||||
BitbucketServerProvider
|
||||
from pr_agent.git_providers.bitbucket_server_provider import BitbucketServerProvider
|
||||
from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
|
||||
from pr_agent.git_providers.gerrit_provider import GerritProvider
|
||||
from pr_agent.git_providers.git_provider import GitProvider
|
||||
from pr_agent.git_providers.github_provider import GithubProvider
|
||||
from pr_agent.git_providers.gitlab_provider import GitLabProvider
|
||||
from pr_agent.git_providers.local_git_provider import LocalGitProvider
|
||||
from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
|
||||
from pr_agent.git_providers.gerrit_provider import GerritProvider
|
||||
from starlette_context import context
|
||||
|
||||
_GIT_PROVIDERS = {
|
||||
'github': GithubProvider,
|
||||
|
@ -2,16 +2,13 @@ import os
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
|
||||
from ..algo.file_filter import filter_ignored
|
||||
from ..algo.language_handler import is_valid_file
|
||||
from ..algo.utils import (PRDescriptionHeader, clip_tokens,
|
||||
find_line_number_of_relevant_line_in_file,
|
||||
load_large_diff)
|
||||
from ..config_loader import get_settings
|
||||
from ..log import get_logger
|
||||
from ..algo.language_handler import is_valid_file
|
||||
from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff, PRDescriptionHeader
|
||||
from ..config_loader import get_settings
|
||||
from .git_provider import GitProvider
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
|
||||
AZURE_DEVOPS_AVAILABLE = True
|
||||
ADO_APP_CLIENT_DEFAULT_ID = "499b84ac-1321-427f-aa17-267ca6975798/.default"
|
||||
@ -19,16 +16,19 @@ MAX_PR_DESCRIPTION_AZURE_LENGTH = 4000-1
|
||||
|
||||
try:
|
||||
# noinspection PyUnresolvedReferences
|
||||
from msrest.authentication import BasicAuthentication
|
||||
# noinspection PyUnresolvedReferences
|
||||
from azure.devops.connection import Connection
|
||||
# noinspection PyUnresolvedReferences
|
||||
from azure.devops.v7_1.git.models import (Comment, CommentThread,
|
||||
GitPullRequest,
|
||||
GitPullRequestIterationChanges,
|
||||
GitVersionDescriptor)
|
||||
# noinspection PyUnresolvedReferences
|
||||
from azure.identity import DefaultAzureCredential
|
||||
from msrest.authentication import BasicAuthentication
|
||||
# noinspection PyUnresolvedReferences
|
||||
from azure.devops.v7_1.git.models import (
|
||||
Comment,
|
||||
CommentThread,
|
||||
GitVersionDescriptor,
|
||||
GitPullRequest,
|
||||
GitPullRequestIterationChanges,
|
||||
)
|
||||
except ImportError:
|
||||
AZURE_DEVOPS_AVAILABLE = False
|
||||
|
||||
@ -67,14 +67,16 @@ class AzureDevopsProvider(GitProvider):
|
||||
relevant_lines_end = suggestion['relevant_lines_end']
|
||||
|
||||
if not relevant_lines_start or relevant_lines_start == -1:
|
||||
get_logger().warning(
|
||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
|
||||
continue
|
||||
|
||||
if relevant_lines_end < relevant_lines_start:
|
||||
get_logger().warning(f"Failed to publish code suggestion, "
|
||||
f"relevant_lines_end is {relevant_lines_end} and "
|
||||
f"relevant_lines_start is {relevant_lines_start}")
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().exception(f"Failed to publish code suggestion, "
|
||||
f"relevant_lines_end is {relevant_lines_end} and "
|
||||
f"relevant_lines_start is {relevant_lines_start}")
|
||||
continue
|
||||
|
||||
if relevant_lines_end > relevant_lines_start:
|
||||
@ -93,11 +95,9 @@ class AzureDevopsProvider(GitProvider):
|
||||
"side": "RIGHT",
|
||||
}
|
||||
post_parameters_list.append(post_parameters)
|
||||
if not post_parameters_list:
|
||||
return False
|
||||
|
||||
for post_parameters in post_parameters_list:
|
||||
try:
|
||||
try:
|
||||
for post_parameters in post_parameters_list:
|
||||
comment = Comment(content=post_parameters["body"], comment_type=1)
|
||||
thread = CommentThread(comments=[comment],
|
||||
thread_context={
|
||||
@ -117,11 +117,15 @@ class AzureDevopsProvider(GitProvider):
|
||||
repository_id=self.repo_slug,
|
||||
pull_request_id=self.pr_num
|
||||
)
|
||||
except Exception as e:
|
||||
get_logger().warning(f"Azure failed to publish code suggestion, error: {e}")
|
||||
return True
|
||||
|
||||
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().info(
|
||||
f"Published code suggestion on {self.pr_num} at {post_parameters['path']}"
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().error(f"Failed to publish code suggestion, error: {e}")
|
||||
return False
|
||||
|
||||
def get_pr_description_full(self) -> str:
|
||||
return self.pr.description
|
||||
@ -378,9 +382,6 @@ class AzureDevopsProvider(GitProvider):
|
||||
return []
|
||||
|
||||
def publish_comment(self, pr_comment: str, is_temporary: bool = False, thread_context=None):
|
||||
if is_temporary and not get_settings().config.publish_output_progress:
|
||||
get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
|
||||
return None
|
||||
comment = Comment(content=pr_comment)
|
||||
thread = CommentThread(comments=[comment], thread_context=thread_context, status=5)
|
||||
thread_response = self.azure_devops_client.create_thread(
|
||||
@ -619,3 +620,4 @@ class AzureDevopsProvider(GitProvider):
|
||||
|
||||
def publish_file_comments(self, file_comments: list) -> bool:
|
||||
pass
|
||||
|
||||
|
@ -1,6 +1,4 @@
|
||||
import difflib
|
||||
import json
|
||||
import re
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
@ -8,14 +6,13 @@ import requests
|
||||
from atlassian.bitbucket import Cloud
|
||||
from starlette_context import context
|
||||
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
|
||||
from pr_agent.algo.types import FilePatchInfo, EDIT_TYPE
|
||||
from ..algo.file_filter import filter_ignored
|
||||
from ..algo.language_handler import is_valid_file
|
||||
from ..algo.utils import find_line_number_of_relevant_line_in_file
|
||||
from ..config_loader import get_settings
|
||||
from ..log import get_logger
|
||||
from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider
|
||||
from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
|
||||
|
||||
|
||||
def _gef_filename(diff):
|
||||
@ -74,38 +71,24 @@ class BitbucketProvider(GitProvider):
|
||||
post_parameters_list = []
|
||||
for suggestion in code_suggestions:
|
||||
body = suggestion["body"]
|
||||
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
|
||||
if original_suggestion:
|
||||
try:
|
||||
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
|
||||
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
|
||||
diff = difflib.unified_diff(existing_code.split('\n'),
|
||||
improved_code.split('\n'), n=999)
|
||||
patch_orig = "\n".join(diff)
|
||||
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
|
||||
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
|
||||
# replace ```suggestion ... ``` with diff_code, using regex:
|
||||
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
|
||||
continue
|
||||
|
||||
relevant_file = suggestion["relevant_file"]
|
||||
relevant_lines_start = suggestion["relevant_lines_start"]
|
||||
relevant_lines_end = suggestion["relevant_lines_end"]
|
||||
|
||||
if not relevant_lines_start or relevant_lines_start == -1:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
continue
|
||||
|
||||
if relevant_lines_end < relevant_lines_start:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, "
|
||||
f"relevant_lines_end is {relevant_lines_end} and "
|
||||
f"relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, "
|
||||
f"relevant_lines_end is {relevant_lines_end} and "
|
||||
f"relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
continue
|
||||
|
||||
if relevant_lines_end > relevant_lines_start:
|
||||
@ -129,7 +112,8 @@ class BitbucketProvider(GitProvider):
|
||||
self.publish_inline_comments(post_parameters_list)
|
||||
return True
|
||||
except Exception as e:
|
||||
get_logger().error(f"Bitbucket failed to publish code suggestion, error: {e}")
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().error(f"Failed to publish code suggestion, error: {e}")
|
||||
return False
|
||||
|
||||
def publish_file_comments(self, file_comments: list) -> bool:
|
||||
@ -137,7 +121,7 @@ class BitbucketProvider(GitProvider):
|
||||
|
||||
def is_supported(self, capability: str) -> bool:
|
||||
if capability in ['get_issue_comments', 'publish_inline_comments', 'get_labels', 'gfm_markdown',
|
||||
'publish_file_comments']:
|
||||
'publish_file_comments']:
|
||||
return False
|
||||
return True
|
||||
|
||||
@ -325,9 +309,6 @@ class BitbucketProvider(GitProvider):
|
||||
self.publish_comment(pr_comment)
|
||||
|
||||
def publish_comment(self, pr_comment: str, is_temporary: bool = False):
|
||||
if is_temporary and not get_settings().config.publish_output_progress:
|
||||
get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
|
||||
return None
|
||||
pr_comment = self.limit_output_characters(pr_comment, self.max_comment_length)
|
||||
comment = self.pr.comment(pr_comment)
|
||||
if is_temporary:
|
||||
|
@ -1,21 +1,16 @@
|
||||
import difflib
|
||||
import re
|
||||
|
||||
from packaging.version import parse as parse_version
|
||||
from distutils.version import LooseVersion
|
||||
from requests.exceptions import HTTPError
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import quote_plus, urlparse
|
||||
|
||||
from atlassian.bitbucket import Bitbucket
|
||||
from requests.exceptions import HTTPError
|
||||
|
||||
from ..algo.git_patch_processing import decode_if_bytes
|
||||
from ..algo.language_handler import is_valid_file
|
||||
from .git_provider import GitProvider
|
||||
from ..algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from ..algo.utils import (find_line_number_of_relevant_line_in_file,
|
||||
load_large_diff)
|
||||
from ..algo.language_handler import is_valid_file
|
||||
from ..algo.utils import load_large_diff, find_line_number_of_relevant_line_in_file
|
||||
from ..config_loader import get_settings
|
||||
from ..log import get_logger
|
||||
from .git_provider import GitProvider
|
||||
|
||||
|
||||
class BitbucketServerProvider(GitProvider):
|
||||
@ -40,7 +35,7 @@ class BitbucketServerProvider(GitProvider):
|
||||
token=get_settings().get("BITBUCKET_SERVER.BEARER_TOKEN",
|
||||
None))
|
||||
try:
|
||||
self.bitbucket_api_version = parse_version(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
|
||||
self.bitbucket_api_version = LooseVersion(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
|
||||
except Exception:
|
||||
self.bitbucket_api_version = None
|
||||
|
||||
@ -70,37 +65,24 @@ class BitbucketServerProvider(GitProvider):
|
||||
post_parameters_list = []
|
||||
for suggestion in code_suggestions:
|
||||
body = suggestion["body"]
|
||||
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
|
||||
if original_suggestion:
|
||||
try:
|
||||
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
|
||||
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
|
||||
diff = difflib.unified_diff(existing_code.split('\n'),
|
||||
improved_code.split('\n'), n=999)
|
||||
patch_orig = "\n".join(diff)
|
||||
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
|
||||
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
|
||||
# replace ```suggestion ... ``` with diff_code, using regex:
|
||||
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
|
||||
continue
|
||||
relevant_file = suggestion["relevant_file"]
|
||||
relevant_lines_start = suggestion["relevant_lines_start"]
|
||||
relevant_lines_end = suggestion["relevant_lines_end"]
|
||||
|
||||
if not relevant_lines_start or relevant_lines_start == -1:
|
||||
get_logger().warning(
|
||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
continue
|
||||
|
||||
if relevant_lines_end < relevant_lines_start:
|
||||
get_logger().warning(
|
||||
f"Failed to publish code suggestion, "
|
||||
f"relevant_lines_end is {relevant_lines_end} and "
|
||||
f"relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().exception(
|
||||
f"Failed to publish code suggestion, "
|
||||
f"relevant_lines_end is {relevant_lines_end} and "
|
||||
f"relevant_lines_start is {relevant_lines_start}"
|
||||
)
|
||||
continue
|
||||
|
||||
if relevant_lines_end > relevant_lines_start:
|
||||
@ -177,7 +159,7 @@ class BitbucketServerProvider(GitProvider):
|
||||
head_sha = self.pr.fromRef['latestCommit']
|
||||
|
||||
# if Bitbucket api version is >= 8.16 then use the merge-base api for 2-way diff calculation
|
||||
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("8.16"):
|
||||
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("8.16"):
|
||||
try:
|
||||
base_sha = self.bitbucket_client.get(self._get_merge_base())['id']
|
||||
except Exception as e:
|
||||
@ -192,7 +174,7 @@ class BitbucketServerProvider(GitProvider):
|
||||
# if Bitbucket api version is None or < 7.0 then do a simple diff with a guaranteed common ancestor
|
||||
base_sha = source_commits_list[-1]['parents'][0]['id']
|
||||
# if Bitbucket api version is 7.0-8.15 then use 2-way diff functionality for the base_sha
|
||||
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("7.0"):
|
||||
if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("7.0"):
|
||||
try:
|
||||
destination_commits = list(
|
||||
self.bitbucket_client.get_commits(self.workspace_slug, self.repo_slug, base_sha,
|
||||
@ -218,21 +200,25 @@ class BitbucketServerProvider(GitProvider):
|
||||
case 'ADD':
|
||||
edit_type = EDIT_TYPE.ADDED
|
||||
new_file_content_str = self.get_file(file_path, head_sha)
|
||||
new_file_content_str = decode_if_bytes(new_file_content_str)
|
||||
if isinstance(new_file_content_str, (bytes, bytearray)):
|
||||
new_file_content_str = new_file_content_str.decode("utf-8")
|
||||
original_file_content_str = ""
|
||||
case 'DELETE':
|
||||
edit_type = EDIT_TYPE.DELETED
|
||||
new_file_content_str = ""
|
||||
original_file_content_str = self.get_file(file_path, base_sha)
|
||||
original_file_content_str = decode_if_bytes(original_file_content_str)
|
||||
if isinstance(original_file_content_str, (bytes, bytearray)):
|
||||
original_file_content_str = original_file_content_str.decode("utf-8")
|
||||
case 'RENAME':
|
||||
edit_type = EDIT_TYPE.RENAMED
|
||||
case _:
|
||||
edit_type = EDIT_TYPE.MODIFIED
|
||||
original_file_content_str = self.get_file(file_path, base_sha)
|
||||
original_file_content_str = decode_if_bytes(original_file_content_str)
|
||||
if isinstance(original_file_content_str, (bytes, bytearray)):
|
||||
original_file_content_str = original_file_content_str.decode("utf-8")
|
||||
new_file_content_str = self.get_file(file_path, head_sha)
|
||||
new_file_content_str = decode_if_bytes(new_file_content_str)
|
||||
if isinstance(new_file_content_str, (bytes, bytearray)):
|
||||
new_file_content_str = new_file_content_str.decode("utf-8")
|
||||
|
||||
patch = load_large_diff(file_path, new_file_content_str, original_file_content_str)
|
||||
|
||||
@ -343,10 +329,10 @@ class BitbucketServerProvider(GitProvider):
|
||||
for comment in comments:
|
||||
if 'position' in comment:
|
||||
self.publish_inline_comment(comment['body'], comment['position'], comment['path'])
|
||||
elif 'start_line' in comment: # multi-line comment
|
||||
elif 'start_line' in comment: # multi-line comment
|
||||
# note that bitbucket does not seem to support range - only a comment on a single line - https://community.developer.atlassian.com/t/api-post-endpoint-for-inline-pull-request-comments/60452
|
||||
self.publish_inline_comment(comment['body'], comment['start_line'], comment['path'])
|
||||
elif 'line' in comment: # single-line comment
|
||||
elif 'line' in comment: # single-line comment
|
||||
self.publish_inline_comment(comment['body'], comment['line'], comment['path'])
|
||||
else:
|
||||
get_logger().error(f"Could not publish inline comment: {comment}")
|
||||
|
@ -4,15 +4,13 @@ from collections import Counter
|
||||
from typing import List, Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from pr_agent.algo.language_handler import is_valid_file
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.git_providers.codecommit_client import CodeCommitClient
|
||||
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from ..algo.utils import load_large_diff
|
||||
from .git_provider import GitProvider
|
||||
from ..config_loader import get_settings
|
||||
from ..log import get_logger
|
||||
from .git_provider import GitProvider
|
||||
|
||||
from pr_agent.algo.language_handler import is_valid_file
|
||||
|
||||
class PullRequestCCMimic:
|
||||
"""
|
||||
@ -164,7 +162,7 @@ class CodeCommitProvider(GitProvider):
|
||||
pr_body=CodeCommitProvider._add_additional_newlines(pr_body),
|
||||
)
|
||||
except Exception as e:
|
||||
raise ValueError(f"CodeCommit Cannot publish description for PR: {self.pr_num}") from e
|
||||
raise ValueError(f"CodeCommit Cannot publish description for PR: {self.pr_num}") from e
|
||||
|
||||
def publish_comment(self, pr_comment: str, is_temporary: bool = False):
|
||||
if is_temporary:
|
||||
@ -192,7 +190,7 @@ class CodeCommitProvider(GitProvider):
|
||||
if not all(key in suggestion for key in ["body", "relevant_file", "relevant_lines_start"]):
|
||||
get_logger().warning(f"Skipping code suggestion #{counter}: Each suggestion must have 'body', 'relevant_file', 'relevant_lines_start' keys")
|
||||
continue
|
||||
|
||||
|
||||
# Publish the code suggestion to CodeCommit
|
||||
try:
|
||||
get_logger().debug(f"Code Suggestion #{counter} in file: {suggestion['relevant_file']}: {suggestion['relevant_lines_start']}")
|
||||
@ -207,12 +205,12 @@ class CodeCommitProvider(GitProvider):
|
||||
)
|
||||
except Exception as e:
|
||||
raise ValueError(f"CodeCommit Cannot publish code suggestions for PR: {self.pr_num}") from e
|
||||
|
||||
|
||||
counter += 1
|
||||
|
||||
# The calling function passes in a list of code suggestions, and this function publishes each suggestion one at a time.
|
||||
# If we were to return False here, the calling function will attempt to publish the same list of code suggestions again, one at a time.
|
||||
# Since this function publishes the suggestions one at a time anyway, we always return True here to avoid the retry.
|
||||
# Since this function publishes the suggestions one at a time anyway, we always return True here to avoid the retry.
|
||||
return True
|
||||
|
||||
def publish_labels(self, labels):
|
||||
@ -240,7 +238,7 @@ class CodeCommitProvider(GitProvider):
|
||||
def get_pr_id(self):
|
||||
"""
|
||||
Returns the PR ID in the format: "repo_name/pr_number".
|
||||
Note: This is an internal identifier for PR-Agent,
|
||||
Note: This is an internal identifier for PR-Agent,
|
||||
and is not the same as the CodeCommit PR identifier.
|
||||
"""
|
||||
try:
|
||||
@ -248,7 +246,7 @@ class CodeCommitProvider(GitProvider):
|
||||
return pr_id
|
||||
except:
|
||||
return ""
|
||||
|
||||
|
||||
def get_languages(self):
|
||||
"""
|
||||
Returns a dictionary of languages, containing the percentage of each language used in the PR.
|
||||
@ -350,7 +348,7 @@ class CodeCommitProvider(GitProvider):
|
||||
"""
|
||||
Check if the provided hostname is a valid AWS CodeCommit hostname.
|
||||
|
||||
This is not an exhaustive check of AWS region names,
|
||||
This is not an exhaustive check of AWS region names,
|
||||
but instead uses a regex to check for matching AWS region patterns.
|
||||
|
||||
Args:
|
||||
|
@ -12,9 +12,9 @@ import requests
|
||||
import urllib3.util
|
||||
from git import Repo
|
||||
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.git_providers.git_provider import GitProvider
|
||||
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||
from pr_agent.git_providers.local_git_provider import PullRequestMimic
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
|
@ -1,12 +1,12 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
# enum EDIT_TYPE (ADDED, DELETED, MODIFIED, RENAMED)
|
||||
from typing import Optional
|
||||
|
||||
from pr_agent.algo.types import FilePatchInfo
|
||||
from pr_agent.algo.utils import Range, process_description
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.algo.types import FilePatchInfo
|
||||
from pr_agent.log import get_logger
|
||||
|
||||
MAX_FILES_ALLOWED_FULL = 50
|
||||
|
||||
class GitProvider(ABC):
|
||||
@ -62,8 +62,8 @@ class GitProvider(ABC):
|
||||
pass
|
||||
|
||||
def get_pr_description(self, full: bool = True, split_changes_walkthrough=False) -> str or tuple:
|
||||
from pr_agent.algo.utils import clip_tokens
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.algo.utils import clip_tokens
|
||||
max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
|
||||
description = self.get_pr_description_full() if full else self.get_user_description()
|
||||
if split_changes_walkthrough:
|
||||
|
@ -1,30 +1,22 @@
|
||||
import copy
|
||||
import difflib
|
||||
import hashlib
|
||||
import itertools
|
||||
import re
|
||||
import time
|
||||
import hashlib
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from github import AppAuthentication, Auth, Github
|
||||
from retry import retry
|
||||
from starlette_context import context
|
||||
|
||||
from ..algo.file_filter import filter_ignored
|
||||
from ..algo.git_patch_processing import extract_hunk_headers
|
||||
from ..algo.language_handler import is_valid_file
|
||||
from ..algo.types import EDIT_TYPE
|
||||
from ..algo.utils import (PRReviewHeader, Range, clip_tokens,
|
||||
find_line_number_of_relevant_line_in_file,
|
||||
load_large_diff, set_file_languages)
|
||||
from ..algo.utils import PRReviewHeader, load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file, Range
|
||||
from ..config_loader import get_settings
|
||||
from ..log import get_logger
|
||||
from ..servers.utils import RateLimitExceeded
|
||||
from .git_provider import (MAX_FILES_ALLOWED_FULL, FilePatchInfo, GitProvider,
|
||||
IncrementalPR)
|
||||
from .git_provider import FilePatchInfo, GitProvider, IncrementalPR, MAX_FILES_ALLOWED_FULL
|
||||
|
||||
|
||||
class GithubProvider(GitProvider):
|
||||
@ -203,24 +195,7 @@ class GithubProvider(GitProvider):
|
||||
if avoid_load:
|
||||
original_file_content_str = ""
|
||||
else:
|
||||
# The base.sha will point to the current state of the base branch (including parallel merges), not the original base commit when the PR was created
|
||||
# We can fix this by finding the merge base commit between the PR head and base branches
|
||||
# Note that The pr.head.sha is actually correct as is - it points to the latest commit in your PR branch.
|
||||
# This SHA isn't affected by parallel merges to the base branch since it's specific to your PR's branch.
|
||||
repo = self.repo_obj
|
||||
pr = self.pr
|
||||
try:
|
||||
compare = repo.compare(pr.base.sha, pr.head.sha)
|
||||
merge_base_commit = compare.merge_base_commit
|
||||
except Exception as e:
|
||||
get_logger().error(f"Failed to get merge base commit: {e}")
|
||||
merge_base_commit = pr.base
|
||||
if merge_base_commit.sha != pr.base.sha:
|
||||
get_logger().info(
|
||||
f"Using merge base commit {merge_base_commit.sha} instead of base commit "
|
||||
f"{pr.base.sha} for {file.filename}")
|
||||
original_file_content_str = self._get_pr_file_content(file, merge_base_commit.sha)
|
||||
|
||||
original_file_content_str = self._get_pr_file_content(file, self.pr.base.sha)
|
||||
if not patch:
|
||||
patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str)
|
||||
|
||||
@ -304,7 +279,8 @@ class GithubProvider(GitProvider):
|
||||
relevant_line_in_file,
|
||||
absolute_position)
|
||||
if position == -1:
|
||||
get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
|
||||
subject_type = "FILE"
|
||||
else:
|
||||
subject_type = "LINE"
|
||||
@ -316,9 +292,11 @@ class GithubProvider(GitProvider):
|
||||
# publish all comments in a single message
|
||||
self.pr.create_review(commit=self.last_commit_id, comments=comments)
|
||||
except Exception as e:
|
||||
get_logger().info(f"Initially failed to publish inline comments as committable")
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
get_logger().error(f"Failed to publish inline comments")
|
||||
|
||||
if (getattr(e, "status", None) == 422 and not disable_fallback):
|
||||
if (getattr(e, "status", None) == 422
|
||||
and get_settings().github.publish_inline_comments_fallback_with_verification and not disable_fallback):
|
||||
pass # continue to try _publish_inline_comments_fallback_with_verification
else:
raise e # will end up with publishing the comments one by one

@@ -326,7 +304,8 @@ class GithubProvider(GitProvider):
try:
self._publish_inline_comments_fallback_with_verification(comments)
except Exception as e:
get_logger().error(f"Failed to publish inline code comments fallback, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish inline code comments fallback, error: {e}")
raise e

def _publish_inline_comments_fallback_with_verification(self, comments: list[dict]):

@@ -351,9 +330,11 @@ class GithubProvider(GitProvider):
for comment in fixed_comments_as_one_liner:
try:
self.publish_inline_comments([comment], disable_fallback=True)
get_logger().info(f"Published invalid comment as a single line comment: {comment}")
if get_settings().config.verbosity_level >= 2:
get_logger().info(f"Published invalid comment as a single line comment: {comment}")
except:
get_logger().error(f"Failed to publish invalid comment as a single line comment: {comment}")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish invalid comment as a single line comment: {comment}")

def _verify_code_comment(self, comment: dict):
is_verified = False

@@ -411,7 +392,8 @@ class GithubProvider(GitProvider):
if fixed_comment != comment:
fixed_comments.append(fixed_comment)
except Exception as e:
get_logger().error(f"Failed to fix inline comment, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to fix inline comment, error: {e}")
return fixed_comments

def publish_code_suggestions(self, code_suggestions: list) -> bool:

@@ -419,24 +401,23 @@ class GithubProvider(GitProvider):
Publishes code suggestions as comments on the PR.
"""
post_parameters_list = []

code_suggestions_validated = self.validate_comments_inside_hunks(code_suggestions)

for suggestion in code_suggestions_validated:
for suggestion in code_suggestions:
body = suggestion['body']
relevant_file = suggestion['relevant_file']
relevant_lines_start = suggestion['relevant_lines_start']
relevant_lines_end = suggestion['relevant_lines_end']

if not relevant_lines_start or relevant_lines_start == -1:
get_logger().exception(
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
if get_settings().config.verbosity_level >= 2:
get_logger().exception(
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
continue

if relevant_lines_end < relevant_lines_start:
get_logger().exception(f"Failed to publish code suggestion, "
f"relevant_lines_end is {relevant_lines_end} and "
f"relevant_lines_start is {relevant_lines_start}")
if get_settings().config.verbosity_level >= 2:
get_logger().exception(f"Failed to publish code suggestion, "
f"relevant_lines_end is {relevant_lines_end} and "
f"relevant_lines_start is {relevant_lines_start}")
continue

if relevant_lines_end > relevant_lines_start:

@@ -460,7 +441,8 @@ class GithubProvider(GitProvider):
self.publish_inline_comments(post_parameters_list)
return True
except Exception as e:
get_logger().error(f"Failed to publish code suggestion, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish code suggestion, error: {e}")
return False

def edit_comment(self, comment, body: str):

@@ -519,7 +501,6 @@ class GithubProvider(GitProvider):
elif self.deployment_type == 'user':
same_comment_creator = self.github_user_id == existing_comment['user']['login']
if existing_comment['subject_type'] == 'file' and comment['path'] == existing_comment['path'] and same_comment_creator:

headers, data_patch = self.pr._requester.requestJsonAndCheck(
"PATCH", f"{self.base_url}/repos/{self.repo}/pulls/comments/{existing_comment['id']}", input={"body":comment['body']}
)

@@ -531,7 +512,8 @@ class GithubProvider(GitProvider):
)
return True
except Exception as e:
get_logger().error(f"Failed to publish diffview file summary, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to publish diffview file summary, error: {e}")
return False

def remove_initial_comment(self):

@@ -819,7 +801,8 @@ class GithubProvider(GitProvider):
link = f"{self.base_url_html}/{self.repo}/pull/{self.pr_num}/files#diff-{sha_file}R{absolute_position}"
return link
except Exception as e:
get_logger().info(f"Failed adding line link, error: {e}")
if get_settings().config.verbosity_level >= 2:
get_logger().info(f"Failed adding line link, error: {e}")

return ""

@@ -879,89 +862,3 @@ class GithubProvider(GitProvider):

def calc_pr_statistics(self, pull_request_data: dict):
return {}

def validate_comments_inside_hunks(self, code_suggestions):
"""
validate that all committable comments are inside PR hunks - this is a must for committable comments in GitHub
"""
code_suggestions_copy = copy.deepcopy(code_suggestions)
diff_files = self.get_diff_files()
RE_HUNK_HEADER = re.compile(
r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

diff_files = set_file_languages(diff_files)

for suggestion in code_suggestions_copy:
try:
relevant_file_path = suggestion['relevant_file']
for file in diff_files:
if file.filename == relevant_file_path:

# generate on-demand the patches range for the relevant file
patch_str = file.patch
if not hasattr(file, 'patches_range'):
file.patches_range = []
patch_lines = patch_str.splitlines()
for i, line in enumerate(patch_lines):
if line.startswith('@@'):
match = RE_HUNK_HEADER.match(line)
# identify hunk header
if match:
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
file.patches_range.append({'start': start2, 'end': start2 + size2 - 1})

patches_range = file.patches_range
comment_start_line = suggestion.get('relevant_lines_start', None)
comment_end_line = suggestion.get('relevant_lines_end', None)
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
if not comment_start_line or not comment_end_line or not original_suggestion:
continue

# check if the comment is inside a valid hunk
is_valid_hunk = False
min_distance = float('inf')
patch_range_min = None
# find the hunk that contains the comment, or the closest one
for i, patch_range in enumerate(patches_range):
d1 = comment_start_line - patch_range['start']
d2 = patch_range['end'] - comment_end_line
if d1 >= 0 and d2 >= 0: # found a valid hunk
is_valid_hunk = True
min_distance = 0
patch_range_min = patch_range
break
elif d1 * d2 <= 0: # comment is possibly inside the hunk
d1_clip = abs(min(0, d1))
d2_clip = abs(min(0, d2))
d = max(d1_clip, d2_clip)
if d < min_distance:
patch_range_min = patch_range
min_distance = min(min_distance, d)
if not is_valid_hunk:
if min_distance < 10: # 10 lines - a reasonable distance to consider the comment inside the hunk
# make the suggestion non-committable, yet multi line
suggestion['relevant_lines_start'] = max(suggestion['relevant_lines_start'], patch_range_min['start'])
suggestion['relevant_lines_end'] = min(suggestion['relevant_lines_end'], patch_range_min['end'])
body = suggestion['body'].strip()

# present new diff code in collapsible
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
diff = difflib.unified_diff(existing_code.split('\n'),
improved_code.split('\n'), n=999)
patch_orig = "\n".join(diff)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
diff_code = f"\n\n<details><summary>New proposed code:</summary>\n\n```diff\n{patch.rstrip()}\n```"
# replace ```suggestion ... ``` with diff_code, using regex:
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
body += "\n\n</details>"
suggestion['body'] = body
get_logger().info(f"Comment was moved to a valid hunk, "
f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
else:
get_logger().error(f"Comment is not inside a valid hunk, "
f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
except Exception as e:
get_logger().error(f"Failed to process patch for committable comment, error: {e}")
return code_suggestions_copy
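For reference, a minimal standalone sketch of the hunk-range idea used in `validate_comments_inside_hunks` above: parse `@@` hunk headers from a unified-diff patch and test whether a suggested line range falls inside any hunk of the new file. This is illustrative only and simplified from the provider code; the helper names below are not part of the repository.

```python
# Illustrative sketch (not the provider class): parse "@@" hunk headers and check
# whether a suggestion's line range is covered by a new-file hunk.
import re

RE_HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

def new_file_hunk_ranges(patch_str: str) -> list[dict]:
    ranges = []
    for line in patch_str.splitlines():
        if line.startswith('@@'):
            match = RE_HUNK_HEADER.match(line)
            if match:
                start2 = int(match.group(3))      # first line of the hunk in the new file
                size2 = int(match.group(4) or 1)  # number of new-file lines in the hunk
                ranges.append({'start': start2, 'end': start2 + size2 - 1})
    return ranges

def is_inside_hunk(start_line: int, end_line: int, ranges: list[dict]) -> bool:
    return any(r['start'] <= start_line and end_line <= r['end'] for r in ranges)
```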
@@ -1,4 +1,3 @@
import difflib
import hashlib
import re
from typing import Optional, Tuple

@@ -8,16 +7,13 @@ import gitlab
import requests
from gitlab import GitlabGetError

from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo

from ..algo.file_filter import filter_ignored
from ..algo.language_handler import is_valid_file
from ..algo.utils import (clip_tokens,
find_line_number_of_relevant_line_in_file,
load_large_diff)
from ..algo.utils import load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file
from ..config_loader import get_settings
from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..log import get_logger
from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider


class DiffNotFoundError(Exception):

@@ -194,9 +190,6 @@ class GitLabProvider(GitProvider):
self.publish_persistent_comment_full(pr_comment, initial_header, update_header, name, final_update_message)

def publish_comment(self, mr_comment: str, is_temporary: bool = False):
if is_temporary and not get_settings().config.publish_output_progress:
get_logger().debug(f"Skipping publish_comment for temporary comment: {mr_comment}")
return None
mr_comment = self.limit_output_characters(mr_comment, self.max_comment_chars)
comment = self.mr.notes.create({'body': mr_comment})
if is_temporary:

@@ -282,23 +275,20 @@ class GitLabProvider(GitProvider):
new_code_snippet = original_suggestion['improved_code']
content = original_suggestion['suggestion_content']
label = original_suggestion['label']
score = original_suggestion.get('score', 7)
if 'score' in original_suggestion:
score = original_suggestion['score']
else:
score = 7

if hasattr(self, 'main_language'):
language = self.main_language
else:
language = ''
link = self.get_line_link(relevant_file, line_start, line_end)
body_fallback =f"**Suggestion:** {content} [{label}, importance: {score}]\n\n"
body_fallback +=f"\n\n<details><summary>[{target_file.filename} [{line_start}-{line_end}]]({link}):</summary>\n\n"
body_fallback += f"\n\n___\n\n`(Cannot implement directly - GitLab API allows committable suggestions strictly on MR diff lines)`"
body_fallback+="</details>\n\n"
diff_patch = difflib.unified_diff(old_code_snippet.split('\n'),
new_code_snippet.split('\n'), n=999)
patch_orig = "\n".join(diff_patch)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
body_fallback += diff_code
body_fallback =f"**Suggestion:** {content} [{label}, importance: {score}]\n___\n"
body_fallback +=f"\n\nReplace lines ([{line_start}-{line_end}]({link}))\n\n```{language}\n{old_code_snippet}\n````\n\n"
body_fallback +=f"with\n\n```{language}\n{new_code_snippet}\n````"
body_fallback += f"\n\n___\n\n`(Cannot implement this suggestion directly, as gitlab API does not enable committing to a non -+ line in a PR)`"

# Create a general note on the file in the MR
self.mr.notes.create({

@@ -311,7 +301,6 @@ class GitLabProvider(GitProvider):
'file_path': f'{target_file.filename}',
}
})
get_logger().debug(f"Created fallback comment in MR {self.id_mr} with position {pos_obj}")

# get_logger().debug(
#     f"Failed to create comment in MR {self.id_mr} with position {pos_obj} (probably not a '+' line)")
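The fallback comment above renders a suggestion as a ```diff block built with `difflib.unified_diff(..., n=999)` and then slices off the header lines. A standalone sketch of that trick, under the same assumptions (line lists without trailing newlines), for illustration:

```python
# Illustrative sketch of the diff-rendering pattern used in the fallback comment above.
import difflib

def render_diff_block(old_code: str, new_code: str) -> str:
    diff = difflib.unified_diff(old_code.split('\n'), new_code.split('\n'), n=999)
    patch_orig = "\n".join(diff)
    # drop the "---"/"+++"/"@@" header lines (and the blanks the join introduces),
    # exactly as the snippet above does with splitlines()[5:]
    patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
    return f"```diff\n{patch.rstrip()}\n```"

print(render_diff_block("a = 1\nb = 2", "a = 1\nb = 3"))
```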
@@ -4,9 +4,9 @@ from typing import List

from git import Repo

from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.config_loader import _find_repository_root, get_settings
from pr_agent.git_providers.git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.log import get_logger

@@ -3,12 +3,11 @@ import os
import tempfile

from dynaconf import Dynaconf
from starlette_context import context

from pr_agent.config_loader import get_settings
from pr_agent.git_providers import (get_git_provider,
get_git_provider_with_context)
from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
from pr_agent.log import get_logger
from starlette_context import context


def apply_repo_settings(pr_url):

@@ -99,24 +98,5 @@ def set_claude_model():
"""
model_claude = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
get_settings().set('config.model', model_claude)
get_settings().set('config.model_weak', model_claude)
get_settings().set('config.model_turbo', model_claude)
get_settings().set('config.fallback_models', [model_claude])


def is_user_name_a_bot(name: str) -> bool:
if not name:
return False
bot_indicators = ['codium', 'bot_', 'bot-', '_bot', '-bot', 'qodo', "service", "github", "jenkins", "auto",
"cicd", "validator", "ci-", "assistant", "srv-"]
return any(indicator in name.lower() for indicator in bot_indicators)


def is_pr_description_indicating_bot(description: str) -> bool:
if not description:
return False
bot_descriptions = ["Snyk has created this PR", "This PR was created automatically by",
"This PR was created by a bot",
"This pull request was automatically generated by"]
# Check if it's a known bot description (e.g. Snyk)
if any(bot_description in description for bot_description in bot_descriptions):
return True
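The two helpers added above centralize bot detection for all webhook servers in this diff. A small usage sketch (the values are illustrative only; the import path matches the imports added later in this diff):

```python
# Usage sketch for the bot-detection helpers above; sender/description are made-up values.
from pr_agent.git_providers.utils import is_user_name_a_bot, is_pr_description_indicating_bot

sender = "renovate-bot"
description = "This PR was created automatically by the nightly dependency job."

if is_user_name_a_bot(sender) or is_pr_description_indicating_bot(description):
    print(f"Skipping PR from '{sender}': looks like a bot")
```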
@@ -1,6 +1,5 @@
from pr_agent.config_loader import get_settings
from pr_agent.identity_providers.default_identity_provider import \
DefaultIdentityProvider
from pr_agent.identity_providers.default_identity_provider import DefaultIdentityProvider

_IDENTITY_PROVIDERS = {
'default': DefaultIdentityProvider

@@ -11,4 +10,4 @@ def get_identity_provider():
identity_provider_id = get_settings().get("CONFIG.IDENTITY_PROVIDER", "default")
if identity_provider_id not in _IDENTITY_PROVIDERS:
raise ValueError(f"Unknown identity provider: {identity_provider_id}")
return _IDENTITY_PROVIDERS[identity_provider_id]()
return _IDENTITY_PROVIDERS[identity_provider_id]()
@@ -1,5 +1,4 @@
from pr_agent.identity_providers.identity_provider import (Eligibility,
IdentityProvider)
from pr_agent.identity_providers.identity_provider import Eligibility, IdentityProvider


class DefaultIdentityProvider(IdentityProvider):
@@ -8,10 +8,12 @@ def get_secret_provider():
provider_id = get_settings().config.secret_provider
if provider_id == 'google_cloud_storage':
try:
from pr_agent.secret_providers.google_cloud_storage_secret_provider import \
GoogleCloudStorageSecretProvider
from pr_agent.secret_providers.google_cloud_storage_secret_provider import GoogleCloudStorageSecretProvider
return GoogleCloudStorageSecretProvider()
except Exception as e:
raise ValueError(f"Failed to initialize google_cloud_storage secret provider {provider_id}") from e
else:
raise ValueError("Unknown SECRET_PROVIDER")

@@ -9,9 +9,9 @@ import secrets
from urllib.parse import unquote

import uvicorn
from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
from fastapi.encoders import jsonable_encoder
from fastapi import APIRouter, Depends, FastAPI, HTTPException
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.encoders import jsonable_encoder
from starlette import status
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware

@@ -23,6 +23,9 @@ from pr_agent.agent.pr_agent import PRAgent, command2class
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import get_logger
from fastapi import Request, Depends
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from pr_agent.log import LoggingFormat, get_logger, setup_logger

setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")

@@ -64,9 +67,6 @@ def authorize(credentials: HTTPBasicCredentials = Depends(security)):

async def _perform_commands_azure(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}", **log_context)
return
commands = get_settings().get(f"azure_devops_server.{commands_conf}")
get_settings().set("config.is_auto_command", True)
for command in commands:
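The Azure DevOps webhook above imports `HTTPBasic`/`HTTPBasicCredentials` and `secrets`, and exposes an `authorize(credentials: HTTPBasicCredentials = Depends(security))` guard. A hedged sketch of what such a basic-auth guard typically looks like in FastAPI; this is not the repository's implementation, and the credential constants are placeholders, not the project's settings keys:

```python
# Illustrative FastAPI HTTP Basic guard; WEBHOOK_USERNAME/WEBHOOK_PASSWORD are placeholders.
import secrets

from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import HTTPBasic, HTTPBasicCredentials

app = FastAPI()
security = HTTPBasic()

WEBHOOK_USERNAME = "expected-user"
WEBHOOK_PASSWORD = "expected-password"

def authorize(credentials: HTTPBasicCredentials = Depends(security)):
    # constant-time comparison to avoid leaking credential length/content via timing
    user_ok = secrets.compare_digest(credentials.username, WEBHOOK_USERNAME)
    pass_ok = secrets.compare_digest(credentials.password, WEBHOOK_PASSWORD)
    if not (user_ok and pass_ok):
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
                            detail="Incorrect username or password",
                            headers={"WWW-Authenticate": "Basic"})

@app.post("/webhook")
async def webhook(_=Depends(authorize)):
    return {"status": "ok"}
```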
@@ -19,7 +19,7 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.git_providers.utils import apply_repo_settings, is_user_name_a_bot, is_pr_description_indicating_bot
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.identity_providers import get_identity_provider
from pr_agent.identity_providers.identity_provider import Eligibility
from pr_agent.log import LoggingFormat, get_logger, setup_logger

@@ -77,9 +77,6 @@ async def handle_manifest(request: Request, response: Response):

async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict, data: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}")
return
if data.get("event", "") == "pullrequest:created":
if not should_process_pr_logic(data):
return

@@ -101,25 +98,11 @@ async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_ur

def is_bot_user(data) -> bool:
try:
actor = data.get("data", {}).get("actor", {})
description = data.get("data", {}).get("pullrequest", {}).get("description", "")
# allow actor type: user . if it's "AppUser" or "team" then it is a bot user
allowed_actor_types = {"user"}
if actor and actor["type"].lower() not in allowed_actor_types:
get_logger().info(f"BitBucket actor type is not 'user', skipping: {actor}")
return True

username = actor.get("username", "")
if username and is_user_name_a_bot(username):
get_logger().info(f"BitBucket actor is a bot user, skipping: {username}")
return True

if description and is_pr_description_indicating_bot(description):
get_logger().info(f"Description indicates a bot user: {actor}",
artifact={"description": description})
if data["data"]["actor"]["type"] != "user":
get_logger().info(f"BitBucket actor type is not 'user': {data['data']['actor']['type']}")
return True
except Exception as e:
get_logger().error(f"Failed 'is_bot_user' logic: {e}")
get_logger().error("Failed 'is_bot_user' logic: {e}")
return False


@@ -178,18 +161,16 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
return "OK"

# Get the username of the sender
actor = data.get("data", {}).get("actor", {})
if actor:
try:
username = data["data"]["actor"]["username"]
except KeyError:
try:
username = actor["username"]
username = data["data"]["actor"]["display_name"]
except KeyError:
try:
username = actor["display_name"]
except KeyError:
username = actor["nickname"]
log_context["sender"] = username
username = data["data"]["actor"]["nickname"]
log_context["sender"] = username

sender_id = data.get("data", {}).get("actor", {}).get("account_id", "")
sender_id = data["data"]["actor"]["account_id"]
log_context["sender_id"] = sender_id
jwt_parts = input_jwt.split(".")
claim_part = jwt_parts[1]
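The Bitbucket handler above resolves the sender name by falling through `username`, `display_name`, and `nickname`. An equivalent, illustrative way to express the same priority order with `dict.get` instead of nested `KeyError` handlers (the helper name and sample payload are made up):

```python
# Illustrative alternative to the nested KeyError fallbacks above: walk the candidate
# keys in priority order and take the first one present on the actor object.
def resolve_actor_name(actor: dict) -> str:
    for key in ("username", "display_name", "nickname"):
        value = actor.get(key)
        if value:
            return value
    return ""

print(resolve_actor_name({"display_name": "Jane Doe", "account_id": "123"}))  # Jane Doe
```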
@@ -6,20 +6,20 @@ from typing import List
import uvicorn
from fastapi import APIRouter, FastAPI
from fastapi.encoders import jsonable_encoder
from fastapi.responses import RedirectResponse
from starlette import status
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette_context.middleware import RawContextMiddleware

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.servers.utils import verify_signature
from fastapi.responses import RedirectResponse


setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
router = APIRouter()

@@ -72,11 +72,6 @@ async def handle_webhook(background_tasks: BackgroundTasks, request: Request):
commands_to_run = []

if data["eventKey"] == "pr:opened":
apply_repo_settings(pr_url)
if get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {pr_url}", **log_context)
return
get_settings().set("config.is_auto_command", True)
commands_to_run.extend(_get_commands_list_from_settings('BITBUCKET_SERVER.PR_COMMANDS'))
elif data["eventKey"] == "pr:comment:added":
commands_to_run.append(data["comment"]["text"])
@@ -15,10 +15,9 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.git_providers import (get_git_provider,
get_git_provider_with_context)
from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
from pr_agent.git_providers.git_provider import IncrementalPR
from pr_agent.git_providers.utils import apply_repo_settings, is_user_name_a_bot, is_pr_description_indicating_bot
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.identity_providers import get_identity_provider
from pr_agent.identity_providers.identity_provider import Eligibility
from pr_agent.log import LoggingFormat, get_logger, setup_logger

@@ -238,22 +237,13 @@ def get_log_context(body, event, action, build_number):
return log_context, sender, sender_id, sender_type


def is_bot_user(sender, sender_type, user_description):
def is_bot_user(sender, sender_type):
try:
# logic to ignore PRs opened by bot
if get_settings().get("GITHUB_APP.IGNORE_BOT_PR", False):
if sender_type.lower() == "bot":
if 'pr-agent' not in sender:
get_logger().info(f"Ignoring PR from '{sender=}' because it is a bot")
return True
if is_user_name_a_bot(sender):
if get_settings().get("GITHUB_APP.IGNORE_BOT_PR", False) and sender_type == "Bot":
if 'pr-agent' not in sender:
get_logger().info(f"Ignoring PR from '{sender=}' because it is a bot")
return True
# Ignore PRs opened by bot users based on their description
if isinstance(user_description, str) and is_pr_description_indicating_bot(user_description):
get_logger().info(f"Description indicates a bot user: {sender}",
artifact={"description": user_description})
return True
return True
except Exception as e:
get_logger().error(f"Failed 'is_bot_user' logic: {e}")
return False

@@ -316,8 +306,7 @@ async def handle_request(body: Dict[str, Any], event: str):
log_context, sender, sender_id, sender_type = get_log_context(body, event, action, build_number)

# logic to ignore PRs opened by bot, PRs with specific titles, labels, source branches, or target branches
pr_description = body.get("pull_request", {}).get("body", "")
if is_bot_user(sender, sender_type, pr_description) and 'check_run' not in body:
if is_bot_user(sender, sender_type) and 'check_run' not in body:
return {}
if action != 'created' and 'check_run' not in body:
if not should_process_pr_logic(body):

@@ -384,9 +373,6 @@ def _check_pull_request_event(action: str, body: dict, log_context: dict) -> Tup
async def _perform_auto_commands_github(commands_conf: str, agent: PRAgent, body: dict, api_url: str,
log_context: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}")
return
if not should_process_pr_logic(body): # Here we already updated the configuration with the repo settings
return {}
commands = get_settings().get(f"github_app.{commands_conf}")
@@ -1,12 +1,11 @@
import asyncio
import multiprocessing
import time
import traceback
from collections import deque
import traceback
from datetime import datetime, timezone

import aiohttp
import time
import requests
import aiohttp

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings

@@ -84,7 +83,6 @@ async def is_valid_notification(notification, headers, handled_ids, session, use
return False, handled_ids
async with session.get(latest_comment, headers=headers) as comment_response:
check_prev_comments = False
user_tag = "@" + user_id
if comment_response.status == 200:
comment = await comment_response.json()
if 'id' in comment:

@@ -102,6 +100,7 @@ async def is_valid_notification(notification, headers, handled_ids, session, use
get_logger().debug(f"no comment_body")
check_prev_comments = True
else:
user_tag = "@" + user_id
if user_tag not in comment_body:
get_logger().debug(f"user_tag not in comment_body")
check_prev_comments = True
@@ -1,6 +1,6 @@
import copy
import json
import re
import json
from datetime import datetime

import uvicorn

@@ -15,7 +15,7 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.git_providers.utils import apply_repo_settings, is_user_name_a_bot, is_pr_description_indicating_bot
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.secret_providers import get_secret_provider

@@ -61,9 +61,6 @@ async def handle_request(api_url: str, body: str, log_context: dict, sender_id:
async def _perform_commands_gitlab(commands_conf: str, agent: PRAgent, api_url: str,
log_context: dict, data: dict):
apply_repo_settings(api_url)
if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback: # auto commands for PR, and auto feedback is disabled
get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}", **log_context)
return
if not should_process_pr_logic(data): # Here we already updated the configurations
return
commands = get_settings().get(f"gitlab.{commands_conf}", {})

@@ -86,14 +83,10 @@ def is_bot_user(data) -> bool:
try:
# logic to ignore bot users (unlike Github, no direct flag for bot users in gitlab)
sender_name = data.get("user", {}).get("name", "unknown").lower()
if is_user_name_a_bot(sender_name):
bot_indicators = ['codium', 'bot_', 'bot-', '_bot', '-bot']
if any(indicator in sender_name for indicator in bot_indicators):
get_logger().info(f"Skipping GitLab bot user: {sender_name}")
return True
pr_description = data.get('object_attributes', {}).get('description', '')
if pr_description and is_pr_description_indicating_bot(pr_description):
get_logger().info(f"Description indicates a bot user: {sender_name}",
artifact={"description": pr_description})
return True
except Exception as e:
get_logger().error(f"Failed 'is_bot_user' logic: {e}")
return False
@@ -82,7 +82,7 @@ the tool will replace every marker of the form `pr_agent:marker_name` in the PR
- `walkthrough`: the PR walkthrough.

Note that when markers are enabled, if the original PR description does not contain any markers, the tool will not alter the description at all.


"""
output += "\n\n</details></td></tr>\n\n"

@@ -101,7 +101,7 @@ Examples for custom labels:

The list above is eclectic, and aims to give an idea of different possibilities. Define custom labels that are relevant for your repo and use cases.
Note that Labels are not mutually exclusive, so you can add multiple label categories.
Make sure to provide proper title, and a detailed and well-phrased description for each label, so the tool will know when to suggest it.
Make sure to provide proper title, and a detailed and well-phrased description for each label, so the tool will know when to suggest it.
"""
output += "\n\n</details></td></tr>\n\n"

@@ -126,7 +126,7 @@ Be specific, clear, and concise in the instructions. With extra instructions, yo

Examples for extra instructions:
```
[pr_description]
[pr_description]
extra_instructions="""\
- The PR title should be in the format: '<PR type>: <title>'
- The title should be short and concise (up to 10 words)

@@ -159,8 +159,8 @@ It can be invoked manually by commenting on any PR:
/ask "..."
```

Note that the tool does not have "memory" of previous questions, and answers each question independently.
You can ask questions about the entire PR, about specific code lines, or about an image related to the PR code changes.
Note that the tool does not have "memory" of previous questions, and answers each question independently.
You can ask questions about the entire PR, about specific code lines, or about an image related to the PR code changes.
"""
# output += "\n\n<table>"
#

@@ -195,9 +195,9 @@ You can ask questions about the entire PR, about specific code lines, or about a
some_config1=...
some_config2=...
```


"""

output += f"\n\nSee the improve [usage page](https://pr-agent-docs.codium.ai/tools/improve/) for a comprehensive guide on using this tool.\n\n"

return output
return output
@@ -5,6 +5,7 @@ from starlette_context.middleware import RawContextMiddleware

from pr_agent.servers.github_app import router


middleware = [Middleware(RawContextMiddleware)]
app = FastAPI(middleware=middleware)
app.include_router(router)
@@ -2,7 +2,7 @@ import hashlib
import hmac
import time
from collections import defaultdict
from typing import Any, Callable
from typing import Callable, Any

from fastapi import HTTPException

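The utils module above imports `hmac` and `hashlib`, and the Bitbucket server webhook earlier in this diff imports a `verify_signature` helper from it. A generic, hedged sketch of what webhook HMAC verification usually looks like (GitHub-style `sha256=<hexdigest>` header); this is illustrative and not copied from the project's `verify_signature`:

```python
# Generic sketch of webhook HMAC verification, not the project's implementation.
import hashlib
import hmac

from fastapi import HTTPException

def verify_webhook_signature(payload: bytes, secret: str, signature_header: str) -> None:
    # compute the expected "sha256=<hexdigest>" value over the raw request body
    expected = "sha256=" + hmac.new(secret.encode("utf-8"), payload, hashlib.sha256).hexdigest()
    # constant-time comparison to avoid timing side channels
    if not hmac.compare_digest(expected, signature_header or ""):
        raise HTTPException(status_code=401, detail="Invalid webhook signature")
```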
@@ -87,7 +87,7 @@ org = ""
pat = ""

[azure_devops_server]
# For Azure devops Server basic auth - configured in the webhook creation
# For Azure devops Server basic auth - configured in the webhook creation
# Optional, uncomment if you want to use Azure devops webhooks. Value assinged when you create the webhook
# webhook_username = "<basic auth user>"
# webhook_password = "<basic auth password>"
@@ -1,8 +1,8 @@
[config]
# models
model="gpt-4o-2024-11-20"
fallback_models=["gpt-4o-2024-08-06"]
#model_weak="gpt-4o-mini-2024-07-18" # optional, a weaker model to use for some easier tasks
model="gpt-4-turbo-2024-04-09"
model_turbo="gpt-4o-2024-08-06"
fallback_models=["gpt-4o-2024-05-13"]
# CLI
git_provider="github"
publish_output=true

@@ -14,7 +14,6 @@ use_extra_bad_extensions=false
use_wiki_settings_file=true
use_repo_settings_file=true
use_global_settings_file=true
disable_auto_feedback = false
ai_timeout=120 # 2minutes
skip_keys = []
# token limits

@@ -55,9 +54,10 @@ require_can_be_split_review=false
require_security_review=true
require_ticket_analysis_review=true
# general options
num_code_suggestions=0 # legacy mode. use the `improve` command instead
num_code_suggestions=0
inline_code_comments = false
ask_and_reflect=false
#automatic_review=true
persistent_comment=true
extra_instructions = ""
final_update_message = true

@@ -107,13 +107,13 @@ enable_help_text=false


[pr_code_suggestions] # /improve #
max_context_tokens=16000
max_context_tokens=14000
#
commitable_code_suggestions = false
dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
focus_only_on_problems=true
#
extra_instructions = ""
rank_suggestions = false
enable_help_text=false
enable_chat_text=false
enable_intro_text=true

@@ -128,7 +128,7 @@ auto_extended_mode=true
num_code_suggestions_per_chunk=4
max_number_of_calls = 3
parallel_calls = true

rank_extended_suggestions = false
final_clip_factor = 0.8
# self-review checkbox
demand_code_suggestions_self_review=false # add a checkbox for the author to self-review the code suggestions

@@ -138,7 +138,6 @@ fold_suggestions_on_self_review=true # Pro feature. if true, the code suggestion
# Suggestion impact 💎
publish_post_process_suggestion_impact=true
wiki_page_accepted_suggestions=true
allow_thumbs_up_down=false

[pr_custom_prompt] # /custom_prompt #
prompt = """\
@@ -218,7 +217,7 @@ override_deployment_type = true
handle_pr_actions = ['opened', 'reopened', 'ready_for_review']
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
# settings for "pull_request" event with "synchronize" action - used to detect and handle push triggers for new commits

@@ -230,27 +229,27 @@ push_trigger_pending_tasks_backlog = true
push_trigger_pending_tasks_ttl = 300
push_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
]

[gitlab]
url = "https://gitlab.com"
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve",
]
handle_push_trigger = false
push_commands = [
"/describe",
"/review",
"/review --pr_reviewer.num_code_suggestions=0",
]

[bitbucket_app]
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/improve --pr_code_suggestions.commitable_code_suggestions=true",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]
avoid_full_files = false

@@ -275,8 +274,8 @@ avoid_full_files = false
url = ""
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review",
"/improve --pr_code_suggestions.commitable_code_suggestions=true",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]

[litellm]
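The `pr_commands` arrays above are plain TOML lists that the webhook servers earlier in this diff read through the settings object (the `get_settings().get(f"github_app.{commands_conf}")` pattern). A small sketch of how they are consumed, simplified and for illustration only:

```python
# Simplified sketch of consuming the pr_commands list above; real handlers add
# logging, per-command argument parsing, and agent invocation.
from pr_agent.config_loader import get_settings

commands = get_settings().get("github_app.pr_commands", [])
for command in commands:
    print(f"would run auto command: {command}")
```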
@@ -13,4 +13,4 @@ enable_custom_labels=false
#[custom_labels."Documentation"]
#description = """Adds or modifies documentation"""
#[custom_labels."Other"]
#description = """Other changes that do not fit in any of the above categories"""
#description = """Other changes that do not fit in any of the above categories"""
@@ -437,4 +437,4 @@ xBase = [".prg", ".prw", ]

[docs_blacklist_extensions]
# Disable docs for these extensions of text files and scripts that are not programming languages of function, classes and methods
docs_blacklist = ['sql', 'txt', 'yaml', 'json', 'xml', 'md', 'rst', 'rest', 'rest.txt', 'rst.txt', 'mdpolicy', 'mdown', 'markdown', 'mdwn', 'mkd', 'mkdn', 'mkdown', 'sh']
docs_blacklist = ['sql', 'txt', 'yaml', 'json', 'xml', 'md', 'rst', 'rest', 'rest.txt', 'rst.txt', 'mdpolicy', 'mdown', 'markdown', 'mdwn', 'mkd', 'mkdn', 'mkdown', 'sh']
@@ -1,10 +1,7 @@
[pr_code_suggestions_prompt]
system="""You are PR-Reviewer, an AI specializing in Pull Request (PR) code analysis and suggestions.
{%- if not focus_only_on_problems %}
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality and performance.
{%- else %}
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix critical bugs and problems.
{%- endif %}
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality, readability, and performance.


The PR code diff will be in the following structured format:
======

@@ -45,17 +42,9 @@ __new hunk__


Specific guidelines for generating code suggestions:
{%- if not focus_only_on_problems %}
- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions.
{%- else %}
- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions. Return less suggestions if no pertinent ones are applicable.
{%- endif %}
- Focus solely on enhancing new code introduced in the PR, identified by '+' prefixes in '__new hunk__' sections.
{%- if not focus_only_on_problems %}
- Prioritize suggestions that address potential issues, critical problems, and bugs in the PR code. Avoid repeating changes already implemented in the PR. If no pertinent suggestions are applicable, return an empty list.
{%- else %}
- Only give suggestions that address critical problems and bugs in the PR code. If no relevant suggestions are applicable, return an empty list.
{%- endif %}
- Don't suggest to add docstring, type hints, or comments, to remove unused imports, or to use more specific exception types.
- When referencing variables or names from the code, enclose them in backticks (`). Example: "ensure that `variable_name` is..."
- Be mindful you are viewing a partial PR code diff, not the full codebase. Avoid suggestions that might conflict with unseen code or alerting variables not declared in the visible scope, as the context is incomplete.

@@ -80,11 +69,7 @@ class CodeSuggestion(BaseModel):
existing_code: str = Field(description="A short code snippet from a '__new hunk__' section that the suggestion aims to enhance or fix. Include only complete code lines. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
improved_code: str = Field(description="A refined code snippet that replaces the 'existing_code' snippet after implementing the suggestion.")
one_sentence_summary: str = Field(description="A concise, single-sentence overview of the suggested improvement. Focus on the 'what'. Be general, and avoid method or variable names.")
{%- if not focus_only_on_problems %}
label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', 'typo'. Other relevant labels are also acceptable.")
{%- else %}
label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'critical bug', 'general'. The 'general' section should be used for suggestions that address a major issue, but are necessarily on a critical level.")
{%- endif %}


class PRCodeSuggestions(BaseModel):

@@ -128,4 +113,4 @@ The PR Diff:

Response (should be a valid YAML, and nothing else):
```yaml
"""
"""
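The prompt above branches on the Jinja flag `focus_only_on_problems` that this diff introduces. A minimal rendering sketch showing how such a flag switches the instructions; the template text below is shortened and illustrative, not the full prompt from pr_code_suggestions_prompts.toml:

```python
# Minimal sketch of the focus_only_on_problems switch; the real template is much longer.
from jinja2 import Template

template = Template(
    "{%- if not focus_only_on_problems %}"
    "Suggest fixes and quality/performance improvements."
    "{%- else %}"
    "Suggest fixes for critical bugs and problems only."
    "{%- endif %}"
)

print(template.render(focus_only_on_problems=True))
print(template.render(focus_only_on_problems=False))
```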
Some files were not shown because too many files have changed in this diff.