Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-13 01:00:39 +08:00)

Compare commits: tr/static_...v0.25 (185 commits)
SHA1:
91bf3c0749
159155785e
eabc296246
b44030114e
1d6f87be3b
a7c6fa7bd2
a825aec5f3
4df097c228
6871e1b27a
4afe05761d
7d1b6c2f0a
3547cf2057
f2043d639c
6240de3898
f08b20c667
e64b468556
d48d14dac7
eb0c959ca9
741a70ad9d
22ee03981e
b1336e7d08
751caca141
612004727c
577ee0241d
a141ca133c
a14b6a580d
cc5005c490
3a5d0f54ce
cd8ba4f59f
fe27f96bf1
2c3aa7b2dc
c934523f2d
2f4545dc15
cbd490b3d7
b07f96d26a
065777040f
9c82047dc3
e0c15409bb
d956c72cb6
dfb3d801cf
5c5a3e267c
f9380c2440
e6a1f14c0e
6339845eb4
732cc18fd6
84d0f80c81
ee26bf35c1
7a5e9102fd
a8c97bfa73
af653a048f
d2663f959a
e650fe9ce9
daeca42ae8
04496f9b0e
0eacb3e35e
c5ed2f040a
c394fc2767
157251493a
4a982a849d
6e3544f523
bf3ebbb95f
eb44ecb1be
45bae48701
b2181e4c79
5939d3b17b
c1f4964a55
022e407d84
93ba2d239a
fa49dd5167
16029e66ad
7bd6713335
ef3241285d
d9ef26dc1c
02949b2b96
d301c76b65
dacb45dd8a
443d06df06
15e8c988a4
60fab1b301
84c1c1b1ca
7419a6d51a
ee58a92fb3
6b64924355
2f5e8472b9
852bb371af
7c90e44656
81dea65856
a3d572fb69
7186bf4bb3
115fca58a3
cbf60ca636
64ac45d03b
db062e3e35
e85472f367
597f1c6f83
66d4f56777
fbfb9e0881
223b5408d7
509135a8d4
8db7151bf0
b8cfcdbc12
a3cd433184
0f284711e6
67b46e7f30
68f2cec077
8e94c8b2f5
a221f8edd0
3b47c75c32
2e34d7a05a
204a0a7912
9786499fa6
4f14742233
c077c71fdb
7b5a3d45bd
c6c6a9b4f0
a5e7c37fcc
12a9e13509
0b4b6b1589
bf049381bd
65c917b84b
b4700bd7c0
a957554262
d491a942cc
6c55a2720a
f1d0401f82
c5bd09e2c9
c70acdc7cd
1c6f7f9c06
0b32b253ca
3efd2213f2
0705bd03c4
927d005e99
0dccfdbbf0
dcb7b66fd7
b7437147af
e82afdd2cb
0946da3810
d1f4069d3f
d45a892fd2
4a91b8ed8d
fb85cb721a
3a52122677
13c6ed9098
9dd1995464
eb804d0b34
e0ee878e84
27abe48a34
8fe504a7ec
f6ba49819a
22bf7af9ba
840e8c4d6b
49f8d86c77
05827d125b
74ee9a333e
e9769fa602
3adff8cf4c
d249b47ce9
892d1ad15c
76d95bb6d7
7db9a03805
4eef3e9190
014ea884d2
c1c5ee7cfb
3ac1ddc5d7
e6c56c7355
727b08fde3
5d9d48dc82
8e8062fefc
23a3e208a5
bb84063ef2
a476e85fa7
4b05a3e858
cd158f24f6
ada0a3d10f
ddf1afb23f
e2b5489495
6459535e39
5a719c1904
1a2ea2c87d
ca79bafab3
618224beef
481c2a5985
e21d9dc9e3
6872a7076b
c2ae429805
.github/workflows/build-and-test.yaml (vendored, 2 changes)

@@ -37,5 +37,3 @@ jobs:
       name: Test dev docker
       run: |
         docker run --rm codiumai/pr-agent:test pytest -v tests/unittest
-
-
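The test job above can be reproduced locally; a minimal sketch (the build command's Dockerfile path and `test` target are assumptions, since the compare does not show the build step, while the `docker run` line is the workflow's own):

```bash
# Build the test image the workflow runs against (path and target are assumptions)
docker build . -t codiumai/pr-agent:test -f docker/Dockerfile --target test
# Run the unit tests exactly as the workflow does
docker run --rm codiumai/pr-agent:test pytest -v tests/unittest
```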
.github/workflows/pr-agent-review.yaml (vendored, 3 changes)

@@ -30,6 +30,3 @@ jobs:
           GITHUB_ACTION_CONFIG.AUTO_DESCRIBE: true
           GITHUB_ACTION_CONFIG.AUTO_REVIEW: true
           GITHUB_ACTION_CONFIG.AUTO_IMPROVE: true
-
-
-
.github/workflows/pre-commit.yml (vendored, new file, 17 lines)

@@ -0,0 +1,17 @@
+# disabled. We might run it manually if needed.
+name: pre-commit
+
+on:
+  workflow_dispatch:
+  # pull_request:
+  # push:
+  #   branches: [main]
+
+jobs:
+  pre-commit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v5
+      # SEE https://github.com/pre-commit/action
+      - uses: pre-commit/action@v3.0.1
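With the `pull_request` and `push` triggers commented out, this workflow only runs when dispatched by hand. A minimal sketch using the GitHub CLI (assuming `gh` is authenticated; substitute your own fork or repository slug):

```bash
# Manually dispatch the otherwise-disabled pre-commit workflow
gh workflow run pre-commit.yml --repo <your-fork>/pr-agent
```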
@@ -1,6 +1,3 @@
 [pr_reviewer]
 enable_review_labels_effort = true
 enable_auto_approval = true
-
-[config]
-model="claude-3-5-sonnet"
.pre-commit-config.yaml (new file, 46 lines)

@@ -0,0 +1,46 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+
+default_language_version:
+  python: python3
+
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v5.0.0
+  hooks:
+  - id: check-added-large-files
+  - id: check-toml
+  - id: check-yaml
+  - id: end-of-file-fixer
+  - id: trailing-whitespace
+# - repo: https://github.com/rhysd/actionlint
+#   rev: v1.7.3
+#   hooks:
+#   - id: actionlint
+- repo: https://github.com/pycqa/isort
+  # rev must match what's in dev-requirements.txt
+  rev: 5.13.2
+  hooks:
+  - id: isort
+# - repo: https://github.com/PyCQA/bandit
+#   rev: 1.7.10
+#   hooks:
+#   - id: bandit
+#     args: [
+#       "-c", "pyproject.toml",
+#     ]
+# - repo: https://github.com/astral-sh/ruff-pre-commit
+#   rev: v0.7.1
+#   hooks:
+#   - id: ruff
+#     args:
+#       - --fix
+#   - id: ruff-format
+# - repo: https://github.com/PyCQA/autoflake
+#   rev: v2.3.1
+#   hooks:
+#   - id: autoflake
+#     args:
+#       - --in-place
+#       - --remove-all-unused-imports
+#       - --remove-unused-variables
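To try the same hooks locally before pushing, a minimal sketch (assuming a working Python environment with pip):

```bash
pip install pre-commit        # one-time install of the hook runner
pre-commit run --all-files    # run every hook in .pre-commit-config.yaml once
pre-commit install            # optional: run hooks automatically on each commit
```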
@@ -1,10 +1,11 @@
-FROM python:3.10 as base
+FROM python:3.12 as base
 
 WORKDIR /app
 ADD pyproject.toml .
 ADD requirements.txt .
 RUN pip install . && rm pyproject.toml requirements.txt
 ENV PYTHONPATH=/app
+ADD docs docs
 ADD pr_agent pr_agent
 ADD github_action/entrypoint.sh /
 RUN chmod +x /entrypoint.sh
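For a local smoke test of this image, a sketch of a build command; the Dockerfile name is an assumption, since the compare view does not show which file this hunk belongs to:

```bash
# Build the GitHub Action image locally (file name is an assumption)
docker build . -t pr-agent:github_action -f Dockerfile.github_action
```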
README.md (54 changes)

@@ -10,7 +10,7 @@
 
 </picture>
 <br/>
-CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
+Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
 </div>
 
 [![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)

@@ -25,9 +25,9 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
 </div>
 
 ### [Documentation](https://pr-agent-docs.codium.ai/)
-- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing PR-Agent on different platforms.
+- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing Qode Merge PR-Agent on different platforms.
 
-- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.
+- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running Qode Merge PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.
 
 - See the [Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) for a detailed description of the different tools, and the available configurations for each tool.
 

@@ -43,45 +43,38 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
 
 ## News and Updates
 
-### September 21, 2024
-Need help with PR-Agent? New feature - simply comment `/help "your question"` in a pull request, and PR-Agent will provide you with the [relevant documentation](https://github.com/Codium-ai/pr-agent/pull/1241#issuecomment-2365259334).
-
-<kbd><img src="https://www.codium.ai/images/pr_agent/pr_help_chat.png" width="768"></kbd>
-
-### September 12, 2024
-[Dynamic context](https://pr-agent-docs.codium.ai/core-abilities/dynamic_context/) is now the default option for context extension.
-This feature enables PR-Agent to dynamically adjust the relevant context for each code hunk, while avoiding overflowing the model with too much information.
-
-### September 3, 2024
-
-New version of PR-Agent, v0.24 was released. See the [release notes](https://github.com/Codium-ai/pr-agent/releases/tag/v0.24) for more information.
-
-### August 26, 2024
-New version of [PR Agent Chrome Extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) was released, with full support of context-aware **PR Chat**. This novel feature is free to use for any open-source repository. See more details [here](https://pr-agent-docs.codium.ai/chrome-extension/#pr-chat).
-
-<kbd><img src="https://www.codium.ai/images/pr_agent/pr_chat_1.png" width="768"></kbd>
-
-<kbd><img src="https://www.codium.ai/images/pr_agent/pr_chat_2.png" width="768"></kbd>
-
-### August 11, 2024
-Increased PR context size for improved results, and enabled [asymmetric context](https://github.com/Codium-ai/pr-agent/pull/1114/files#diff-9290a3ad9a86690b31f0450b77acd37ef1914b41fabc8a08682d4da433a77f90R69-R70)
-
-### August 10, 2024
-Added support for [Azure devops pipeline](https://pr-agent-docs.codium.ai/installation/azure/) - you can now easily run PR-Agent as an Azure devops pipeline, without needing to set up your own server.
-
-### August 5, 2024
-Added support for [GitLab pipeline](https://pr-agent-docs.codium.ai/installation/gitlab/#run-as-a-gitlab-pipeline) - you can now easily run PR-Agent as a GitLab pipeline, without needing to set up your own server.
-
-### July 28, 2024
-
-(1) improved support for bitbucket server - [auto commands](https://github.com/Codium-ai/pr-agent/pull/1059) and [direct links](https://github.com/Codium-ai/pr-agent/pull/1061)
-
-(2) custom models are now [supported](https://pr-agent-docs.codium.ai/usage-guide/changing_a_model/#custom-models)
+### December 2, 2024
+
+Open-source repositories can now freely use Qodo Merge Pro, and enjoy easy one-click installation using our dedicated [app](https://github.com/apps/qodo-merge-pro-for-open-source).
+
+<kbd><img src="https://github.com/user-attachments/assets/b0838724-87b9-43b0-ab62-73739a3a855c" width="512"></kbd>
+
+### November 18, 2024
+A new mode was enabled by default for code suggestions - `--pr_code_suggestions.focus_only_on_problems=true`:
+
+- This option reduces the number of code suggestions received
+- The suggestions will focus more on identifying and fixing code problems, rather than style considerations like best practices, maintainability, or readability.
+- The suggestions will be categorized into just two groups: "Possible Issues" and "General".
+
+Still, if you prefer the previous mode, you can set `--pr_code_suggestions.focus_only_on_problems=false` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
+
+**Example results:**
+
+Original mode
+
+<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_original_mode.png" width="512"></kbd>
+
+Focused mode
+
+<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_focused_mode.png" width="512"></kbd>
+
+### November 4, 2024
+
+Qodo Merge PR Agent will now leverage context from Jira or GitHub tickets to enhance the PR Feedback. Read more about this feature
+[here](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/)
 
 
 ## Overview

@@ -93,7 +86,6 @@ Supported commands per platform:
 |-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
 | TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
 |       | ⮑ Incremental | ✅ |   |   |   |
-|       | ⮑ [SOC2 Compliance](https://pr-agent-docs.codium.ai/tools/review/#soc2-ticket-compliance) 💎 | ✅ | ✅ | ✅ |   |
 |       | Describe | ✅ | ✅ | ✅ | ✅ |
 |       | ⮑ [Inline File Summary](https://pr-agent-docs.codium.ai/tools/describe#inline-file-summary) 💎 | ✅ |   |   |   |
 |       | Improve | ✅ | ✅ | ✅ | ✅ |
@@ -1,9 +1,9 @@
 FROM python:3.12.3 AS base
 
 WORKDIR /app
-ADD docs/chroma_db.zip /app/docs/chroma_db.zip
 ADD pyproject.toml .
 ADD requirements.txt .
+ADD docs docs
 RUN pip install . && rm pyproject.toml requirements.txt
 ENV PYTHONPATH=/app
 
Binary file not shown. (Before: 15 KiB, After: 4.2 KiB)
@@ -1,140 +1 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Generator: Adobe Illustrator 28.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
-<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="64px" height="64px" viewBox="0 0 64 64" enable-background="new 0 0 64 64" xml:space="preserve">
-[136 further lines of Illustrator-generated <defs>, radial gradients, and <path> elements removed]
-</svg>
+<?xml version="1.0" encoding="UTF-8"?><svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 109.77 81.94"><defs><style>.cls-1{fill:#7968fa;}.cls-1,.cls-2{stroke-width:0px;}.cls-2{fill:#5ae3ae;}</style></defs><path class="cls-2" d="m109.77,40.98c0,22.62-7.11,40.96-15.89,40.96-3.6,0-6.89-3.09-9.58-8.31,6.82-7.46,11.22-19.3,11.22-32.64s-4.4-25.21-11.22-32.67C86.99,3.09,90.29,0,93.89,0c8.78,0,15.89,18.33,15.89,40.97"/><path class="cls-1" d="m95.53,40.99c0,13.35-4.4,25.19-11.23,32.64-3.81-7.46-6.28-19.3-6.28-32.64s2.47-25.21,6.28-32.67c6.83,7.46,11.23,19.32,11.23,32.67"/><path class="cls-2" d="m55.38,78.15c-4.99,2.42-10.52,3.79-16.38,3.79C17.46,81.93,0,63.6,0,40.98S17.46,0,39,0C44.86,0,50.39,1.37,55.38,3.79c-9.69,6.47-16.43,20.69-16.43,37.19s6.73,30.7,16.43,37.17"/><path class="cls-1" d="m78.02,40.99c0,16.48-9.27,30.7-22.65,37.17-9.69-6.47-16.43-20.69-16.43-37.17S45.68,10.28,55.38,3.81c13.37,6.49,22.65,20.69,22.65,37.19"/><path class="cls-2" d="m84.31,73.63c-4.73,5.22-10.64,8.31-17.06,8.31-4.24,0-8.27-1.35-11.87-3.79,13.37-6.48,22.65-20.7,22.65-37.17,0,13.35,2.47,25.19,6.28,32.64"/><path class="cls-2" d="m84.31,8.31c-3.81,7.46-6.28,19.32-6.28,32.67,0-16.5-9.27-30.7-22.65-37.19,3.6-2.45,7.63-3.8,11.87-3.8,6.43,0,12.33,3.09,17.06,8.31"/></svg>

(Before: 9.0 KiB, After: 1.2 KiB)
docs/docs/assets/logo_.png (BIN, new file; binary not shown, 8.7 KiB)
@@ -2,4 +2,3 @@ We take your code's security and privacy seriously:
 
 - The Chrome extension will not send your code to any external servers.
 - For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge Pro integration.
-
docs/docs/core-abilities/fetching_ticket_context.md (new file, 115 lines)

@@ -0,0 +1,115 @@
+# Fetching Ticket Context for PRs
+## Overview
+Qodo Merge PR Agent streamlines code review workflows by seamlessly connecting with multiple ticket management systems.
+This integration enriches the review process by automatically surfacing relevant ticket information and context alongside code changes.
+
+## Affected Tools
+
+Ticket Recognition Requirements:
+
+1. The PR description should contain a link to the ticket.
+2. For Jira tickets, you should follow the instructions in [Jira Integration](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/#jira-integration) in order to authenticate with Jira.
+
+### Describe tool
+Qodo Merge PR Agent will recognize the ticket and use the ticket content (title, description, labels) to provide additional context for the code changes.
+By understanding the reasoning and intent behind modifications, the LLM can offer more insightful and relevant code analysis.
+
+### Review tool
+Similarly to the `describe` tool, the `review` tool will use the ticket content to provide additional context for the code changes.
+
+In addition, this feature will evaluate how well a Pull Request (PR) adheres to its original purpose/intent as defined by the associated ticket or issue mentioned in the PR description.
+Each ticket will be assigned a label (Compliance/Alignment level) that indicates the degree to which the PR fulfills its original purpose: Fully compliant, Partially compliant, or Not compliant.
+
+{width=768}
+
+By default, the tool will automatically validate if the PR complies with the referenced ticket.
+If you want to disable this feedback, add the following line to your configuration file:
+
+```toml
+[pr_reviewer]
+require_ticket_analysis_review=false
+```
+
+## Providers
+
+### GitHub Issues Integration
+
+Qodo Merge PR Agent will automatically recognize GitHub issues mentioned in the PR description and fetch the issue content.
+Examples of valid GitHub issue references:
+
+- `https://github.com/<ORG_NAME>/<REPO_NAME>/issues/<ISSUE_NUMBER>`
+- `#<ISSUE_NUMBER>`
+- `<ORG_NAME>/<REPO_NAME>#<ISSUE_NUMBER>`
+
+Since Qodo Merge PR Agent is integrated with GitHub, it doesn't require any additional configuration to fetch GitHub issues.
+
+### Jira Integration 💎
+
+We support both Jira Cloud and Jira Server/Data Center.
+To integrate with Jira, the PR description should contain a link to the Jira ticket.
+
+For Jira integration, include a ticket reference in your PR description using either the complete URL format `https://<JIRA_ORG>.atlassian.net/browse/ISSUE-123` or the shortened ticket ID `ISSUE-123`.
+
+!!! note "Jira Base URL"
+    If using the shortened format, ensure your configuration file contains the Jira base URL under the [jira] section like this:
+
+    ```toml
+    [jira]
+    jira_base_url = "https://<JIRA_ORG>.atlassian.net"
+    ```
+
+#### Jira Cloud 💎
+There are two ways to authenticate with Jira Cloud:
+
+**1) Jira App Authentication**
+
+The recommended way to authenticate with Jira Cloud is to install the Qodo Merge app in your Jira Cloud instance. This will allow Qodo Merge to access Jira data on your behalf.
+
+Installation steps:
+
+1. Click [here](https://auth.atlassian.com/authorize?audience=api.atlassian.com&client_id=8krKmA4gMD8mM8z24aRCgPCSepZNP1xf&scope=read%3Ajira-work%20offline_access&redirect_uri=https%3A%2F%2Fregister.jira.pr-agent.codium.ai&state=qodomerge&response_type=code&prompt=consent) to install the Qodo Merge app in your Jira Cloud instance, then click the `accept` button.<br>
+   {width=384}
+
+2. After installing the app, you will be redirected to the Qodo Merge registration page, and you will see a success message.<br>
+   {width=384}
+
+3. Now you can use the Jira integration in Qodo Merge PR Agent.
+
+**2) Email/Token Authentication**
+
+You can create an API token from your Atlassian account:
+
+1. Log in to https://id.atlassian.com/manage-profile/security/api-tokens.
+
+2. Click Create API token.
+
+3. From the dialog that appears, enter a name for your new token and click Create.
+
+4. Click Copy to clipboard.
+
+   {width=384}
+
+5. In your [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) add the following lines:
+
+```toml
+[jira]
+jira_api_token = "YOUR_API_TOKEN"
+jira_api_email = "YOUR_EMAIL"
+```
+
+#### Jira Server/Data Center 💎
+
+Currently, we only support the Personal Access Token (PAT) authentication method.
+
+1. Create a [Personal Access Token (PAT)](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) in your Jira account
+2. In your Configuration file/Environment variables/Secrets file, add the following lines:
+
+```toml
+[jira]
+jira_base_url = "YOUR_JIRA_BASE_URL"  # e.g. https://jira.example.com
+jira_api_token = "YOUR_API_TOKEN"
+```
@@ -1,6 +1,7 @@
 # Core Abilities
 Qodo Merge utilizes a variety of core abilities to provide a comprehensive and efficient code review experience. These abilities include:
 
+- [Fetching ticket context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/)
 - [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/)
 - [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/)
 - [Self-reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/)

@@ -10,3 +11,19 @@ Qodo Merge utilizes a variety of core abilities to provide a comprehensive and e
 - [Code-oriented YAML](https://qodo-merge-docs.qodo.ai/core-abilities/code_oriented_yaml/)
 - [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/)
 - [Code fine-tuning benchmark](https://qodo-merge-docs.qodo.ai/finetuning_benchmark/)
+
+## Blogs
+
+Here are some additional technical blogs from Qodo, that delve deeper into the core capabilities and features of Large Language Models (LLMs) when applied to coding tasks.
+These resources provide more comprehensive insights into leveraging LLMs for software development.
+
+### Code Generation and LLMs
+- [State-of-the-art Code Generation with AlphaCodium – From Prompt Engineering to Flow Engineering](https://www.qodo.ai/blog/qodoflow-state-of-the-art-code-generation-for-code-contests/)
+- [RAG for a Codebase with 10k Repos](https://www.qodo.ai/blog/rag-for-large-scale-code-repos/)
+
+### Development Processes
+- [Understanding the Challenges and Pain Points of the Pull Request Cycle](https://www.qodo.ai/blog/understanding-the-challenges-and-pain-points-of-the-pull-request-cycle/)
+- [Introduction to Code Coverage Testing](https://www.qodo.ai/blog/introduction-to-code-coverage-testing/)
+
+### Cost Optimization
+- [Reduce Your Costs by 30% When Using GPT for Python Code](https://www.qodo.ai/blog/reduce-your-costs-by-30-when-using-gpt-3-for-python-code/)
@@ -49,7 +49,7 @@ __old hunk__
 ...
 ```
 
-(3) The entire PR files that were retrieved are also used to expand and enhance the PR context (see [Dynamic Context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic-context/)).
+(3) The entire PR files that were retrieved are also used to expand and enhance the PR context (see [Dynamic Context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/)).
 
 
 (4) All the metadata described above represents several levels of cumulative analysis - ranging from hunk level, to file level, to PR level, to organization level.
@@ -46,6 +46,5 @@ This results in a more refined and valuable set of suggestions for the user, sav
 ## Appendix - Relevant Configuration Options
 ```
 [pr_code_suggestions]
-self_reflect_on_suggestions = true # Enable self-reflection on code suggestions
 suggestions_score_threshold = 0 # Filter out suggestions with a score below this threshold (0-10)
 ```
@@ -29,7 +29,6 @@ Qodo Merge offers extensive pull request functionalities across various git pro
 |-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
 | TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
 |       | ⮑ Incremental | ✅ |   |   |   |
-|       | ⮑ [SOC2 Compliance](https://qodo-merge-docs.qodo.ai/tools/review/#soc2-ticket-compliance){:target="_blank"} 💎 | ✅ | ✅ | ✅ |   |
 |       | Ask | ✅ | ✅ | ✅ | ✅ |
 |       | Describe | ✅ | ✅ | ✅ | ✅ |
 |       | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ |   |   |
@@ -51,10 +51,12 @@ stages:
 ```
 This script will run Qodo Merge on every new merge request, with the `improve`, `review`, and `describe` commands.
 Note that you need to export the `azure_devops__pat` and `OPENAI_KEY` variables in the Azure DevOps pipeline settings (Pipelines -> Library -> + Variable group):
 
 {width=468}
 
 Make sure to give pipeline permissions to the `pr_agent` variable group.
+
+> Note that Azure Pipelines lacks support for triggering workflows from PR comments. If you find a viable solution, please contribute it to our [issue tracker](https://github.com/Codium-ai/pr-agent/issues)
 
 ## Azure DevOps from CLI
 
@@ -3,7 +3,7 @@
 
 You can use the Bitbucket Pipeline system to run Qodo Merge on every pull request open or update.
 
-1. Add the following file in your repository bitbucket_pipelines.yml
+1. Add the following file in your repository bitbucket-pipelines.yml
 
 ```yaml
 pipelines:
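For orientation, a minimal sketch of what such a `bitbucket-pipelines.yml` might contain; the step name and the two repository variables are assumptions, and the `docker run` invocation mirrors the BitBucket command shown later in this compare:

```yaml
pipelines:
  pull-requests:
    '**':
      - step:
          name: PR Agent Review   # hypothetical step name
          script:
            # OPENAI_API_KEY and BITBUCKET_BEARER_TOKEN are assumed repository variables;
            # BITBUCKET_WORKSPACE/REPO_SLUG/PR_ID are Bitbucket's built-in pipeline variables
            - docker run -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=https://bitbucket.org/$BITBUCKET_WORKSPACE/$BITBUCKET_REPO_SLUG/pull-requests/$BITBUCKET_PR_ID review
```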
@@ -27,27 +27,6 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 ```
 
-if you want to pin your action to a specific release (v0.23 for example) for stability reasons, use:
-```yaml
-...
-    steps:
-      - name: PR Agent action step
-        id: pragent
-        uses: docker://codiumai/pr-agent:0.23-github_action
-...
-```
-
-For enhanced security, you can also specify the Docker image by its [digest](https://hub.docker.com/repository/docker/codiumai/pr-agent/tags):
-```yaml
-...
-    steps:
-      - name: PR Agent action step
-        id: pragent
-        uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
-...
-```
-
 2) Add the following secret to your repository under `Settings > Secrets and variables > Actions > New repository secret > Add secret`:
 
 ```

@@ -70,6 +49,40 @@ When you open your next PR, you should see a comment from `github-actions` bot w
 ```
 See detailed usage instructions in the [USAGE GUIDE](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-action)
+
+### Using a specific release
+!!! tip ""
+    If you want to pin your action to a specific release (v0.23 for example) for stability reasons, use:
+    ```yaml
+    ...
+    steps:
+      - name: PR Agent action step
+        id: pragent
+        uses: docker://codiumai/pr-agent:0.23-github_action
+    ...
+    ```
+
+    For enhanced security, you can also specify the Docker image by its [digest](https://hub.docker.com/repository/docker/codiumai/pr-agent/tags):
+    ```yaml
+    ...
+    steps:
+      - name: PR Agent action step
+        id: pragent
+        uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
+    ...
+    ```
+
+### Action for GitHub enterprise server
+!!! tip ""
+    To use the action with a GitHub enterprise server, add an environment variable `GITHUB.BASE_URL` with the API URL of your GitHub server.
+
+    For example, if your GitHub server is at `https://github.mycompany.com`, add the following to your workflow file:
+    ```yaml
+    env:
+      # ... previous environment values
+      GITHUB.BASE_URL: "https://github.mycompany.com/api/v3"
+    ```
 
 ---
 
 ## Run as a GitHub App
@@ -38,25 +38,41 @@ You can also modify the `script` section to run different Qodo Merge commands, o
 
 Note that if your base branches are not protected, don't set the variables as `protected`, since the pipeline will not have access to them.
 
+> **Note**: The `$CI_SERVER_FQDN` variable is available starting from GitLab version 16.10. If you're using an earlier version, this variable will not be available. However, you can combine `$CI_SERVER_HOST` and `$CI_SERVER_PORT` to achieve the same result. Please ensure you're using a compatible version or adjust your configuration.
+
 ## Run a GitLab webhook server
 
-1. From the GitLab workspace or group, create an access token. Enable the "api" scope only.
+1. From the GitLab workspace or group, create an access token with "Reporter" role ("Developer" if using Pro version of the agent) and "api" scope.
 
 2. Generate a random secret for your app, and save it for later. For example, you can use:
 
 ```
 WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
 ```
 
-3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [here](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app) steps 4-7.
-
-4. In the secrets file, fill in the following:
-   - Your OpenAI key.
-   - In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
-   - Set deployment_type to 'gitlab' in [configuration.toml](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml)
-
-5. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```. Set the secret token to the generated secret from step 2.
-   In the "Trigger" section, check the 'comments' and 'merge request events' boxes.
-
-6. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
+3. Clone this repository:
+
+```
+git clone https://github.com/Codium-ai/pr-agent.git
+```
+
+4. Prepare variables and secrets. Skip this step if you plan on setting these as environment variables when running the agent:
+   1. In the configuration file/variables:
+      - Set `deployment_type` to "gitlab"
+   2. In the secrets file/variables:
+      - Set your AI model key in the respective section
+      - In the [gitlab] section, set `personal_access_token` (with token from step 1) and `shared_secret` (with secret from step 2)
+
+5. Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:
+
+```
+docker build . -t gitlab_pr_agent --target gitlab_webhook -f docker/Dockerfile
+docker push codiumai/pr-agent:gitlab_webhook # Push to your Docker repository
+```
+
+6. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```, the secret token to the generated secret from step 2, and enable the triggers `push`, `comments` and `merge request events`.
+
+7. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
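Between steps 5 and 6 the container has to actually be running somewhere GitLab can reach. A minimal local sketch; the listening port and the secrets mount path are assumptions, while the image tag comes from the build command in step 5:

```bash
# Run the webhook server built in step 5 (port and secrets path are assumptions)
docker run --rm -p 3000:3000 \
  -v $(pwd)/pr_agent/settings/.secrets.toml:/app/pr_agent/settings/.secrets.toml \
  gitlab_pr_agent
```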
@@ -1,7 +1,7 @@
 # Installation
 
 ## Self-hosted Qodo Merge
-If you choose to host you own Qodo Merge, you first need to acquire two tokens:
+If you choose to host your own Qodo Merge, you first need to acquire two tokens:
 
 1. An OpenAI key from [here](https://platform.openai.com/api-keys), with access to GPT-4 (or a key for other [language models](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/), if you prefer).
 2. A GitHub\GitLab\BitBucket personal access token (classic), with the repo scope. [GitHub from [here](https://github.com/settings/tokens)]
@@ -16,8 +16,8 @@ from pr_agent.config_loader import get_settings
 
 def main():
     # Fill in the following values
-    provider = "github" # GitHub provider
-    user_token = "..." # GitHub user token
+    provider = "github" # github/gitlab/bitbucket/azure_devops
+    user_token = "..." # user token
     openai_key = "..." # OpenAI key
     pr_url = "..." # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'
     command = "/review" # Command to run (e.g. '/review', '/describe', '/ask="What is the purpose of this PR?"', ...)
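For context, a sketch of how the surrounding script plausibly continues; the `cli.run_command` call and the exact settings keys are assumptions inferred from the hunk's `get_settings` import, since the rest of the file is not shown in this compare:

```python
from pr_agent import cli
from pr_agent.config_loader import get_settings

def main():
    # Fill in the following values
    provider = "github"  # github/gitlab/bitbucket/azure_devops
    user_token = "..."   # user token
    openai_key = "..."   # OpenAI key
    pr_url = "..."       # PR URL, e.g. 'https://github.com/Codium-ai/pr-agent/pull/809'
    command = "/review"  # e.g. '/review', '/describe', '/ask="..."'

    # Point the agent at the chosen provider and credentials (setting names assumed)
    get_settings().set("CONFIG.git_provider", provider)
    get_settings().set("openai.key", openai_key)
    get_settings().set("github.user_token", user_token)

    # Run the requested tool against the PR
    cli.run_command(pr_url, command)

if __name__ == "__main__":
    main()
```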
@@ -42,42 +42,34 @@ A list of the relevant tools can be found in the [tools guide](../tools/ask.md).
 To invoke a tool (for example `review`), you can run directly from the Docker image. Here's how:
 
 - For GitHub:
 ```
 docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
 ```
+If you are using GitHub enterprise server, you need to specify the custom url as variable.
+For example, if your GitHub server is at `https://github.mycompany.com`, add the following to the command:
+```
+-e GITHUB.BASE_URL=https://github.mycompany.com/api/v3
+```
 
 - For GitLab:
 ```
 docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
 ```
 
-Note: If you have a dedicated GitLab instance, you need to specify the custom url as variable:
+If you have a dedicated GitLab instance, you need to specify the custom url as variable:
 ```
-docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> -e GITLAB.URL=<your gitlab instance url> codiumai/pr-agent:latest --pr_url <pr_url> review
+-e GITLAB.URL=<your gitlab instance url>
 ```
 
 - For BitBucket:
 ```
 docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
 ```
 
 For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the environment variables expected names and values.
 
 ---
 
-If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
-```bash
-docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent@sha256:71b5ee15df59c745d352d84752d01561ba64b6d51327f97d46152f0c58a5f678 --pr_url <pr_url> review
-```
-
-Or you can run a [specific released version](https://github.com/Codium-ai/pr-agent/blob/main/RELEASE_NOTES.md) of pr-agent, for example:
-```
-codiumai/pr-agent@v0.9
-```
-
----
-
 ## Run from source
 
 1. Clone this repository:

@@ -115,7 +107,7 @@ python3 -m pr_agent.cli --issue_url <issue_url> similar_issue
 ...
 ```
 
 [Optional] Add the pr_agent folder to your PYTHONPATH
 ```
 export PYTHONPATH=$PYTHONPATH:<PATH to pr_agent folder>
 ```
@@ -17,8 +17,8 @@ Users without a purchased seat who interact with a repository featuring Qodo Mer
 Beyond this limit, Qodo Merge Pro will cease to respond to their inquiries unless a seat is purchased.
 
 ## Install Qodo Merge Pro for GitHub Enterprise Server
-You can install Qodo Merge Pro application on your GitHub Enterprise Server, and enjoy two weeks of free trial.
-After the trial period, to continue using Qodo Merge Pro, you will need to contact us for an [Enterprise license](https://www.codium.ai/pricing/).
+To use Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
 
 
 ## Install Qodo Merge Pro for GitLab (Teams & Enterprise)
@@ -29,7 +29,6 @@ Qodo Merge offers extensive pull request functionalities across various git pro
 |-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
 | TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
 |       | ⮑ Incremental | ✅ |   |   |   |
-|       | ⮑ [SOC2 Compliance](https://qodo-merge-docs.qodo.ai/tools/review/#soc2-ticket-compliance){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
 |       | Ask | ✅ | ✅ | ✅ | ✅ |
 |       | Describe | ✅ | ✅ | ✅ | ✅ |
 |       | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ |   | ✅ |
@@ -20,14 +20,13 @@ Here are some of the additional features and capabilities that Qodo Merge Pro of
 | Feature | Description |
 |----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | [**Model selection**](https://qodo-merge-docs.qodo.ai/usage-guide/PR_agent_pro_models/) | Choose the model that best fits your needs, among top models like `GPT4` and `Claude-Sonnet-3.5`
-| [**Global and wiki configuration**](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without commiting code |
+| [**Global and wiki configuration**](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without committing code |
-| [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate commitable code from the relevant suggestions interactively by clicking on a checkbox |
+| [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate committable code from the relevant suggestions interactively by clicking on a checkbox |
 | [**Suggestions impact**](https://qodo-merge-docs.qodo.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions |
 | [**CI feedback**](https://qodo-merge-docs.qodo.ai/tools/ci_feedback/) | Automatically analyze failed CI checks on GitHub and provide actionable feedback in the PR conversation, helping to resolve issues quickly |
 | [**Advanced usage statistics**](https://www.codium.ai/contact/#/) | Qodo Merge Pro offers detailed statistics at user, repository, and company levels, including metrics about Qodo Merge usage, and also general statistics and insights |
 | [**Incorporating companies' best practices**](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) | Use the companies' best practices as reference to increase the effectiveness and the relevance of the code suggestions |
 | [**Interactive triggering**](https://qodo-merge-docs.qodo.ai/tools/analyze/#example-usage) | Interactively apply different tools via the `analyze` command |
-| [**SOC2 compliance check**](https://qodo-merge-docs.qodo.ai/tools/review/#configuration-options) | Ensures the PR contains a ticket to a project management system (e.g., Jira, Asana, Trello, etc.)
 | [**Custom labels**](https://qodo-merge-docs.qodo.ai/tools/describe/#handle-custom-labels-from-the-repos-labels-page) | Define custom labels for Qodo Merge to assign to the PR |
 
 ### Additional tools
@@ -25,7 +25,7 @@ There are 3 ways to enable custom labels:
 When working from CLI, you need to apply the [configuration changes](#configuration-options) to the [custom_labels file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/custom_labels.toml):
 
 #### 2. Repo configuration file
-To enable custom labels, you need to apply the [configuration changes](#configuration-options) to the local `.pr_agent.toml` file in you repository.
+To enable custom labels, you need to apply the [configuration changes](#configuration-options) to the local `.pr_agent.toml` file in your repository.
 
 #### 3. Handle custom labels from the Repo's labels page 💎
 > This feature is available only in Qodo Merge Pro
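For reference, a minimal sketch of what such a `.pr_agent.toml` change might look like; the label name and description below are illustrative assumptions, not taken from the diff above:

```toml
[config]
enable_custom_labels = true

# each custom label gets its own section with a short description
[custom_labels."Bug fix"]
description = "Fixes a bug in the code"
```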
@@ -34,7 +34,7 @@ pr_commands = [
 ]
 
 [pr_description]
-publish_labels = ...
+publish_labels = true
 ...
 ```
 
@@ -49,7 +49,7 @@ publish_labels = ...
 <table>
 <tr>
 <td><b>publish_labels</b></td>
-<td>If set to true, the tool will publish the labels to the PR. Default is true.</td>
+<td>If set to true, the tool will publish labels to the PR. Default is false.</td>
 </tr>
 <tr>
 <td><b>publish_description_as_comment</b></td>
@@ -67,6 +67,33 @@ In post-process, Qodo Merge counts the number of suggestions that were implement
 {width=512}
 
+## Suggestion tracking 💎
+`Platforms supported: GitHub, GitLab`
+
+Qodo Merge employs a novel detection system to automatically [identify](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) AI code suggestions that PR authors have accepted and implemented.
+
+Accepted suggestions are also automatically documented in a dedicated wiki page called `.pr_agent_accepted_suggestions`, allowing users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.
+An example [result](https://github.com/Codium-ai/pr-agent/wiki/.pr_agent_accepted_suggestions):
+
+[{width=768}](https://github.com/Codium-ai/pr-agent/wiki/.pr_agent_accepted_suggestions)
+
+This dedicated wiki page will also serve as a foundation for future AI model improvements, allowing it to learn from historically implemented suggestions and generate more targeted, contextually relevant recommendations.
+
+This feature is controlled by a boolean configuration parameter: `pr_code_suggestions.wiki_page_accepted_suggestions` (default is true).
+
+!!! note "Wiki must be enabled"
+    While the aggregation process is automatic, GitHub repositories require a one-time manual wiki setup.
+
+    To initialize the wiki: navigate to `Wiki`, select `Create the first page`, then click `Save page`.
+
+    {width=768}
+
+    Once a wiki repo is created, the tool will automatically use this wiki for tracking suggestions.
+
+!!! note "Why a wiki page?"
+    Your code belongs to you, and we respect your privacy. Hence, we won't store any code suggestions in an external database.
+
+    Instead, we leverage a dedicated private page, within your repository wiki, to track suggestions. This approach offers convenient, secure suggestion tracking while avoiding pull requests or any noise to the main repository.
+
 ## Usage Tips
 
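Since the whole tracking mechanism is gated by that single boolean, opting out is a one-line change. A minimal sketch, using only the `pr_code_suggestions.wiki_page_accepted_suggestions` parameter named in the hunk above:

```toml
[pr_code_suggestions]
# disable the automatic ".pr_agent_accepted_suggestions" wiki page
wiki_page_accepted_suggestions = false
```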
@@ -114,9 +141,16 @@ code_suggestions_self_review_text = "... (your text here) ..."
 {width=512}
 
 
+!!! tip "Tip - Reducing visual footprint after self-review 💎"
+
+    The configuration parameter `pr_code_suggestions.fold_suggestions_on_self_review` (default is True)
+    can be used to automatically fold the suggestions after the user clicks the self-review checkbox.
+
+    This reduces the visual footprint of the suggestions, and also indicates to the PR reviewer that the suggestions have been reviewed by the PR author, and don't require further attention.
+
+
-!!! tip "Tip - demanding self-review from the PR author 💎"
+!!! tip "Tip - Demanding self-review from the PR author 💎"
 
     By setting:
     ```toml
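For orientation, a combined sketch of the two self-review options discussed in the tips above. The `fold_suggestions_on_self_review` key is taken from the diff; pairing it with `demand_code_suggestions_self_review` is an assumption based on the surrounding configuration keys:

```toml
[pr_code_suggestions]
demand_code_suggestions_self_review = true   # require the PR author to tick a self-review checkbox
fold_suggestions_on_self_review = true       # fold the suggestions once the checkbox is clicked
```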
@@ -211,6 +245,32 @@ enable_global_best_practices = true
 
 Then, create a `best_practices.md` wiki file in the root of [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
 
+##### Best practices for multiple languages
+For a git organization working with multiple programming languages, you can maintain a centralized global `best_practices.md` file containing language-specific guidelines.
+When reviewing pull requests, Qodo Merge automatically identifies the programming language and applies the relevant best practices from this file.
+Structure your `best_practices.md` file using the following format:
+
+```
+# [Python]
+...
+# [Java]
+...
+# [JavaScript]
+...
+```
+
+##### Dedicated label for best practices suggestions
+Best practice suggestions are labeled as `Organization best practice` by default.
+To customize this label, modify it in your configuration file:
+
+```toml
+[best_practices]
+organization_name = ""
+```
+
+And the label will be: `{organization_name} best practice`.
+
+
 ##### Example results
 
 {width=512}
@@ -242,12 +302,12 @@ Using a combination of both can help the AI model to provide relevant and tailor
 <td>Minimum score threshold for suggestions to be presented as committable PR comments in addition to the table. Default is -1 (disabled).</td>
 </tr>
 <tr>
-<td><b>persistent_comment</b></td>
+<td><b>focus_only_on_problems</b></td>
-<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
+<td>If set to true, suggestions will focus primarily on identifying and fixing code problems, and less on style considerations like best practices, maintainability, or readability. Default is true.</td>
 </tr>
 <tr>
-<td><b>self_reflect_on_suggestions</b></td>
+<td><b>persistent_comment</b></td>
-<td>If set to true, the improve tool will calculate an importance score for each suggestion [1-10], and sort the suggestion labels group based on this score. Default is true.</td>
+<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
 </tr>
 <tr>
 <td><b>suggestions_score_threshold</b></td>
@@ -265,6 +325,10 @@ Using a combination of both can help the AI model to provide relevant and tailor
 <td><b>enable_chat_text</b></td>
 <td>If set to true, the tool will display a reference to the PR chat in the comment. Default is true.</td>
 </tr>
+<tr>
+<td><b>wiki_page_accepted_suggestions</b></td>
+<td>If set to true, the tool will automatically track accepted suggestions in a dedicated wiki page called `.pr_agent_accepted_suggestions`. Default is true.</td>
+</tr>
 </table>
 
 ??? example "Params for number of suggestions and AI calls"
@@ -138,20 +138,9 @@ num_code_suggestions = ...
 <td><b>require_security_review</b></td>
 <td>If set to true, the tool will add a section that checks if the PR contains a possible security or vulnerability issue. Default is true.</td>
 </tr>
-</table>
-
-!!! example "SOC2 ticket compliance 💎"
-
-    This sub-tool checks if the PR description properly contains a ticket to a project management system (e.g., Jira, Asana, Trello, etc.), as required by SOC2 compliance. If not, it will add a label to the PR: "Missing SOC2 ticket".
-
-<table>
 <tr>
-<td><b>require_soc2_ticket</b></td>
-<td>If set to true, the SOC2 ticket checker sub-tool will be enabled. Default is false.</td>
-</tr>
-<tr>
-<td><b>soc2_ticket_prompt</b></td>
-<td>The prompt for the SOC2 ticket review. Default is: `Does the PR description include a link to ticket in a project management system (e.g., Jira, Asana, Trello, etc.) ?`. Edit this field if your compliance requirements are different.</td>
+<td><b>require_ticket_analysis_review</b></td>
+<td>If set to true, and the PR contains a GitHub or Jira ticket link, the tool will add a section that checks if the PR in fact fulfilled the ticket requirements. Default is true.</td>
 </tr>
 </table>
 
@@ -193,7 +182,7 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
 It is recommended to review the [Configuration options](#configuration-options) section, and choose the relevant options for your use case.
 
 Some of the features that are disabled by default are quite useful, and should be considered for enabling. For example:
-`require_score_review`, `require_soc2_ticket`, and more.
+`require_score_review`, and more.
 
 On the other hand, if you find one of the enabled features to be irrelevant for your use case, disable it. No default configuration can fit all use cases.
 
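As a sketch of how enabling one of those disabled-by-default options looks in practice (placing `require_score_review` under `[pr_reviewer]` is an assumption, by analogy with the other `require_*` flags in the table above):

```toml
[pr_reviewer]
require_score_review = true  # add a PR score section to the review output
```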
@@ -269,4 +258,3 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
 [//]: # ( Notice If you are interested **only** in the code suggestions, it is recommended to use the [`improve`](./improve.md) feature instead, since it is a dedicated only to code suggestions, and usually gives better results.)
-
 [//]: # ( Use the `review` tool if you want to get more comprehensive feedback, which includes code suggestions as well.)
 
@@ -160,3 +160,13 @@ ignore_pr_target_branches = ["qa"]
 
 Where the `ignore_pr_source_branches` and `ignore_pr_target_branches` are lists of regex patterns to match the source and target branches you want to ignore.
 They are not mutually exclusive; you can use them together or separately.
+
+
+To allow only specific folders (often needed in large monorepos), set:
+
+```
+[config]
+allow_only_specific_folders=['folder1','folder2']
+```
+
+For the configuration above, automatic feedback will only be triggered when the PR changes include files from 'folder1' or 'folder2'.
@@ -72,13 +72,14 @@ The configuration parameter `pr_commands` defines the list of tools that will be
 ```
 [github_app]
 pr_commands = [
-    "/describe --pr_description.final_update_message=false",
-    "/review --pr_reviewer.num_code_suggestions=0",
-    "/improve",
+    "/describe",
+    "/review",
+    "/improve --pr_code_suggestions.suggestions_score_threshold=5",
 ]
 ```
 
 This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the `describe`, `review` and `improve` tools.
-For the `review` tool, for example, the `num_code_suggestions` parameter will be set to 0.
+For the `improve` tool, for example, the `suggestions_score_threshold` parameter will be set to 5 (suggestions below a score of 5 won't be presented).
 
 You can override the default tool parameters by using one of the three options for a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/): **wiki**, **local**, or **global**.
 For example, if your local `.pr_agent.toml` file contains:
@@ -105,7 +106,7 @@ The configuration parameter `push_commands` defines the list of tools that will
 handle_push_trigger = true
 push_commands = [
     "/describe",
-    "/review --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
+    "/review",
 ]
 ```
 This means that when new code is pushed to the PR, Qodo Merge will run the `describe` and `review` tools, with the specified parameters.
@@ -148,7 +149,7 @@ After setting up a GitLab webhook, to control which commands will run automatica
 [gitlab]
 pr_commands = [
     "/describe",
-    "/review --pr_reviewer.num_code_suggestions=0",
+    "/review",
     "/improve",
 ]
 ```
@@ -161,7 +162,7 @@ The configuration parameter `push_commands` defines the list of tools that will
 handle_push_trigger = true
 push_commands = [
     "/describe",
-    "/review --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
+    "/review",
 ]
 ```
 
@@ -182,7 +183,7 @@ Each time you invoke a `/review` tool, it will use the extra instructions you se
 
 
 Note that among other limitations, BitBucket provides relatively low rate-limits for applications (up to 1000 requests per hour), and does not provide an API to track the actual rate-limit usage.
-If you experience lack of responses from Qodo Merge, you might want to set: `bitbucket_app.avoid_full_files=true` in your configuration file.
+If you experience a lack of responses from Qodo Merge, you might want to set: `bitbucket_app.avoid_full_files=true` in your configuration file.
 This will prevent Qodo Merge from acquiring the full file content, and will only use the diff content. This will reduce the number of requests made to BitBucket, at the cost of a small decrease in accuracy, as dynamic context will not be applicable.
 
 
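A minimal sketch of the mitigation described above, using the `bitbucket_app.avoid_full_files` setting named in the diff:

```toml
[bitbucket_app]
# use diff-only context to cut BitBucket API requests (small accuracy trade-off)
avoid_full_files = true
```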
@@ -194,13 +195,23 @@ Specifically, set the following values:
 ```
 [bitbucket_app]
 pr_commands = [
-    "/review --pr_reviewer.num_code_suggestions=0",
+    "/review",
     "/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
 ]
 ```
 Note that specifically for Bitbucket we recommend using `--pr_code_suggestions.suggestions_score_threshold=7`; this is the default value we set for Bitbucket.
 Since this platform only supports inline code suggestions, we want to present only a limited number of suggestions.
+
+To enable BitBucket app to respond to each **push** to the PR, set (for example):
+```
+[bitbucket_app]
+handle_push_trigger = true
+push_commands = [
+    "/describe",
+    "/review",
+]
+```
 
 ## Azure DevOps provider
 
 To use the Azure DevOps provider, use the following settings in configuration.toml:
@@ -133,9 +133,26 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat
 
 If you do want to set explicit credentials, then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
 
+### Google AI Studio
+
+To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant models in the configuration section of the configuration file:
+
+```toml
+[config] # in configuration.toml
+model="google_ai_studio/gemini-1.5-flash"
+model_turbo="google_ai_studio/gemini-1.5-flash"
+fallback_models=["google_ai_studio/gemini-1.5-flash"]
+
+[google_ai_studio] # in .secrets.toml
+gemini_api_key = "..."
+```
+
+If you don't want to set the API key in the .secrets.toml file, you can set the `GOOGLE_AI_STUDIO.GEMINI_API_KEY` environment variable.
+
 ### Anthropic
 
 To use Anthropic models, set the relevant models in the configuration section of the configuration file:
 
 ```
 [config]
 model="anthropic/claude-3-opus-20240229"
@@ -18,7 +18,7 @@ In terms of precedence, wiki configurations will override local configurations,
 
 ## Wiki configuration file 💎
 
-`Platforms supported: GitHub, GitLab`
+`Platforms supported: GitHub, GitLab, Bitbucket`
 
 With Qodo Merge Pro, you can set configurations by creating a page called `.pr_agent.toml` in the [wiki](https://github.com/Codium-ai/pr-agent/wiki/pr_agent.toml) of the repo.
 The advantage of this method is that it allows you to set configurations without needing to commit new content to the repo - just edit the wiki page and **save**.
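To make that concrete: the wiki page is just a TOML document with the same sections as a committed `.pr_agent.toml`. A minimal sketch of such a page (the specific option shown is illustrative, not mandated by the diff above):

```toml
[pr_description]
generate_ai_title = true
```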
@@ -10,4 +10,3 @@ Specifically, CLI commands can be issued by invoking a pre-built [docker image](
 
 For online usage, you will need to set up either a [GitHub App](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app) or a [GitHub Action](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) (GitHub), a [GitLab webhook](https://qodo-merge-docs.qodo.ai/installation/gitlab/#run-a-gitlab-webhook-server) (GitLab), or a [BitBucket App](https://qodo-merge-docs.qodo.ai/installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app) (BitBucket).
 These platforms also make it possible to run specific Qodo Merge tools automatically when a new PR is opened, or on each push to a branch.
-
@@ -43,6 +43,7 @@ nav:
     - 💎 Similar Code: 'tools/similar_code.md'
   - Core Abilities:
     - 'core-abilities/index.md'
+    - Fetching ticket context: 'core-abilities/fetching_ticket_context.md'
     - Local and global metadata: 'core-abilities/metadata.md'
     - Dynamic context: 'core-abilities/dynamic_context.md'
     - Self-reflection: 'core-abilities/self_reflection.md'
@@ -82,11 +82,11 @@
 
 <footer class="wrapper">
 <div class="container">
-<p class="footer-text">© 2024 <a href="https://www.codium.ai/" target="_blank" rel="noopener">CodiumAI</a></p>
+<p class="footer-text">© 2024 <a href="https://www.qodo.ai/" target="_blank" rel="noopener">Qodo</a></p>
 <div class="footer-links">
-<a href="https://codiumate-docs.codium.ai/">Codiumate</a>
+<a href="https://qodo-gen-docs.qodo.ai/">Qodo Gen</a>
 <p>|</p>
-<a href="https://alpha-codium-docs.codium.ai/">AlphaCodium</a>
+<a href="https://qodo-flow-docs.qodo.ai/">AlphaCodium</a>
 </div>
 <div class="social-icons">
 <a href="https://github.com/Codium-ai" target="_blank" rel="noopener" title="github.com" class="social-link">
@@ -95,16 +95,16 @@
 <a href="https://discord.com/invite/SgSxuQ65GF" target="_blank" rel="noopener" title="discord.com" class="social-link">
 <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M524.531 69.836a1.5 1.5 0 0 0-.764-.7A485.065 485.065 0 0 0 404.081 32.03a1.816 1.816 0 0 0-1.923.91 337.461 337.461 0 0 0-14.9 30.6 447.848 447.848 0 0 0-134.426 0 309.541 309.541 0 0 0-15.135-30.6 1.89 1.89 0 0 0-1.924-.91 483.689 483.689 0 0 0-119.688 37.107 1.712 1.712 0 0 0-.788.676C39.068 183.651 18.186 294.69 28.43 404.354a2.016 2.016 0 0 0 .765 1.375 487.666 487.666 0 0 0 146.825 74.189 1.9 1.9 0 0 0 2.063-.676A348.2 348.2 0 0 0 208.12 430.4a1.86 1.86 0 0 0-1.019-2.588 321.173 321.173 0 0 1-45.868-21.853 1.885 1.885 0 0 1-.185-3.126 251.047 251.047 0 0 0 9.109-7.137 1.819 1.819 0 0 1 1.9-.256c96.229 43.917 200.41 43.917 295.5 0a1.812 1.812 0 0 1 1.924.233 234.533 234.533 0 0 0 9.132 7.16 1.884 1.884 0 0 1-.162 3.126 301.407 301.407 0 0 1-45.89 21.83 1.875 1.875 0 0 0-1 2.611 391.055 391.055 0 0 0 30.014 48.815 1.864 1.864 0 0 0 2.063.7A486.048 486.048 0 0 0 610.7 405.729a1.882 1.882 0 0 0 .765-1.352c12.264-126.783-20.532-236.912-86.934-334.541ZM222.491 337.58c-28.972 0-52.844-26.587-52.844-59.239s23.409-59.241 52.844-59.241c29.665 0 53.306 26.82 52.843 59.239 0 32.654-23.41 59.241-52.843 59.241Zm195.38 0c-28.971 0-52.843-26.587-52.843-59.239s23.409-59.241 52.843-59.241c29.667 0 53.307 26.82 52.844 59.239 0 32.654-23.177 59.241-52.844 59.241Z"></path></svg>
 </a>
-<a href="https://www.youtube.com/@Codium-AI" target="_blank" rel="noopener" title="www.youtube.com" class="social-link">
+<a href="https://www.youtube.com/@QodoAI" target="_blank" rel="noopener" title="www.youtube.com" class="social-link">
 <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M549.655 124.083c-6.281-23.65-24.787-42.276-48.284-48.597C458.781 64 288 64 288 64S117.22 64 74.629 75.486c-23.497 6.322-42.003 24.947-48.284 48.597-11.412 42.867-11.412 132.305-11.412 132.305s0 89.438 11.412 132.305c6.281 23.65 24.787 41.5 48.284 47.821C117.22 448 288 448 288 448s170.78 0 213.371-11.486c23.497-6.321 42.003-24.171 48.284-47.821 11.412-42.867 11.412-132.305 11.412-132.305s0-89.438-11.412-132.305zm-317.51 213.508V175.185l142.739 81.205-142.739 81.201z"></path></svg>
 </a>
-<a href="https://www.linkedin.com/company/codiumai" target="_blank" rel="noopener" title="www.linkedin.com" class="social-link">
+<a href="https://www.linkedin.com/company/qodoai" target="_blank" rel="noopener" title="www.linkedin.com" class="social-link">
 <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M416 32H31.9C14.3 32 0 46.5 0 64.3v383.4C0 465.5 14.3 480 31.9 480H416c17.6 0 32-14.5 32-32.3V64.3c0-17.8-14.4-32.3-32-32.3zM135.4 416H69V202.2h66.5V416zm-33.2-243c-21.3 0-38.5-17.3-38.5-38.5S80.9 96 102.2 96c21.2 0 38.5 17.3 38.5 38.5 0 21.3-17.2 38.5-38.5 38.5zm282.1 243h-66.4V312c0-24.8-.5-56.7-34.5-56.7-34.6 0-39.9 27-39.9 54.9V416h-66.4V202.2h63.7v29.2h.9c8.9-16.8 30.6-34.5 62.9-34.5 67.2 0 79.7 44.3 79.7 101.9V416z"></path></svg>
 </a>
-<a href="https://twitter.com/CodiumAI" target="_blank" rel="noopener" title="twitter.com" class="social-link">
+<a href="https://twitter.com/QodoAI" target="_blank" rel="noopener" title="twitter.com" class="social-link">
 <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M459.37 151.716c.325 4.548.325 9.097.325 13.645 0 138.72-105.583 298.558-298.558 298.558-59.452 0-114.68-17.219-161.137-47.106 8.447.974 16.568 1.299 25.34 1.299 49.055 0 94.213-16.568 130.274-44.832-46.132-.975-84.792-31.188-98.112-72.772 6.498.974 12.995 1.624 19.818 1.624 9.421 0 18.843-1.3 27.614-3.573-48.081-9.747-84.143-51.98-84.143-102.985v-1.299c13.969 7.797 30.214 12.67 47.431 13.319-28.264-18.843-46.781-51.005-46.781-87.391 0-19.492 5.197-37.36 14.294-52.954 51.655 63.675 129.3 105.258 216.365 109.807-1.624-7.797-2.599-15.918-2.599-24.04 0-57.828 46.782-104.934 104.934-104.934 30.213 0 57.502 12.67 76.67 33.137 23.715-4.548 46.456-13.32 66.599-25.34-7.798 24.366-24.366 44.833-46.132 57.827 21.117-2.273 41.584-8.122 60.426-16.243-14.292 20.791-32.161 39.308-52.628 54.253z"></path></svg>
 </a>
-<a href="https://www.instagram.com/codiumai/" target="_blank" rel="noopener" title="www.instagram.com" class="social-link">
+<a href="https://www.instagram.com/qodo_ai" target="_blank" rel="noopener" title="www.instagram.com" class="social-link">
 <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M224.1 141c-63.6 0-114.9 51.3-114.9 114.9s51.3 114.9 114.9 114.9S339 319.5 339 255.9 287.7 141 224.1 141zm0 189.6c-41.1 0-74.7-33.5-74.7-74.7s33.5-74.7 74.7-74.7 74.7 33.5 74.7 74.7-33.6 74.7-74.7 74.7zm146.4-194.3c0 14.9-12 26.8-26.8 26.8-14.9 0-26.8-12-26.8-26.8s12-26.8 26.8-26.8 26.8 12 26.8 26.8zm76.1 27.2c-1.7-35.9-9.9-67.7-36.2-93.9-26.2-26.2-58-34.4-93.9-36.2-37-2.1-147.9-2.1-184.9 0-35.8 1.7-67.6 9.9-93.9 36.1s-34.4 58-36.2 93.9c-2.1 37-2.1 147.9 0 184.9 1.7 35.9 9.9 67.7 36.2 93.9s58 34.4 93.9 36.2c37 2.1 147.9 2.1 184.9 0 35.9-1.7 67.7-9.9 93.9-36.2 26.2-26.2 34.4-58 36.2-93.9 2.1-37 2.1-147.8 0-184.8zM398.8 388c-7.8 19.6-22.9 34.7-42.6 42.6-29.5 11.7-99.5 9-132.1 9s-102.7 2.6-132.1-9c-19.6-7.8-34.7-22.9-42.6-42.6-11.7-29.5-9-99.5-9-132.1s-2.6-102.7 9-132.1c7.8-19.6 22.9-34.7 42.6-42.6 29.5-11.7 99.5-9 132.1-9s102.7-2.6 132.1 9c19.6 7.8 34.7 22.9 42.6 42.6 11.7 29.5 9 99.5 9 132.1s2.7 102.7-9 132.1z"></path></svg>
 </a>
 </div>
@@ -3,5 +3,5 @@
 new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
 j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
-})(window,document,'script','dataLayer','GTM-5C9KZBM3');</script>
+})(window,document,'script','dataLayer','GTM-M6PJSFV');</script>
 <!-- End Google Tag Manager -->
@@ -1 +0,0 @@
-
@@ -3,7 +3,6 @@ from functools import partial
 
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-
 from pr_agent.algo.utils import update_settings_from_args
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.utils import apply_repo_settings
@@ -19,6 +19,7 @@ MAX_TOKENS = {
     'gpt-4o-mini': 128000,  # 128K, but may be limited by config.max_model_tokens
     'gpt-4o-mini-2024-07-18': 128000,  # 128K, but may be limited by config.max_model_tokens
     'gpt-4o-2024-08-06': 128000,  # 128K, but may be limited by config.max_model_tokens
+    'gpt-4o-2024-11-20': 128000,  # 128K, but may be limited by config.max_model_tokens
     'o1-mini': 128000,  # 128K, but may be limited by config.max_model_tokens
     'o1-mini-2024-09-12': 128000,  # 128K, but may be limited by config.max_model_tokens
     'o1-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
@@ -31,12 +32,16 @@ MAX_TOKENS = {
     'vertex_ai/codechat-bison': 6144,
     'vertex_ai/codechat-bison-32k': 32000,
     'vertex_ai/claude-3-haiku@20240307': 100000,
+    'vertex_ai/claude-3-5-haiku@20241022': 100000,
     'vertex_ai/claude-3-sonnet@20240229': 100000,
     'vertex_ai/claude-3-opus@20240229': 100000,
     'vertex_ai/claude-3-5-sonnet@20240620': 100000,
+    'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
     'vertex_ai/gemini-1.5-pro': 1048576,
     'vertex_ai/gemini-1.5-flash': 1048576,
     'vertex_ai/gemma2': 8200,
+    'gemini/gemini-1.5-pro': 1048576,
+    'gemini/gemini-1.5-flash': 1048576,
     'codechat-bison': 6144,
     'codechat-bison-32k': 32000,
     'anthropic.claude-instant-v1': 100000,
@@ -44,12 +49,16 @@ MAX_TOKENS = {
     'anthropic.claude-v2': 100000,
     'anthropic/claude-3-opus-20240229': 100000,
     'anthropic/claude-3-5-sonnet-20240620': 100000,
+    'anthropic/claude-3-5-sonnet-20241022': 100000,
+    'anthropic/claude-3-5-haiku-20241022': 100000,
     'bedrock/anthropic.claude-instant-v1': 100000,
     'bedrock/anthropic.claude-v2': 100000,
     'bedrock/anthropic.claude-v2:1': 100000,
     'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000,
     'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000,
+    'bedrock/anthropic.claude-3-5-haiku-20241022-v1:0': 100000,
     'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000,
+    'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000,
     'claude-3-5-sonnet': 100000,
     'groq/llama3-8b-8192': 8192,
     'groq/llama3-70b-8192': 8192,
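Several of the comments in this table note that a model's window "may be limited by config.max_model_tokens". A minimal sketch of that cap (the key is referenced in the comments above; the value shown is illustrative):

```toml
[config]
max_model_tokens = 32000  # clip the effective context window below the per-model limits
```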
@@ -1,17 +1,18 @@
 try:
-    from langchain_openai import ChatOpenAI, AzureChatOpenAI
-    from langchain_core.messages import SystemMessage, HumanMessage
+    from langchain_core.messages import HumanMessage, SystemMessage
+    from langchain_openai import AzureChatOpenAI, ChatOpenAI
 except: # we don't enforce langchain as a dependency, so if it's not installed, just move on
     pass
 
+import functools
+
+from openai import APIError, RateLimitError, Timeout
+from retry import retry
+
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
-from openai import APIError, RateLimitError, Timeout
-from retry import retry
-
-import functools
-
 OPENAI_RETRIES = 5
 
 
@@ -73,4 +74,3 @@ class LangChainOpenAIHandler(BaseAiHandler):
             raise ValueError(f"OpenAI {e.name} is required") from e
         else:
             raise e
-
@@ -1,7 +1,8 @@
 import os
-import requests
 import litellm
 import openai
+import requests
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
 
@@ -83,6 +84,11 @@ class LiteLLMAIHandler(BaseAiHandler):
             litellm.vertex_location = get_settings().get(
                 "VERTEXAI.VERTEX_LOCATION", None
             )
+        # Google AI Studio
+        # SEE https://docs.litellm.ai/docs/providers/gemini
+        if get_settings().get("GOOGLE_AI_STUDIO.GEMINI_API_KEY", None):
+            os.environ["GEMINI_API_KEY"] = get_settings().google_ai_studio.gemini_api_key
+
     def prepare_logs(self, response, system, user, resp, finish_reason):
         response_log = response.dict().copy()
         response_log['system'] = system
@@ -171,6 +177,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                 get_logger().warning(
                     "Empty system prompt for claude model. Adding a newline character to prevent OpenAI API error.")
             messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
+
             if img_path:
                 try:
                     # check if the image link is alive
@@ -185,14 +192,30 @@ class LiteLLMAIHandler(BaseAiHandler):
                 messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
                                           {"type": "image_url", "image_url": {"url": img_path}}]
 
-            kwargs = {
-                "model": model,
-                "deployment_id": deployment_id,
-                "messages": messages,
-                "temperature": temperature,
-                "timeout": get_settings().config.ai_timeout,
-                "api_base": self.api_base,
-            }
+            # Currently O1 does not support separate system and user prompts
+            O1_MODEL_PREFIX = 'o1-'
+            model_type = model.split('/')[-1] if '/' in model else model
+            if model_type.startswith(O1_MODEL_PREFIX):
+                user = f"{system}\n\n\n{user}"
+                system = ""
+                get_logger().info(f"Using O1 model, combining system and user prompts")
+                messages = [{"role": "user", "content": user}]
+                kwargs = {
+                    "model": model,
+                    "deployment_id": deployment_id,
+                    "messages": messages,
+                    "timeout": get_settings().config.ai_timeout,
+                    "api_base": self.api_base,
+                }
+            else:
+                kwargs = {
+                    "model": model,
+                    "deployment_id": deployment_id,
+                    "messages": messages,
+                    "temperature": temperature,
+                    "timeout": get_settings().config.ai_timeout,
+                    "api_base": self.api_base,
+                }
 
             if get_settings().litellm.get("enable_callbacks", False):
                 kwargs = self.add_litellm_callbacks(kwargs)
@@ -1,8 +1,10 @@
+from os import environ
+
-from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 import openai
-from openai.error import APIError, RateLimitError, Timeout, TryAgain
+from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
 from retry import retry
 
+from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
|
|||||||
# Initialize OpenAIHandler specific attributes here
|
# Initialize OpenAIHandler specific attributes here
|
||||||
try:
|
try:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
openai.api_key = get_settings().openai.key
|
environ["OPENAI_API_KEY"] = get_settings().openai.key
|
||||||
if get_settings().get("OPENAI.ORG", None):
|
if get_settings().get("OPENAI.ORG", None):
|
||||||
openai.organization = get_settings().openai.org
|
openai.organization = get_settings().openai.org
|
||||||
if get_settings().get("OPENAI.API_TYPE", None):
|
if get_settings().get("OPENAI.API_TYPE", None):
|
||||||
@ -24,7 +26,7 @@ class OpenAIHandler(BaseAiHandler):
|
|||||||
if get_settings().get("OPENAI.API_VERSION", None):
|
if get_settings().get("OPENAI.API_VERSION", None):
|
||||||
openai.api_version = get_settings().openai.api_version
|
openai.api_version = get_settings().openai.api_version
|
||||||
if get_settings().get("OPENAI.API_BASE", None):
|
if get_settings().get("OPENAI.API_BASE", None):
|
||||||
openai.api_base = get_settings().openai.api_base
|
environ["OPENAI_BASE_URL"] = get_settings().openai.api_base
|
||||||
|
|
||||||
except AttributeError as e:
|
except AttributeError as e:
|
||||||
raise ValueError("OpenAI key is required") from e
|
raise ValueError("OpenAI key is required") from e
|
||||||
@ -36,28 +38,26 @@ class OpenAIHandler(BaseAiHandler):
|
|||||||
"""
|
"""
|
||||||
return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
|
return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
|
||||||
|
|
||||||
@retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
|
@retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
|
||||||
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
|
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
|
||||||
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
|
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
|
||||||
try:
|
try:
|
||||||
deployment_id = self.deployment_id
|
|
||||||
get_logger().info("System: ", system)
|
get_logger().info("System: ", system)
|
||||||
get_logger().info("User: ", user)
|
get_logger().info("User: ", user)
|
||||||
messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
|
messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
|
||||||
|
client = AsyncOpenAI()
|
||||||
chat_completion = await openai.ChatCompletion.acreate(
|
chat_completion = await client.chat.completions.create(
|
||||||
model=model,
|
model=model,
|
||||||
deployment_id=deployment_id,
|
|
||||||
messages=messages,
|
messages=messages,
|
||||||
temperature=temperature,
|
temperature=temperature,
|
||||||
)
|
)
|
||||||
resp = chat_completion["choices"][0]['message']['content']
|
resp = chat_completion.choices[0].message.content
|
||||||
finish_reason = chat_completion["choices"][0]["finish_reason"]
|
finish_reason = chat_completion.choices[0].finish_reason
|
||||||
usage = chat_completion.get("usage")
|
usage = chat_completion.usage
|
||||||
get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
|
get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
|
||||||
model=model, usage=usage)
|
model=model, usage=usage)
|
||||||
return resp, finish_reason
|
return resp, finish_reason
|
||||||
except (APIError, Timeout, TryAgain) as e:
|
except (APIError, Timeout) as e:
|
||||||
get_logger().error("Error during OpenAI inference: ", e)
|
get_logger().error("Error during OpenAI inference: ", e)
|
||||||
raise
|
raise
|
||||||
except (RateLimitError) as e:
|
except (RateLimitError) as e:
|
||||||
@ -65,4 +65,4 @@ class OpenAIHandler(BaseAiHandler):
|
|||||||
raise
|
raise
|
||||||
except (Exception) as e:
|
except (Exception) as e:
|
||||||
get_logger().error("Unknown error during OpenAI inference: ", e)
|
get_logger().error("Unknown error during OpenAI inference: ", e)
|
||||||
raise TryAgain from e
|
raise
|
||||||
|
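The hunk above moves the handler from the legacy module-level `openai.ChatCompletion.acreate` API onto the openai>=1.0 client object, which returns typed objects instead of dicts. A minimal sketch of the new call shape, assuming `OPENAI_API_KEY` is set in the environment; the model name and prompts are placeholders, not taken from this repo:

```python
# Minimal sketch of the openai>=1.0 async call style used in the diff above.
import asyncio
from os import environ

from openai import AsyncOpenAI


async def main():
    environ.setdefault("OPENAI_API_KEY", "sk-placeholder")  # normally set outside the process
    client = AsyncOpenAI()  # reads OPENAI_API_KEY / OPENAI_BASE_URL from the environment
    chat_completion = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "system", "content": "You are terse."},
                  {"role": "user", "content": "Say hi."}],
        temperature=0.2,
    )
    # v1 responses are typed objects, not dicts, hence the attribute access in the diff
    print(chat_completion.choices[0].message.content,
          chat_completion.choices[0].finish_reason,
          chat_completion.usage)


asyncio.run(main())
```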
@@ -3,8 +3,8 @@ from __future__ import annotations
 import re
 import traceback
 
-from pr_agent.config_loader import get_settings
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
 
@@ -31,7 +31,7 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
 
 
 def decode_if_bytes(original_file_str):
-    if isinstance(original_file_str, bytes):
+    if isinstance(original_file_str, (bytes, bytearray)):
         try:
             return original_file_str.decode('utf-8')
         except UnicodeDecodeError:
@@ -61,23 +61,26 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
     patch_lines = patch_str.splitlines()
     extended_patch_lines = []
 
+    is_valid_hunk = True
     start1, size1, start2, size2 = -1, -1, -1, -1
     RE_HUNK_HEADER = re.compile(
         r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
     try:
-        for line in patch_lines:
+        for i, line in enumerate(patch_lines):
             if line.startswith('@@'):
                 match = RE_HUNK_HEADER.match(line)
                 # identify hunk header
                 if match:
                     # finish processing previous hunk
-                    if start1 != -1 and patch_extra_lines_after > 0:
+                    if is_valid_hunk and (start1 != -1 and patch_extra_lines_after > 0):
                         delta_lines = [f' {line}' for line in original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]]
                         extended_patch_lines.extend(delta_lines)
 
                     section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
 
-                    if patch_extra_lines_before > 0 or patch_extra_lines_after > 0:
+                    is_valid_hunk = check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1)
+
+                    if is_valid_hunk and (patch_extra_lines_before > 0 or patch_extra_lines_after > 0):
                         def _calc_context_limits(patch_lines_before):
                             extended_start1 = max(1, start1 - patch_lines_before)
                             extended_size1 = size1 + (start1 - extended_start1) + patch_extra_lines_after
@@ -138,7 +141,7 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
         return patch_str
 
     # finish processing last hunk
-    if start1 != -1 and patch_extra_lines_after > 0:
+    if start1 != -1 and patch_extra_lines_after > 0 and is_valid_hunk:
         delta_lines = original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]
         # add space at the beginning of each extra line
         delta_lines = [f' {line}' for line in delta_lines]
@@ -148,6 +151,23 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
     return extended_patch_str
 
 
+def check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1):
+    """
+    Check if the hunk lines match the original file content. We saw cases where the hunk header line doesn't match the original file content, and then
+    extending the hunk with extra lines before the hunk header can cause the hunk to be invalid.
+    """
+    is_valid_hunk = True
+    try:
+        if i + 1 < len(patch_lines) and patch_lines[i + 1][0] == ' ':  # an existing line in the file
+            if patch_lines[i + 1].strip() != original_lines[start1 - 1].strip():
+                is_valid_hunk = False
+                get_logger().error(
+                    f"Invalid hunk in PR, line {start1} in hunk header doesn't match the original file content")
+    except:
+        pass
+    return is_valid_hunk
+
+
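The new `check_if_hunk_lines_matches_to_file` guard rejects hunks whose first context line disagrees with the original file, before any context extension happens. A toy sketch of the same check, under the unified-diff convention that `start1` from the hunk header is 1-based:

```python
# Toy version of the guard above: the first context line after a hunk header
# '@@ -start1,... @@' must equal line start1 of the original file, otherwise
# the hunk is treated as invalid and context extension is skipped.
original_lines = ["def foo():", "    return 1"]

patch_ok = ["@@ -1,2 +1,2 @@", " def foo():", "-    return 1", "+    return 2"]
patch_bad = ["@@ -1,2 +1,2 @@", " def bar():", "-    return 1", "+    return 2"]


def first_context_matches(patch_lines, start1):
    if len(patch_lines) > 1 and patch_lines[1][0] == ' ':  # an existing line in the file
        return patch_lines[1].strip() == original_lines[start1 - 1].strip()
    return True


print(first_context_matches(patch_ok, 1))   # True  -> safe to extend this hunk
print(first_context_matches(patch_bad, 1))  # False -> leave this hunk as-is
```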
 def extract_hunk_headers(match):
     res = list(match.groups())
     for i in range(len(res)):
@@ -281,7 +301,7 @@ __old hunk__
     prev_header_line = []
     header_line = []
     for line_i, line in enumerate(patch_lines):
-        if 'no newline at end of file' in line.lower().strip().strip('//'):
+        if 'no newline at end of file' in line.lower():
             continue
 
         if line.startswith('@@'):
@@ -290,18 +310,19 @@ __old hunk__
             if match and (new_content_lines or old_content_lines):  # found a new hunk, split the previous lines
                 if prev_header_line:
                     patch_with_lines_str += f'\n{prev_header_line}\n'
+                is_plus_lines = is_minus_lines = False
                 if new_content_lines:
                     is_plus_lines = any([line.startswith('+') for line in new_content_lines])
-                    if is_plus_lines:
-                        patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
-                        for i, line_new in enumerate(new_content_lines):
-                            patch_with_lines_str += f"{start2 + i} {line_new}\n"
                 if old_content_lines:
                     is_minus_lines = any([line.startswith('-') for line in old_content_lines])
-                    if is_minus_lines:
-                        patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
-                        for line_old in old_content_lines:
-                            patch_with_lines_str += f"{line_old}\n"
+                if is_plus_lines or is_minus_lines:  # notice 'True' here - we always present __new hunk__ for section, otherwise LLM gets confused
+                    patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
+                    for i, line_new in enumerate(new_content_lines):
+                        patch_with_lines_str += f"{start2 + i} {line_new}\n"
+                if is_minus_lines:
+                    patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
+                    for line_old in old_content_lines:
+                        patch_with_lines_str += f"{line_old}\n"
                 new_content_lines = []
                 old_content_lines = []
             if match:
@@ -325,18 +346,19 @@ __old hunk__
     # finishing last hunk
     if match and new_content_lines:
         patch_with_lines_str += f'\n{header_line}\n'
+        is_plus_lines = is_minus_lines = False
         if new_content_lines:
             is_plus_lines = any([line.startswith('+') for line in new_content_lines])
-            if is_plus_lines:
-                patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
-                for i, line_new in enumerate(new_content_lines):
-                    patch_with_lines_str += f"{start2 + i} {line_new}\n"
         if old_content_lines:
             is_minus_lines = any([line.startswith('-') for line in old_content_lines])
-            if is_minus_lines:
-                patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
-                for line_old in old_content_lines:
-                    patch_with_lines_str += f"{line_old}\n"
+        if is_plus_lines or is_minus_lines:  # notice 'True' here - we always present __new hunk__ for section, otherwise LLM gets confused
+            patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
+            for i, line_new in enumerate(new_content_lines):
+                patch_with_lines_str += f"{start2 + i} {line_new}\n"
+        if is_minus_lines:
+            patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
+            for line_old in old_content_lines:
+                patch_with_lines_str += f"{line_old}\n"
 
     return patch_with_lines_str.rstrip()
 
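The restructured loop above changes the output contract: a changed section now always gets a `__new hunk__` block (with 1-based target line numbers), and an `__old hunk__` block only when lines were removed. A self-contained sketch of that rendering over a synthetic section:

```python
# Sketch of the output convention enforced above: '__new hunk__' is always
# emitted for a changed section, '__old hunk__' only when lines were removed.
start2 = 12  # target-file start line from the hunk header (example value)
new_content_lines = [" ctx", "+added line", " ctx2"]
old_content_lines = [" ctx", "-removed line", " ctx2"]

out = ""
is_plus_lines = any(line.startswith('+') for line in new_content_lines)
is_minus_lines = any(line.startswith('-') for line in old_content_lines)
if is_plus_lines or is_minus_lines:
    out += '__new hunk__\n'
    for i, line_new in enumerate(new_content_lines):
        out += f"{start2 + i} {line_new}\n"
if is_minus_lines:
    out += '__old hunk__\n'
    for line_old in old_content_lines:
        out += f"{line_old}\n"
print(out)
```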
@@ -4,8 +4,6 @@ from typing import Dict
 from pr_agent.config_loader import get_settings
 
-
-
 
 def filter_bad_extensions(files):
     # Bad Extensions, source: https://github.com/EleutherAI/github-downloader/blob/345e7c4cbb9e0dc8a0615fd995a08bf9d73b3fe6/download_repo_text.py  # noqa: E501
     bad_extensions = get_settings().bad_extensions.default
@@ -5,14 +5,15 @@ from typing import Callable, List, Tuple
 
 from github import RateLimitExceededException
 
-from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
-from pr_agent.algo.language_handler import sort_files_by_main_languages
 from pr_agent.algo.file_filter import filter_ignored
+from pr_agent.algo.git_patch_processing import (
+    convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions)
+from pr_agent.algo.language_handler import sort_files_by_main_languages
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import get_max_tokens, clip_tokens, ModelType
+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.algo.utils import ModelType, clip_tokens, get_max_tokens
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.git_provider import GitProvider
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.log import get_logger
 
 DELETED_FILES_ = "Deleted files:\n"
@@ -1,8 +1,9 @@
-from jinja2 import Environment, StrictUndefined
-from tiktoken import encoding_for_model, get_encoding
-from pr_agent.config_loader import get_settings
 from threading import Lock
 
+from jinja2 import Environment, StrictUndefined
+from tiktoken import encoding_for_model, get_encoding
+
+from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
 
@@ -1,28 +1,32 @@
 from __future__ import annotations
-import html2text
 
-import html
 import copy
 import difflib
+import hashlib
+import html
 import json
 import os
 import re
 import textwrap
 import time
+import traceback
 from datetime import datetime
 from enum import Enum
 from typing import Any, List, Tuple
 
+import html2text
+import requests
 import yaml
 from pydantic import BaseModel
 from starlette_context import context
 
 from pr_agent.algo import MAX_TOKENS
 from pr_agent.algo.token_handler import TokenEncoder
-from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.algo.types import FilePatchInfo
+from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.log import get_logger
 
 
 class Range(BaseModel):
     line_start: int  # should be 0-indexed
     line_end: int
@@ -39,6 +43,10 @@ class PRReviewHeader(str, Enum):
     INCREMENTAL = "## Incremental PR Reviewer Guide"
 
 
+class PRDescriptionHeader(str, Enum):
+    CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"
+
+
 def get_setting(key: str) -> Any:
     try:
         key = key.upper()
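Because `PRDescriptionHeader` subclasses `str`, its value doubles as a plain string marker, so the same constant can be used both when building a description and when splitting it back apart. A small usage sketch (the sample description is synthetic):

```python
# A str-Enum keeps the walkthrough marker in one place, so producers and
# parsers of the PR description cannot drift apart.
from enum import Enum


class PRDescriptionHeader(str, Enum):
    CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"


description = "Summary text\n" + PRDescriptionHeader.CHANGES_WALKTHROUGH.value + "\nfile table..."
base, _, walkthrough = description.partition(PRDescriptionHeader.CHANGES_WALKTHROUGH.value)
print(base.strip())       # 'Summary text'
print(bool(walkthrough))  # True: a walkthrough section is present
```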
@@ -110,6 +118,7 @@ def convert_to_markdown_v2(output_data: dict,
         "Insights from user's answers": "📝",
         "Code feedback": "🤖",
         "Estimated effort to review [1-5]": "⏱️",
+        "Ticket compliance check": "🎫",
     }
     markdown_text = ""
     if not incremental_review:
@@ -164,7 +173,9 @@ def convert_to_markdown_v2(output_data: dict,
             if is_value_no(value):
                 markdown_text += f'### {emoji} No relevant tests\n\n'
             else:
-                markdown_text += f"### PR contains tests\n\n"
+                markdown_text += f"### {emoji} PR contains tests\n\n"
+        elif 'ticket compliance check' in key_nice.lower():
+            markdown_text = ticket_markdown_logic(emoji, markdown_text, value, gfm_supported)
         elif 'security concerns' in key_nice.lower():
             if gfm_supported:
                 markdown_text += f"<tr><td>"
@@ -213,12 +224,21 @@ def convert_to_markdown_v2(output_data: dict,
                     issue_content = issue.get('issue_content', '').strip()
                     start_line = int(str(issue.get('start_line', 0)).strip())
                     end_line = int(str(issue.get('end_line', 0)).strip())
-                    reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
+                    if git_provider:
+                        reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
+                    else:
+                        reference_link = None
 
                     if gfm_supported:
-                        issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
+                        if reference_link is not None and len(reference_link) > 0:
+                            issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
+                        else:
+                            issue_str = f"<strong>{issue_header}</strong><br>{issue_content}"
                     else:
-                        issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
+                        if reference_link is not None and len(reference_link) > 0:
+                            issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
+                        else:
+                            issue_str = f"**{issue_header}**\n\n{issue_content}\n\n"
                     markdown_text += f"{issue_str}\n\n"
                 except Exception as e:
                     get_logger().exception(f"Failed to process 'Recommended focus areas for review': {e}")
@@ -254,6 +274,52 @@ def convert_to_markdown_v2(output_data: dict,
     return markdown_text
 
 
+def ticket_markdown_logic(emoji, markdown_text, value, gfm_supported) -> str:
+    ticket_compliance_str = ""
+    final_compliance_level = -1
+    if isinstance(value, list):
+        for v in value:
+            ticket_url = v.get('ticket_url', '').strip()
+            compliance_level = v.get('overall_compliance_level', '').strip()
+            # add emojis, if 'Fully compliant' ✅, 'Partially compliant' 🔶, or 'Not compliant' ❌
+            if compliance_level.lower() == 'fully compliant':
+                # compliance_level = '✅ Fully compliant'
+                final_compliance_level = 2 if final_compliance_level == -1 else 1
+            elif compliance_level.lower() == 'partially compliant':
+                # compliance_level = '🔶 Partially compliant'
+                final_compliance_level = 1
+            elif compliance_level.lower() == 'not compliant':
+                # compliance_level = '❌ Not compliant'
+                final_compliance_level = 0 if final_compliance_level < 1 else 1
+
+            # explanation = v.get('compliance_analysis', '').strip()
+            explanation = ''
+            fully_compliant_str = v.get('fully_compliant_requirements', '').strip()
+            not_compliant_str = v.get('not_compliant_requirements', '').strip()
+            if fully_compliant_str:
+                explanation += f"Fully compliant requirements:\n{fully_compliant_str}\n\n"
+            if not_compliant_str:
+                explanation += f"Not compliant requirements:\n{not_compliant_str}\n\n"
+
+            ticket_compliance_str += f"\n\n**[{ticket_url.split('/')[-1]}]({ticket_url}) - {compliance_level}**\n\n{explanation}\n\n"
+    if final_compliance_level == 2:
+        compliance_level = '✅'
+    elif final_compliance_level == 1:
+        compliance_level = '🔶'
+    else:
+        compliance_level = '❌'
+
+    if gfm_supported:
+        markdown_text += f"<tr><td>\n\n"
+        markdown_text += f"**{emoji} Ticket compliance analysis {compliance_level}**\n\n"
+        markdown_text += ticket_compliance_str
+        markdown_text += f"</td></tr>\n"
+    else:
+        markdown_text += f"### {emoji} Ticket compliance analysis {compliance_level}\n\n"
+        markdown_text += ticket_compliance_str + "\n\n"
+    return markdown_text
+
+
 def process_can_be_split(emoji, value):
     try:
         # key_nice = "Can this PR be split?"
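For reference, a hypothetical input shaped the way `ticket_markdown_logic` expects; the field names mirror the `v.get(...)` calls above, and the URL is a placeholder:

```python
# Hypothetical payload for ticket_markdown_logic; all values are synthetic.
sample_value = [{
    'ticket_url': 'https://example.org/browse/PROJ-123',
    'overall_compliance_level': 'Partially compliant',
    'fully_compliant_requirements': '- input validation',
    'not_compliant_requirements': '- missing tests',
}]
# With gfm_supported=False the function would append a section like
# '### 🎫 Ticket compliance analysis 🔶' followed by the per-ticket details.
```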
@@ -554,7 +620,8 @@ def load_yaml(response_text: str, keys_fix_yaml: List[str] = [], first_key="", l
         get_logger().warning(f"Initial failure to parse AI prediction: {e}")
         data = try_fix_yaml(response_text, keys_fix_yaml=keys_fix_yaml, first_key=first_key, last_key=last_key)
         if not data:
-            get_logger().error(f"Failed to parse AI prediction after fallbacks", artifact={'response_text': response_text})
+            get_logger().error(f"Failed to parse AI prediction after fallbacks",
+                               artifact={'response_text': response_text})
         else:
             get_logger().info(f"Successfully parsed AI prediction after fallbacks",
                               artifact={'response_text': response_text})
@@ -841,56 +908,64 @@ def find_line_number_of_relevant_line_in_file(diff_files: List[FilePatchInfo],
             break
     return position, absolute_position
 
-def validate_and_await_rate_limit(rate_limit_status=None, git_provider=None, get_rate_limit_status_func=None):
-    if git_provider and not rate_limit_status:
-        rate_limit_status = {'resources': git_provider.github_client.get_rate_limit().raw_data}
-
-    if not rate_limit_status:
-        rate_limit_status = get_rate_limit_status_func()
-    # validate that the rate limit is not exceeded
-    is_rate_limit = False
-    for key, value in rate_limit_status['resources'].items():
-        if value['remaining'] == 0:
-            print(f"key: {key}, value: {value}")
-            is_rate_limit = True
-            sleep_time_sec = value['reset'] - datetime.now().timestamp()
-            sleep_time_hour = sleep_time_sec / 3600.0
-            print(f"Rate limit exceeded. Sleeping for {sleep_time_hour} hours")
-            if sleep_time_sec > 0:
-                time.sleep(sleep_time_sec+1)
-
-            if git_provider:
-                rate_limit_status = {'resources': git_provider.github_client.get_rate_limit().raw_data}
-            else:
-                rate_limit_status = get_rate_limit_status_func()
-
-    return is_rate_limit
+def get_rate_limit_status(github_token) -> dict:
+    GITHUB_API_URL = get_settings(use_context=False).get("GITHUB.BASE_URL", "https://api.github.com").rstrip("/")  # "https://api.github.com"
+    # GITHUB_API_URL = "https://api.github.com"
+    RATE_LIMIT_URL = f"{GITHUB_API_URL}/rate_limit"
+    HEADERS = {
+        "Accept": "application/vnd.github.v3+json",
+        "Authorization": f"token {github_token}"
+    }
+
+    response = requests.get(RATE_LIMIT_URL, headers=HEADERS)
+    try:
+        rate_limit_info = response.json()
+        if rate_limit_info.get('message') == 'Rate limiting is not enabled.':  # for github enterprise
+            return {'resources': {}}
+        response.raise_for_status()  # Check for HTTP errors
+    except:  # retry
+        time.sleep(0.1)
+        response = requests.get(RATE_LIMIT_URL, headers=HEADERS)
+        return response.json()
+    return rate_limit_info
+
+
+def validate_rate_limit_github(github_token, installation_id=None, threshold=0.1) -> bool:
+    try:
+        rate_limit_status = get_rate_limit_status(github_token)
+        if installation_id:
+            get_logger().debug(f"installation_id: {installation_id}, Rate limit status: {rate_limit_status['rate']}")
+        # validate that the rate limit is not exceeded
+        for key, value in rate_limit_status['resources'].items():
+            if value['remaining'] < value['limit'] * threshold:
+                get_logger().error(f"key: {key}, value: {value}")
+                return False
+        return True
+    except Exception as e:
+        get_logger().error(f"Error in rate limit {e}",
+                           artifact={"traceback": traceback.format_exc()})
+        return True
 
 
-def get_largest_component(pr_url):
-    from pr_agent.tools.pr_analyzer import PRAnalyzer
-    publish_output = get_settings().config.publish_output
-    get_settings().config.publish_output = False  # disable publish output
-    analyzer = PRAnalyzer(pr_url)
-    methods_dict_files = analyzer.run_sync()
-    get_settings().config.publish_output = publish_output
-    max_lines_changed = 0
-    file_b = ""
-    component_name_b = ""
-    for file in methods_dict_files:
-        for method in methods_dict_files[file]:
-            try:
-                if methods_dict_files[file][method]['num_plus_lines'] > max_lines_changed:
-                    max_lines_changed = methods_dict_files[file][method]['num_plus_lines']
-                    file_b = file
-                    component_name_b = method
-            except:
-                pass
-    if component_name_b:
-        get_logger().info(f"Using the largest changed component: '{component_name_b}'")
-        return component_name_b, file_b
-    else:
-        return None, None
+def validate_and_await_rate_limit(github_token):
+    try:
+        rate_limit_status = get_rate_limit_status(github_token)
+        # validate that the rate limit is not exceeded
+        for key, value in rate_limit_status['resources'].items():
+            if value['remaining'] < value['limit'] // 80:
+                get_logger().error(f"key: {key}, value: {value}")
+                sleep_time_sec = value['reset'] - datetime.now().timestamp()
+                sleep_time_hour = sleep_time_sec / 3600.0
+                get_logger().error(f"Rate limit exceeded. Sleeping for {sleep_time_hour} hours")
+                if sleep_time_sec > 0:
+                    time.sleep(sleep_time_sec + 1)
+        rate_limit_status = get_rate_limit_status(github_token)
+        return rate_limit_status
+    except:
+        get_logger().error("Error in rate limit")
+        return None
 
 
 def github_action_output(output_data: dict, key_name: str):
     try:
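The rewrite above drops the PyGithub `get_rate_limit()` call in favor of GitHub's REST `GET /rate_limit` endpoint and a relative headroom threshold. A standalone sketch of the same check; the token is a placeholder:

```python
# Sketch of the threshold check introduced above, against GitHub's public
# REST endpoint (GET /rate_limit). The token below is a placeholder.
import requests

resp = requests.get("https://api.github.com/rate_limit",
                    headers={"Accept": "application/vnd.github.v3+json",
                             "Authorization": "token ghp_placeholder"})
resources = resp.json().get('resources', {})
threshold = 0.1  # refuse to proceed with less than 10% of the quota left
ok = all(v['remaining'] >= v['limit'] * threshold for v in resources.values())
print("rate limit headroom ok:", ok)
```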
@@ -906,7 +981,7 @@ def github_action_output(output_data: dict, key_name: str):
 
 
 def show_relevant_configurations(relevant_section: str) -> str:
-    skip_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider', "skip_keys",
+    skip_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider', "skip_keys", "app_id", "redirect",
                  'trial_prefix_message', 'no_eligible_message', 'identity_provider', 'ALLOWED_REPOS','APP_NAME']
     extra_skip_keys = get_settings().config.get('config.skip_keys', [])
     if extra_skip_keys:
@@ -939,12 +1014,30 @@ def is_value_no(value):
     return False
 
 
+def set_pr_string(repo_name, pr_number):
+    return f"{repo_name}#{pr_number}"
+
+
+def string_to_uniform_number(s: str) -> float:
+    """
+    Convert a string to a uniform number in the range [0, 1].
+    The uniform distribution is achieved by the nature of the SHA-256 hash function, which produces a uniformly distributed hash value over its output space.
+    """
+    # Generate a hash of the string
+    hash_object = hashlib.sha256(s.encode())
+    # Convert the hash to an integer
+    hash_int = int(hash_object.hexdigest(), 16)
+    # Normalize the integer to the range [0, 1]
+    max_hash_int = 2 ** 256 - 1
+    uniform_number = float(hash_int) / max_hash_int
+    return uniform_number
+
+
 def process_description(description_full: str) -> Tuple[str, List]:
     if not description_full:
         return "", []
 
-    split_str = "### **Changes walkthrough** 📝"
-    description_split = description_full.split(split_str)
+    description_split = description_full.split(PRDescriptionHeader.CHANGES_WALKTHROUGH.value)
     base_description_str = description_split[0]
     changes_walkthrough_str = ""
     files = []
|
|||||||
if not res or res.lastindex != 4:
|
if not res or res.lastindex != 4:
|
||||||
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>'
|
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>'
|
||||||
res = re.search(pattern_back, file_data, re.DOTALL)
|
res = re.search(pattern_back, file_data, re.DOTALL)
|
||||||
|
if not res or res.lastindex != 4:
|
||||||
|
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>' # looking for hypen ('- ')
|
||||||
|
res = re.search(pattern_back, file_data, re.DOTALL)
|
||||||
if res and res.lastindex == 4:
|
if res and res.lastindex == 4:
|
||||||
short_filename = res.group(1).strip()
|
short_filename = res.group(1).strip()
|
||||||
short_summary = res.group(2).strip()
|
short_summary = res.group(2).strip()
|
||||||
@ -998,7 +1094,10 @@ def process_description(description_full: str) -> Tuple[str, List]:
|
|||||||
'long_summary': long_summary
|
'long_summary': long_summary
|
||||||
})
|
})
|
||||||
else:
|
else:
|
||||||
get_logger().error(f"Failed to parse description", artifact={'description': file_data})
|
if '<code>...</code>' in file_data:
|
||||||
|
pass # PR with many files. some did not get analyzed
|
||||||
|
else:
|
||||||
|
get_logger().error(f"Failed to parse description", artifact={'description': file_data})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
get_logger().exception(f"Failed to process description: {e}", artifact={'description': file_data})
|
get_logger().exception(f"Failed to process description: {e}", artifact={'description': file_data})
|
||||||
|
|
||||||
|
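The added fallback pattern handles summaries rendered with a hyphen instead of a blank line. The parsing strategy amounts to trying progressively looser patterns and keeping the first full match, roughly like this sketch over a synthetic `<details>` block:

```python
# Sketch of the fallback chain above: try patterns in order and keep the
# first that captures all four groups. The sample HTML is synthetic.
import re

file_data = ("<details><summary><strong>utils.py</strong><dd><code>refactor</code>"
             "</summary><hr>short\n\n- long</details>")
patterns = [
    r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>',
    r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>',
]
res = None
for pattern in patterns:
    res = re.search(pattern, file_data, re.DOTALL)
    if res and res.lastindex == 4:
        break
print(res.groups() if res else "unparsed")
```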
@@ -4,7 +4,7 @@ import os
 
 from pr_agent.agent.pr_agent import PRAgent, commands
 from pr_agent.config_loader import get_settings
-from pr_agent.log import setup_logger, get_logger
+from pr_agent.log import get_logger, setup_logger
 
 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)
@@ -1,14 +1,16 @@
+from starlette_context import context
+
 from pr_agent.config_loader import get_settings
+from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
 from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
-from pr_agent.git_providers.bitbucket_server_provider import BitbucketServerProvider
+from pr_agent.git_providers.bitbucket_server_provider import \
+    BitbucketServerProvider
 from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
+from pr_agent.git_providers.gerrit_provider import GerritProvider
 from pr_agent.git_providers.git_provider import GitProvider
 from pr_agent.git_providers.github_provider import GithubProvider
 from pr_agent.git_providers.gitlab_provider import GitLabProvider
 from pr_agent.git_providers.local_git_provider import LocalGitProvider
-from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
-from pr_agent.git_providers.gerrit_provider import GerritProvider
-from starlette_context import context
 
 _GIT_PROVIDERS = {
     'github': GithubProvider,
@@ -2,33 +2,33 @@ import os
 from typing import Optional, Tuple
 from urllib.parse import urlparse
 
-from ..algo.file_filter import filter_ignored
-from ..log import get_logger
-from ..algo.language_handler import is_valid_file
-from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff
-from ..config_loader import get_settings
-from .git_provider import GitProvider
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 
+from ..algo.file_filter import filter_ignored
+from ..algo.language_handler import is_valid_file
+from ..algo.utils import (PRDescriptionHeader, clip_tokens,
+                          find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
+from ..config_loader import get_settings
+from ..log import get_logger
+from .git_provider import GitProvider
+
 AZURE_DEVOPS_AVAILABLE = True
 ADO_APP_CLIENT_DEFAULT_ID = "499b84ac-1321-427f-aa17-267ca6975798/.default"
 MAX_PR_DESCRIPTION_AZURE_LENGTH = 4000-1
 
 try:
     # noinspection PyUnresolvedReferences
-    from msrest.authentication import BasicAuthentication
     # noinspection PyUnresolvedReferences
     from azure.devops.connection import Connection
     # noinspection PyUnresolvedReferences
-    from azure.identity import DefaultAzureCredential
+    from azure.devops.v7_1.git.models import (Comment, CommentThread,
+                                              GitPullRequest,
+                                              GitPullRequestIterationChanges,
+                                              GitVersionDescriptor)
     # noinspection PyUnresolvedReferences
-    from azure.devops.v7_1.git.models import (
-        Comment,
-        CommentThread,
-        GitVersionDescriptor,
-        GitPullRequest,
-        GitPullRequestIterationChanges,
-    )
+    from azure.identity import DefaultAzureCredential
+    from msrest.authentication import BasicAuthentication
 except ImportError:
     AZURE_DEVOPS_AVAILABLE = False
 
@@ -67,16 +67,14 @@ class AzureDevopsProvider(GitProvider):
             relevant_lines_end = suggestion['relevant_lines_end']
 
             if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(
-                        f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
+                get_logger().warning(
+                    f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
                 continue
 
             if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(f"Failed to publish code suggestion, "
-                                           f"relevant_lines_end is {relevant_lines_end} and "
-                                           f"relevant_lines_start is {relevant_lines_start}")
+                get_logger().warning(f"Failed to publish code suggestion, "
+                                     f"relevant_lines_end is {relevant_lines_end} and "
+                                     f"relevant_lines_start is {relevant_lines_start}")
                 continue
 
             if relevant_lines_end > relevant_lines_start:
@@ -95,9 +93,11 @@ class AzureDevopsProvider(GitProvider):
                     "side": "RIGHT",
                 }
                 post_parameters_list.append(post_parameters)
+        if not post_parameters_list:
+            return False
 
-        try:
-            for post_parameters in post_parameters_list:
+        for post_parameters in post_parameters_list:
+            try:
                 comment = Comment(content=post_parameters["body"], comment_type=1)
                 thread = CommentThread(comments=[comment],
                                        thread_context={
@@ -117,15 +117,11 @@ class AzureDevopsProvider(GitProvider):
                     repository_id=self.repo_slug,
                     pull_request_id=self.pr_num
                 )
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(
-                        f"Published code suggestion on {self.pr_num} at {post_parameters['path']}"
-                    )
-            return True
-        except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish code suggestion, error: {e}")
-            return False
+            except Exception as e:
+                get_logger().warning(f"Azure failed to publish code suggestion, error: {e}")
+        return True
 
     def get_pr_description_full(self) -> str:
         return self.pr.description
@@ -336,19 +332,22 @@ class AzureDevopsProvider(GitProvider):
             version = GitVersionDescriptor(
                 version=base_sha.commit_id, version_type="commit"
             )
-            try:
-                original_file_content_str = self.azure_devops_client.get_item(
-                    repository_id=self.repo_slug,
-                    path=file,
-                    project=self.workspace_slug,
-                    version_descriptor=version,
-                    download=False,
-                    include_content=True,
-                )
-                original_file_content_str = original_file_content_str.content
-            except Exception as error:
-                get_logger().error(f"Failed to retrieve original file content of {file} at version {version}", error=error)
+            if edit_type == EDIT_TYPE.ADDED:
                 original_file_content_str = ""
+            else:
+                try:
+                    original_file_content_str = self.azure_devops_client.get_item(
+                        repository_id=self.repo_slug,
+                        path=file,
+                        project=self.workspace_slug,
+                        version_descriptor=version,
+                        download=False,
+                        include_content=True,
+                    )
+                    original_file_content_str = original_file_content_str.content
+                except Exception as error:
+                    get_logger().error(f"Failed to retrieve original file content of {file} at version {version}", error=error)
+                    original_file_content_str = ""
 
             patch = load_large_diff(
                 file, new_file_content_str, original_file_content_str, show_warning=False
@@ -379,6 +378,9 @@ class AzureDevopsProvider(GitProvider):
         return []
 
     def publish_comment(self, pr_comment: str, is_temporary: bool = False, thread_context=None):
+        if is_temporary and not get_settings().config.publish_output_progress:
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            return None
         comment = Comment(content=pr_comment)
         thread = CommentThread(comments=[comment], thread_context=thread_context, status=5)
         thread_response = self.azure_devops_client.create_thread(
@@ -401,7 +403,7 @@ class AzureDevopsProvider(GitProvider):
             pr_body = pr_body[:ind]
 
         if len(pr_body) > MAX_PR_DESCRIPTION_AZURE_LENGTH:
-            changes_walkthrough_text = '## **Changes walkthrough**'
+            changes_walkthrough_text = PRDescriptionHeader.CHANGES_WALKTHROUGH.value
             ind = pr_body.find(changes_walkthrough_text)
             if ind != -1:
                 pr_body = pr_body[:ind]
|
|||||||
|
|
||||||
def publish_file_comments(self, file_comments: list) -> bool:
|
def publish_file_comments(self, file_comments: list) -> bool:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
@ -1,4 +1,6 @@
|
|||||||
|
import difflib
|
||||||
import json
|
import json
|
||||||
|
import re
|
||||||
from typing import Optional, Tuple
|
from typing import Optional, Tuple
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
@ -6,13 +8,14 @@ import requests
|
|||||||
from atlassian.bitbucket import Cloud
|
from atlassian.bitbucket import Cloud
|
||||||
from starlette_context import context
|
from starlette_context import context
|
||||||
|
|
||||||
from pr_agent.algo.types import FilePatchInfo, EDIT_TYPE
|
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
|
||||||
|
|
||||||
from ..algo.file_filter import filter_ignored
|
from ..algo.file_filter import filter_ignored
|
||||||
from ..algo.language_handler import is_valid_file
|
from ..algo.language_handler import is_valid_file
|
||||||
from ..algo.utils import find_line_number_of_relevant_line_in_file
|
from ..algo.utils import find_line_number_of_relevant_line_in_file
|
||||||
from ..config_loader import get_settings
|
from ..config_loader import get_settings
|
||||||
from ..log import get_logger
|
from ..log import get_logger
|
||||||
from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
|
from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider
|
||||||
|
|
||||||
|
|
||||||
def _gef_filename(diff):
|
def _gef_filename(diff):
|
||||||
@ -71,24 +74,38 @@ class BitbucketProvider(GitProvider):
|
|||||||
post_parameters_list = []
|
post_parameters_list = []
|
||||||
for suggestion in code_suggestions:
|
for suggestion in code_suggestions:
|
||||||
body = suggestion["body"]
|
body = suggestion["body"]
|
||||||
|
original_suggestion = suggestion.get('original_suggestion', None) # needed for diff code
|
||||||
|
if original_suggestion:
|
||||||
|
try:
|
||||||
|
existing_code = original_suggestion['existing_code'].rstrip() + "\n"
|
||||||
|
improved_code = original_suggestion['improved_code'].rstrip() + "\n"
|
||||||
|
diff = difflib.unified_diff(existing_code.split('\n'),
|
||||||
|
improved_code.split('\n'), n=999)
|
||||||
|
patch_orig = "\n".join(diff)
|
||||||
|
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
|
||||||
|
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
|
||||||
|
# replace ```suggestion ... ``` with diff_code, using regex:
|
||||||
|
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
|
||||||
|
except Exception as e:
|
||||||
|
get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
relevant_file = suggestion["relevant_file"]
|
relevant_file = suggestion["relevant_file"]
|
||||||
relevant_lines_start = suggestion["relevant_lines_start"]
|
relevant_lines_start = suggestion["relevant_lines_start"]
|
||||||
relevant_lines_end = suggestion["relevant_lines_end"]
|
relevant_lines_end = suggestion["relevant_lines_end"]
|
||||||
|
|
||||||
if not relevant_lines_start or relevant_lines_start == -1:
|
if not relevant_lines_start or relevant_lines_start == -1:
|
||||||
if get_settings().config.verbosity_level >= 2:
|
get_logger().exception(
|
||||||
get_logger().exception(
|
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
|
||||||
f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
|
)
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if relevant_lines_end < relevant_lines_start:
|
if relevant_lines_end < relevant_lines_start:
|
||||||
if get_settings().config.verbosity_level >= 2:
|
get_logger().exception(
|
||||||
get_logger().exception(
|
f"Failed to publish code suggestion, "
|
||||||
f"Failed to publish code suggestion, "
|
f"relevant_lines_end is {relevant_lines_end} and "
|
||||||
f"relevant_lines_end is {relevant_lines_end} and "
|
f"relevant_lines_start is {relevant_lines_start}"
|
||||||
f"relevant_lines_start is {relevant_lines_start}"
|
)
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if relevant_lines_end > relevant_lines_start:
|
if relevant_lines_end > relevant_lines_start:
|
||||||
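Bitbucket does not render GitHub-style suggestion blocks, so the new code rewrites each one into a fenced diff via `difflib.unified_diff`. A standalone sketch of the same transformation over synthetic strings (the fence is assembled from parts only so the example nests cleanly in this document):

```python
# Standalone sketch of the suggestion-to-diff rewrite added above.
import difflib
import re

existing_code = "def add(a, b):\n    return a - b\n"
improved_code = "def add(a, b):\n    return a + b\n"
diff = difflib.unified_diff(existing_code.split('\n'),
                            improved_code.split('\n'), n=999)
patch_orig = "\n".join(diff)
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')  # drop the ---/+++/@@ header

fence = "`" * 3  # built from parts so this example does not break the fence here
body = f"Consider fixing the operator:\n{fence}suggestion\nreturn a + b\n{fence}"
diff_code = f"\n\n{fence}diff\n{patch.rstrip()}\n{fence}"
body = re.sub(fence + r'suggestion.*?' + fence, diff_code, body, flags=re.DOTALL)
print(body)
```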
@@ -112,8 +129,7 @@ class BitbucketProvider(GitProvider):
             self.publish_inline_comments(post_parameters_list)
             return True
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish code suggestion, error: {e}")
+            get_logger().error(f"Bitbucket failed to publish code suggestion, error: {e}")
             return False
 
     def publish_file_comments(self, file_comments: list) -> bool:
@@ -121,7 +137,7 @@ class BitbucketProvider(GitProvider):
 
     def is_supported(self, capability: str) -> bool:
         if capability in ['get_issue_comments', 'publish_inline_comments', 'get_labels', 'gfm_markdown',
                           'publish_file_comments']:
             return False
         return True
 
@@ -309,6 +325,9 @@ class BitbucketProvider(GitProvider):
             self.publish_comment(pr_comment)
 
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
+        if is_temporary and not get_settings().config.publish_output_progress:
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            return None
         pr_comment = self.limit_output_characters(pr_comment, self.max_comment_length)
         comment = self.pr.comment(pr_comment)
         if is_temporary:
@@ -1,16 +1,21 @@
-from distutils.version import LooseVersion
-from requests.exceptions import HTTPError
+import difflib
+import re
+
+from packaging.version import parse as parse_version
 from typing import Optional, Tuple
 from urllib.parse import quote_plus, urlparse
 
 from atlassian.bitbucket import Bitbucket
+from requests.exceptions import HTTPError
 
-from .git_provider import GitProvider
-from ..algo.types import EDIT_TYPE, FilePatchInfo
+from ..algo.git_patch_processing import decode_if_bytes
 from ..algo.language_handler import is_valid_file
-from ..algo.utils import load_large_diff, find_line_number_of_relevant_line_in_file
+from ..algo.types import EDIT_TYPE, FilePatchInfo
+from ..algo.utils import (find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
 from ..config_loader import get_settings
 from ..log import get_logger
+from .git_provider import GitProvider
 
 
 class BitbucketServerProvider(GitProvider):
@@ -35,7 +40,7 @@ class BitbucketServerProvider(GitProvider):
                                              token=get_settings().get("BITBUCKET_SERVER.BEARER_TOKEN",
                                                                       None))
         try:
-            self.bitbucket_api_version = LooseVersion(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
+            self.bitbucket_api_version = parse_version(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
         except Exception:
             self.bitbucket_api_version = None
 
@@ -65,24 +70,37 @@ class BitbucketServerProvider(GitProvider):
         post_parameters_list = []
         for suggestion in code_suggestions:
             body = suggestion["body"]
+            original_suggestion = suggestion.get('original_suggestion', None)  # needed for diff code
+            if original_suggestion:
+                try:
+                    existing_code = original_suggestion['existing_code'].rstrip() + "\n"
+                    improved_code = original_suggestion['improved_code'].rstrip() + "\n"
+                    diff = difflib.unified_diff(existing_code.split('\n'),
+                                                improved_code.split('\n'), n=999)
+                    patch_orig = "\n".join(diff)
+                    patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+                    diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
+                    # replace ```suggestion ... ``` with diff_code, using regex:
+                    body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
+                except Exception as e:
+                    get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
+                    continue
             relevant_file = suggestion["relevant_file"]
             relevant_lines_start = suggestion["relevant_lines_start"]
             relevant_lines_end = suggestion["relevant_lines_end"]
 
             if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().warning(
-                        f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
-                    )
+                get_logger().exception(
+                    f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
+                )
                 continue
 
             if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().warning(
-                        f"Failed to publish code suggestion, "
-                        f"relevant_lines_end is {relevant_lines_end} and "
-                        f"relevant_lines_start is {relevant_lines_start}"
-                    )
+                get_logger().exception(
+                    f"Failed to publish code suggestion, "
+                    f"relevant_lines_end is {relevant_lines_end} and "
+                    f"relevant_lines_start is {relevant_lines_start}"
+                )
                 continue
 
             if relevant_lines_end > relevant_lines_start:
@@ -159,7 +177,7 @@ class BitbucketServerProvider(GitProvider):
         head_sha = self.pr.fromRef['latestCommit']
 
         # if Bitbucket api version is >= 8.16 then use the merge-base api for 2-way diff calculation
-        if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("8.16"):
+        if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("8.16"):
            try:
                 base_sha = self.bitbucket_client.get(self._get_merge_base())['id']
             except Exception as e:
@@ -174,7 +192,7 @@ class BitbucketServerProvider(GitProvider):
             # if Bitbucket api version is None or < 7.0 then do a simple diff with a guaranteed common ancestor
             base_sha = source_commits_list[-1]['parents'][0]['id']
             # if Bitbucket api version is 7.0-8.15 then use 2-way diff functionality for the base_sha
-            if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("7.0"):
+            if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("7.0"):
                 try:
                     destination_commits = list(
                         self.bitbucket_client.get_commits(self.workspace_slug, self.repo_slug, base_sha,
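`LooseVersion` comes from `distutils`, which is removed in Python 3.12, hence the switch to `packaging.version.parse`. The version gates above reduce to a comparison like this sketch:

```python
# Sketch of the version gate above, using packaging instead of the removed
# distutils.LooseVersion. The sample version string is synthetic.
from packaging.version import parse as parse_version

api_version = parse_version("8.16.2")
if api_version >= parse_version("8.16"):
    print("use merge-base API for 2-way diff")
elif api_version >= parse_version("7.0"):
    print("use 2-way diff base_sha lookup")
else:
    print("fall back to first-parent ancestor diff")
```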
@@ -200,25 +218,21 @@ class BitbucketServerProvider(GitProvider):
                 case 'ADD':
                     edit_type = EDIT_TYPE.ADDED
                     new_file_content_str = self.get_file(file_path, head_sha)
-                    if isinstance(new_file_content_str, (bytes, bytearray)):
-                        new_file_content_str = new_file_content_str.decode("utf-8")
+                    new_file_content_str = decode_if_bytes(new_file_content_str)
                     original_file_content_str = ""
                 case 'DELETE':
                     edit_type = EDIT_TYPE.DELETED
                     new_file_content_str = ""
                     original_file_content_str = self.get_file(file_path, base_sha)
-                    if isinstance(original_file_content_str, (bytes, bytearray)):
-                        original_file_content_str = original_file_content_str.decode("utf-8")
+                    original_file_content_str = decode_if_bytes(original_file_content_str)
                 case 'RENAME':
                     edit_type = EDIT_TYPE.RENAMED
                 case _:
                     edit_type = EDIT_TYPE.MODIFIED
                     original_file_content_str = self.get_file(file_path, base_sha)
-                    if isinstance(original_file_content_str, (bytes, bytearray)):
-                        original_file_content_str = original_file_content_str.decode("utf-8")
+                    original_file_content_str = decode_if_bytes(original_file_content_str)
                     new_file_content_str = self.get_file(file_path, head_sha)
-                    if isinstance(new_file_content_str, (bytes, bytearray)):
-                        new_file_content_str = new_file_content_str.decode("utf-8")
+                    new_file_content_str = decode_if_bytes(new_file_content_str)

                 patch = load_large_diff(file_path, new_file_content_str, original_file_content_str)

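The repeated `isinstance(...)` / `.decode("utf-8")` pairs collapse into one `decode_if_bytes` call per file read. The helper's definition is not part of this diff; one plausible shape, offered only as a sketch:

```python
# A plausible shape for the decode_if_bytes helper used above (a sketch;
# the real helper lives elsewhere in the codebase and may differ).
def decode_if_bytes(content):
    # Provider APIs may hand back str or raw bytes depending on the endpoint;
    # normalize to str, falling back to a lenient decode on bad UTF-8.
    if isinstance(content, (bytes, bytearray)):
        try:
            return content.decode("utf-8")
        except UnicodeDecodeError:
            return content.decode("utf-8", errors="replace")
    return content

assert decode_if_bytes(b"hello") == "hello"
assert decode_if_bytes("hello") == "hello"
```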
@@ -329,10 +343,10 @@ class BitbucketServerProvider(GitProvider):
         for comment in comments:
             if 'position' in comment:
                 self.publish_inline_comment(comment['body'], comment['position'], comment['path'])
             elif 'start_line' in comment:  # multi-line comment
                 # note that bitbucket does not seem to support range - only a comment on a single line - https://community.developer.atlassian.com/t/api-post-endpoint-for-inline-pull-request-comments/60452
                 self.publish_inline_comment(comment['body'], comment['start_line'], comment['path'])
             elif 'line' in comment:  # single-line comment
                 self.publish_inline_comment(comment['body'], comment['line'], comment['path'])
             else:
                 get_logger().error(f"Could not publish inline comment: {comment}")
@@ -4,13 +4,15 @@ from collections import Counter
 from typing import List, Optional, Tuple
 from urllib.parse import urlparse

-from pr_agent.git_providers.codecommit_client import CodeCommitClient
+from pr_agent.algo.language_handler import is_valid_file
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.git_providers.codecommit_client import CodeCommitClient
+
 from ..algo.utils import load_large_diff
-from .git_provider import GitProvider
 from ..config_loader import get_settings
 from ..log import get_logger
-from pr_agent.algo.language_handler import is_valid_file
+from .git_provider import GitProvider


 class PullRequestCCMimic:
     """
@@ -12,9 +12,9 @@ import requests
 import urllib3.util
 from git import Repo

+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.git_provider import GitProvider
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.git_providers.local_git_provider import PullRequestMimic
 from pr_agent.log import get_logger

@@ -1,12 +1,12 @@
 from abc import ABC, abstractmethod

 # enum EDIT_TYPE (ADDED, DELETED, MODIFIED, RENAMED)
 from typing import Optional

+from pr_agent.algo.types import FilePatchInfo
 from pr_agent.algo.utils import Range, process_description
 from pr_agent.config_loader import get_settings
-from pr_agent.algo.types import FilePatchInfo
 from pr_agent.log import get_logger

 MAX_FILES_ALLOWED_FULL = 50

 class GitProvider(ABC):
@@ -62,8 +62,8 @@ class GitProvider(ABC):
         pass

     def get_pr_description(self, full: bool = True, split_changes_walkthrough=False) -> str or tuple:
-        from pr_agent.config_loader import get_settings
         from pr_agent.algo.utils import clip_tokens
+        from pr_agent.config_loader import get_settings
         max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
         description = self.get_pr_description_full() if full else self.get_user_description()
         if split_changes_walkthrough:
@@ -1,22 +1,30 @@
-import itertools
-import time
+import copy
+import difflib
 import hashlib
+import itertools
+import re
+import time
+import traceback
 from datetime import datetime
 from typing import Optional, Tuple
 from urllib.parse import urlparse

-from github import AppAuthentication, Auth, Github, GithubException
+from github import AppAuthentication, Auth, Github
 from retry import retry
 from starlette_context import context

 from ..algo.file_filter import filter_ignored
+from ..algo.git_patch_processing import extract_hunk_headers
 from ..algo.language_handler import is_valid_file
-from ..algo.utils import PRReviewHeader, load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file, Range
+from ..algo.types import EDIT_TYPE
+from ..algo.utils import (PRReviewHeader, Range, clip_tokens,
+                          find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
 from ..config_loader import get_settings
 from ..log import get_logger
 from ..servers.utils import RateLimitExceeded
-from .git_provider import GitProvider, IncrementalPR, MAX_FILES_ALLOWED_FULL
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from .git_provider import (MAX_FILES_ALLOWED_FULL, FilePatchInfo, GitProvider,
+                           IncrementalPR)


 class GithubProvider(GitProvider):
@@ -27,10 +35,8 @@ class GithubProvider(GitProvider):
         except Exception:
             self.installation_id = None
         self.max_comment_chars = 65000
-        self.base_url = get_settings().get("GITHUB.BASE_URL", "https://api.github.com").rstrip("/")
+        self.base_url = get_settings().get("GITHUB.BASE_URL", "https://api.github.com").rstrip("/")  # "https://api.github.com"
         self.base_url_html = self.base_url.split("api/")[0].rstrip("/") if "api/" in self.base_url else "https://github.com"
-        self.base_domain = self.base_url.replace("https://", "").replace("http://", "")
-        self.base_domain_html = self.base_url_html.replace("https://", "").replace("http://", "")
         self.github_client = self._get_github_client()
         self.repo = None
         self.pr_num = None
@@ -197,7 +203,24 @@ class GithubProvider(GitProvider):
                 if avoid_load:
                     original_file_content_str = ""
                 else:
-                    original_file_content_str = self._get_pr_file_content(file, self.pr.base.sha)
+                    # The base.sha will point to the current state of the base branch (including parallel merges), not the original base commit when the PR was created
+                    # We can fix this by finding the merge base commit between the PR head and base branches
+                    # Note that The pr.head.sha is actually correct as is - it points to the latest commit in your PR branch.
+                    # This SHA isn't affected by parallel merges to the base branch since it's specific to your PR's branch.
+                    repo = self.repo_obj
+                    pr = self.pr
+                    try:
+                        compare = repo.compare(pr.base.sha, pr.head.sha)
+                        merge_base_commit = compare.merge_base_commit
+                    except Exception as e:
+                        get_logger().error(f"Failed to get merge base commit: {e}")
+                        merge_base_commit = pr.base
+                    if merge_base_commit.sha != pr.base.sha:
+                        get_logger().info(
+                            f"Using merge base commit {merge_base_commit.sha} instead of base commit "
+                            f"{pr.base.sha} for {file.filename}")
+                    original_file_content_str = self._get_pr_file_content(file, merge_base_commit.sha)

                 if not patch:
                     patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str)

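The block added here diffs against the merge base instead of `base.sha`, since `base.sha` drifts as unrelated PRs merge into the base branch. A standalone sketch of the same idea with PyGithub; the token, repository name, and PR number are placeholders, not values from this diff:

```python
# Sketch of the merge-base trick above with PyGithub. "owner/repo", 123 and
# the token are invented placeholders for illustration.
from github import Github

gh = Github("ghp_your_token_here")
repo = gh.get_repo("owner/repo")
pr = repo.get_pull(123)

# pr.base.sha tracks the moving tip of the base branch; the merge base is
# the commit the PR actually forked from, so diff file contents against it.
comparison = repo.compare(pr.base.sha, pr.head.sha)
merge_base_sha = comparison.merge_base_commit.sha
print(f"diffing against {merge_base_sha} instead of {pr.base.sha}")
```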
@@ -233,8 +256,9 @@ class GithubProvider(GitProvider):

             return diff_files

-        except GithubException.RateLimitExceededException as e:
-            get_logger().error(f"Rate limit exceeded for GitHub API. Original message: {e}")
+        except Exception as e:
+            get_logger().error(f"Failing to get diff files: {e}",
+                               artifact={"traceback": traceback.format_exc()})
             raise RateLimitExceeded("Rate limit exceeded for GitHub API.") from e

     def publish_description(self, pr_title: str, pr_body: str):
@@ -256,7 +280,7 @@ class GithubProvider(GitProvider):
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
         if is_temporary and not get_settings().config.publish_output_progress:
             get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
-            return
+            return None
         pr_comment = self.limit_output_characters(pr_comment, self.max_comment_chars)
         response = self.pr.create_issue_comment(pr_comment)
         if hasattr(response, "user") and hasattr(response.user, "login"):
@@ -280,8 +304,7 @@ class GithubProvider(GitProvider):
                                                                   relevant_line_in_file,
                                                                   absolute_position)
         if position == -1:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
+            get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
             subject_type = "FILE"
         else:
             subject_type = "LINE"
@@ -293,11 +316,9 @@ class GithubProvider(GitProvider):
             # publish all comments in a single message
             self.pr.create_review(commit=self.last_commit_id, comments=comments)
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish inline comments")
+            get_logger().info(f"Initially failed to publish inline comments as committable")

-            if (getattr(e, "status", None) == 422
-                    and get_settings().github.publish_inline_comments_fallback_with_verification and not disable_fallback):
+            if (getattr(e, "status", None) == 422 and not disable_fallback):
                 pass  # continue to try _publish_inline_comments_fallback_with_verification
             else:
                 raise e  # will end up with publishing the comments one by one
@@ -305,8 +326,7 @@ class GithubProvider(GitProvider):
             try:
                 self._publish_inline_comments_fallback_with_verification(comments)
             except Exception as e:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().error(f"Failed to publish inline code comments fallback, error: {e}")
+                get_logger().error(f"Failed to publish inline code comments fallback, error: {e}")
                 raise e

     def _publish_inline_comments_fallback_with_verification(self, comments: list[dict]):
@@ -331,11 +351,9 @@ class GithubProvider(GitProvider):
         for comment in fixed_comments_as_one_liner:
             try:
                 self.publish_inline_comments([comment], disable_fallback=True)
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"Published invalid comment as a single line comment: {comment}")
+                get_logger().info(f"Published invalid comment as a single line comment: {comment}")
             except:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().error(f"Failed to publish invalid comment as a single line comment: {comment}")
+                get_logger().error(f"Failed to publish invalid comment as a single line comment: {comment}")

     def _verify_code_comment(self, comment: dict):
         is_verified = False
@@ -393,8 +411,7 @@ class GithubProvider(GitProvider):
                 if fixed_comment != comment:
                     fixed_comments.append(fixed_comment)
             except Exception as e:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().error(f"Failed to fix inline comment, error: {e}")
+                get_logger().error(f"Failed to fix inline comment, error: {e}")
         return fixed_comments

     def publish_code_suggestions(self, code_suggestions: list) -> bool:
@@ -402,23 +419,24 @@ class GithubProvider(GitProvider):
         Publishes code suggestions as comments on the PR.
         """
         post_parameters_list = []
-        for suggestion in code_suggestions:
+
+        code_suggestions_validated = self.validate_comments_inside_hunks(code_suggestions)
+
+        for suggestion in code_suggestions_validated:
             body = suggestion['body']
             relevant_file = suggestion['relevant_file']
             relevant_lines_start = suggestion['relevant_lines_start']
             relevant_lines_end = suggestion['relevant_lines_end']

             if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(
-                        f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
+                get_logger().exception(
+                    f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
                 continue

             if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(f"Failed to publish code suggestion, "
-                                           f"relevant_lines_end is {relevant_lines_end} and "
-                                           f"relevant_lines_start is {relevant_lines_start}")
+                get_logger().exception(f"Failed to publish code suggestion, "
+                                       f"relevant_lines_end is {relevant_lines_end} and "
+                                       f"relevant_lines_start is {relevant_lines_start}")
                 continue

             if relevant_lines_end > relevant_lines_start:
@@ -442,8 +460,7 @@ class GithubProvider(GitProvider):
             self.publish_inline_comments(post_parameters_list)
             return True
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish code suggestion, error: {e}")
+            get_logger().error(f"Failed to publish code suggestion, error: {e}")
             return False

     def edit_comment(self, comment, body: str):
@@ -502,6 +519,7 @@ class GithubProvider(GitProvider):
                 elif self.deployment_type == 'user':
                     same_comment_creator = self.github_user_id == existing_comment['user']['login']
                 if existing_comment['subject_type'] == 'file' and comment['path'] == existing_comment['path'] and same_comment_creator:
+
                     headers, data_patch = self.pr._requester.requestJsonAndCheck(
                         "PATCH", f"{self.base_url}/repos/{self.repo}/pulls/comments/{existing_comment['id']}", input={"body":comment['body']}
                     )
@@ -513,8 +531,7 @@ class GithubProvider(GitProvider):
             )
             return True
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish diffview file summary, error: {e}")
+            get_logger().error(f"Failed to publish diffview file summary, error: {e}")
             return False

     def remove_initial_comment(self):
@@ -611,8 +628,11 @@ class GithubProvider(GitProvider):
     def _parse_pr_url(self, pr_url: str) -> Tuple[str, int]:
         parsed_url = urlparse(pr_url)

+        if parsed_url.path.startswith('/api/v3'):
+            parsed_url = urlparse(pr_url.replace("/api/v3", ""))
+
         path_parts = parsed_url.path.strip('/').split('/')
-        if self.base_domain in parsed_url.netloc:
+        if 'api.github.com' in parsed_url.netloc or '/api/v3' in pr_url:
             if len(path_parts) < 5 or path_parts[3] != 'pulls':
                 raise ValueError("The provided URL does not appear to be a GitHub PR URL")
             repo_name = '/'.join(path_parts[1:3])
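For reference, the reshaped parser reads roughly as below when pulled out of the class. The `api.github.com` branch mirrors the hunk above; the trailing HTML-URL branch is reconstructed for illustration and is not shown in this diff:

```python
# Standalone sketch of the URL normalization above: strip a GitHub Enterprise
# "/api/v3" prefix, then split the path. The HTML-URL branch is an assumption.
from urllib.parse import urlparse

def parse_pr_url(pr_url):
    parsed = urlparse(pr_url)
    if parsed.path.startswith('/api/v3'):
        parsed = urlparse(pr_url.replace("/api/v3", ""))
    path_parts = parsed.path.strip('/').split('/')
    if 'api.github.com' in parsed.netloc or '/api/v3' in pr_url:
        # API URLs look like /repos/{owner}/{repo}/pulls/{number}
        if len(path_parts) < 5 or path_parts[3] != 'pulls':
            raise ValueError("The provided URL does not appear to be a GitHub PR URL")
        return '/'.join(path_parts[1:3]), int(path_parts[4])
    # HTML URLs look like /{owner}/{repo}/pull/{number}
    if len(path_parts) < 4 or path_parts[2] != 'pull':
        raise ValueError("The provided URL does not appear to be a GitHub PR URL")
    return '/'.join(path_parts[0:2]), int(path_parts[3])

print(parse_pr_url("https://github.com/qodo-ai/pr-agent/pull/1"))  # ('qodo-ai/pr-agent', 1)
```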
@@ -635,8 +655,12 @@ class GithubProvider(GitProvider):

     def _parse_issue_url(self, issue_url: str) -> Tuple[str, int]:
         parsed_url = urlparse(issue_url)

+        if 'github.com' not in parsed_url.netloc:
+            raise ValueError("The provided URL is not a valid GitHub URL")
+
         path_parts = parsed_url.path.strip('/').split('/')
-        if self.base_domain in parsed_url.netloc:
+        if 'api.github.com' in parsed_url.netloc:
             if len(path_parts) < 5 or path_parts[3] != 'issues':
                 raise ValueError("The provided URL does not appear to be a GitHub ISSUE URL")
             repo_name = '/'.join(path_parts[1:3])
@@ -795,8 +819,7 @@ class GithubProvider(GitProvider):
             link = f"{self.base_url_html}/{self.repo}/pull/{self.pr_num}/files#diff-{sha_file}R{absolute_position}"
             return link
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().info(f"Failed adding line link, error: {e}")
+            get_logger().info(f"Failed adding line link, error: {e}")

         return ""

@@ -856,3 +879,100 @@ class GithubProvider(GitProvider):

     def calc_pr_statistics(self, pull_request_data: dict):
         return {}

+    def validate_comments_inside_hunks(self, code_suggestions):
+        """
+        validate that all committable comments are inside PR hunks - this is a must for committable comments in GitHub
+        """
+        code_suggestions_copy = copy.deepcopy(code_suggestions)
+        diff_files = self.get_diff_files()
+        RE_HUNK_HEADER = re.compile(
+            r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
+
+        # map file extensions to programming languages
+        language_extension_map_org = get_settings().language_extension_map_org
+        extension_to_language = {}
+        for language, extensions in language_extension_map_org.items():
+            for ext in extensions:
+                extension_to_language[ext] = language
+        for file in diff_files:
+            extension_s = '.' + file.filename.rsplit('.')[-1]
+            language_name = "txt"
+            if extension_s and (extension_s in extension_to_language):
+                language_name = extension_to_language[extension_s]
+            file.language = language_name.lower()
+
+        for suggestion in code_suggestions_copy:
+            try:
+                relevant_file_path = suggestion['relevant_file']
+                for file in diff_files:
+                    if file.filename == relevant_file_path:
+
+                        # generate on-demand the patches range for the relevant file
+                        patch_str = file.patch
+                        if not hasattr(file, 'patches_range'):
+                            file.patches_range = []
+                            patch_lines = patch_str.splitlines()
+                            for i, line in enumerate(patch_lines):
+                                if line.startswith('@@'):
+                                    match = RE_HUNK_HEADER.match(line)
+                                    # identify hunk header
+                                    if match:
+                                        section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
+                                        file.patches_range.append({'start': start2, 'end': start2 + size2 - 1})
+
+                        patches_range = file.patches_range
+                        comment_start_line = suggestion.get('relevant_lines_start', None)
+                        comment_end_line = suggestion.get('relevant_lines_end', None)
+                        original_suggestion = suggestion.get('original_suggestion', None)  # needed for diff code
+                        if not comment_start_line or not comment_end_line or not original_suggestion:
+                            continue
+
+                        # check if the comment is inside a valid hunk
+                        is_valid_hunk = False
+                        min_distance = float('inf')
+                        patch_range_min = None
+                        # find the hunk that contains the comment, or the closest one
+                        for i, patch_range in enumerate(patches_range):
+                            d1 = comment_start_line - patch_range['start']
+                            d2 = patch_range['end'] - comment_end_line
+                            if d1 >= 0 and d2 >= 0:  # found a valid hunk
+                                is_valid_hunk = True
+                                min_distance = 0
+                                patch_range_min = patch_range
+                                break
+                            elif d1 * d2 <= 0:  # comment is possibly inside the hunk
+                                d1_clip = abs(min(0, d1))
+                                d2_clip = abs(min(0, d2))
+                                d = max(d1_clip, d2_clip)
+                                if d < min_distance:
+                                    patch_range_min = patch_range
+                                    min_distance = min(min_distance, d)
+                        if not is_valid_hunk:
+                            if min_distance < 10:  # 10 lines - a reasonable distance to consider the comment inside the hunk
+                                # make the suggestion non-committable, yet multi line
+                                suggestion['relevant_lines_start'] = max(suggestion['relevant_lines_start'], patch_range_min['start'])
+                                suggestion['relevant_lines_end'] = min(suggestion['relevant_lines_end'], patch_range_min['end'])
+                                body = suggestion['body'].strip()
+
+                                # present new diff code in collapsible
+                                existing_code = original_suggestion['existing_code'].rstrip() + "\n"
+                                improved_code = original_suggestion['improved_code'].rstrip() + "\n"
+                                diff = difflib.unified_diff(existing_code.split('\n'),
+                                                            improved_code.split('\n'), n=999)
+                                patch_orig = "\n".join(diff)
+                                patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+                                diff_code = f"\n\n<details><summary>New proposed code:</summary>\n\n```diff\n{patch.rstrip()}\n```"
+                                # replace ```suggestion ... ``` with diff_code, using regex:
+                                body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
+                                body += "\n\n</details>"
+                                suggestion['body'] = body
+                                get_logger().info(f"Comment was moved to a valid hunk, "
+                                                  f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
+                            else:
+                                get_logger().error(f"Comment is not inside a valid hunk, "
+                                                   f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
+            except Exception as e:
+                get_logger().error(f"Failed to process patch for committable comment, error: {e}")
+        return code_suggestions_copy
+
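The heart of the new method is the hunk-range bookkeeping: parse each `@@` header, record the new-file interval it covers, and test suggestions against those intervals. A self-contained sketch of just that part (reading the regex groups directly, where the method above delegates to `extract_hunk_headers`):

```python
# Minimal sketch of the hunk-range check: collect "@@" intervals from a
# unified diff, then test a line interval against them.
import re

RE_HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

def hunk_ranges(patch_str):
    # Each range covers the new-file side: start2 .. start2 + size2 - 1.
    ranges = []
    for line in patch_str.splitlines():
        match = RE_HUNK_HEADER.match(line)
        if match:
            start2 = int(match.group(3))
            size2 = int(match.group(4) or 1)
            ranges.append({'start': start2, 'end': start2 + size2 - 1})
    return ranges

def is_inside_hunk(start_line, end_line, ranges):
    return any(r['start'] <= start_line and end_line <= r['end'] for r in ranges)

patch = "@@ -10,4 +12,6 @@ def f():\n+new line\n"
print(hunk_ranges(patch))                          # [{'start': 12, 'end': 17}]
print(is_inside_hunk(13, 15, hunk_ranges(patch)))  # True
```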
@@ -1,3 +1,4 @@
+import difflib
 import hashlib
 import re
 from typing import Optional, Tuple
@@ -7,13 +8,16 @@ import gitlab
 import requests
 from gitlab import GitlabGetError

+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+
 from ..algo.file_filter import filter_ignored
 from ..algo.language_handler import is_valid_file
-from ..algo.utils import load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file
+from ..algo.utils import (clip_tokens,
+                          find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
 from ..config_loader import get_settings
-from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from ..log import get_logger
+from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider


 class DiffNotFoundError(Exception):
@@ -190,6 +194,9 @@ class GitLabProvider(GitProvider):
         self.publish_persistent_comment_full(pr_comment, initial_header, update_header, name, final_update_message)

     def publish_comment(self, mr_comment: str, is_temporary: bool = False):
+        if is_temporary and not get_settings().config.publish_output_progress:
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {mr_comment}")
+            return None
         mr_comment = self.limit_output_characters(mr_comment, self.max_comment_chars)
         comment = self.mr.notes.create({'body': mr_comment})
         if is_temporary:
@@ -275,20 +282,23 @@ class GitLabProvider(GitProvider):
         new_code_snippet = original_suggestion['improved_code']
         content = original_suggestion['suggestion_content']
         label = original_suggestion['label']
-        if 'score' in original_suggestion:
-            score = original_suggestion['score']
-        else:
-            score = 7
+        score = original_suggestion.get('score', 7)

         if hasattr(self, 'main_language'):
             language = self.main_language
         else:
             language = ''
         link = self.get_line_link(relevant_file, line_start, line_end)
-        body_fallback =f"**Suggestion:** {content} [{label}, importance: {score}]\n___\n"
-        body_fallback +=f"\n\nReplace lines ([{line_start}-{line_end}]({link}))\n\n```{language}\n{old_code_snippet}\n````\n\n"
-        body_fallback +=f"with\n\n```{language}\n{new_code_snippet}\n````"
-        body_fallback += f"\n\n___\n\n`(Cannot implement this suggestion directly, as gitlab API does not enable committing to a non -+ line in a PR)`"
+        body_fallback =f"**Suggestion:** {content} [{label}, importance: {score}]\n\n"
+        body_fallback +=f"\n\n<details><summary>[{target_file.filename} [{line_start}-{line_end}]]({link}):</summary>\n\n"
+        body_fallback += f"\n\n___\n\n`(Cannot implement directly - GitLab API allows committable suggestions strictly on MR diff lines)`"
+        body_fallback+="</details>\n\n"
+        diff_patch = difflib.unified_diff(old_code_snippet.split('\n'),
+                                          new_code_snippet.split('\n'), n=999)
+        patch_orig = "\n".join(diff_patch)
+        patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+        diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
+        body_fallback += diff_code

         # Create a general note on the file in the MR
         self.mr.notes.create({
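The fallback now renders the proposal as a diff block. A runnable sketch of exactly this `difflib` recipe, with toy snippets standing in for the suggestion fields; the `[5:]` slice drops difflib's `---`/`+++`/`@@` header lines the same way the code above does:

```python
# Sketch of the fallback rendering above with invented snippets.
import difflib

old_code_snippet = "total = 0\nfor x in items:\n    total += x\n"
new_code_snippet = "total = sum(items)\n"

diff_patch = difflib.unified_diff(old_code_snippet.split('\n'),
                                  new_code_snippet.split('\n'), n=999)
patch_orig = "\n".join(diff_patch)
# Skip the "---", "+++" and "@@" header lines, keeping only the diff body.
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
print(f"```diff\n{patch.rstrip()}\n```")
```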
@@ -301,6 +311,7 @@ class GitLabProvider(GitProvider):
                 'file_path': f'{target_file.filename}',
             }
         })
+        get_logger().debug(f"Created fallback comment in MR {self.id_mr} with position {pos_obj}")

         # get_logger().debug(
         #     f"Failed to create comment in MR {self.id_mr} with position {pos_obj} (probably not a '+' line)")
@@ -4,9 +4,9 @@ from typing import List

 from git import Repo

+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.config_loader import _find_repository_root, get_settings
 from pr_agent.git_providers.git_provider import GitProvider
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.log import get_logger

@@ -3,11 +3,12 @@ import os
 import tempfile

 from dynaconf import Dynaconf
+from starlette_context import context

 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
+from pr_agent.git_providers import (get_git_provider,
+                                    get_git_provider_with_context)
 from pr_agent.log import get_logger
-from starlette_context import context


 def apply_repo_settings(pr_url):
@@ -27,18 +28,27 @@ def apply_repo_settings(pr_url):
         except Exception:
             pass

+        error_local = None
         if repo_settings:
             repo_settings_file = None
-            fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
-            os.write(fd, repo_settings)
-            new_settings = Dynaconf(settings_files=[repo_settings_file])
-            for section, contents in new_settings.as_dict().items():
-                section_dict = copy.deepcopy(get_settings().as_dict().get(section, {}))
-                for key, value in contents.items():
-                    section_dict[key] = value
-                get_settings().unset(section)
-                get_settings().set(section, section_dict, merge=False)
-            get_logger().info(f"Applying repo settings:\n{new_settings.as_dict()}")
+            category = 'local'
+            try:
+                fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
+                os.write(fd, repo_settings)
+                new_settings = Dynaconf(settings_files=[repo_settings_file])
+                for section, contents in new_settings.as_dict().items():
+                    section_dict = copy.deepcopy(get_settings().as_dict().get(section, {}))
+                    for key, value in contents.items():
+                        section_dict[key] = value
+                    get_settings().unset(section)
+                    get_settings().set(section, section_dict, merge=False)
+                get_logger().info(f"Applying repo settings:\n{new_settings.as_dict()}")
+            except Exception as e:
+                get_logger().warning(f"Failed to apply repo {category} settings, error: {str(e)}")
+                error_local = {'error': str(e), 'settings': repo_settings, 'category': category}

+            if error_local:
+                handle_configurations_errors([error_local], git_provider)
     except Exception as e:
         get_logger().exception("Failed to apply repo settings", e)
     finally:
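The settings application is now wrapped so a malformed repo configuration degrades into a reported error instead of an unhandled exception. A minimal sketch of the tempfile-plus-Dynaconf step in isolation; the TOML payload is an invented example:

```python
# Sketch of the settings-loading step above: write repo-provided TOML bytes
# to a temp file, parse with Dynaconf, and surface parse errors.
import os
import tempfile

from dynaconf import Dynaconf

repo_settings = b'[config]\nmodel = "claude-3-5-sonnet"\n'  # example payload

fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
try:
    os.write(fd, repo_settings)
    os.close(fd)
    new_settings = Dynaconf(settings_files=[repo_settings_file])
    # Dynaconf normalizes top-level keys, typically to uppercase.
    print(new_settings.as_dict())
except Exception as e:
    print(f"invalid repo settings TOML: {e}")
finally:
    os.remove(repo_settings_file)
```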
@@ -49,10 +59,40 @@ def apply_repo_settings(pr_url):
                 get_logger().error(f"Failed to remove temporary settings file {repo_settings_file}", e)

     # enable switching models with a short definition
-    if get_settings().config.model.lower()=='claude-3-5-sonnet':
+    if get_settings().config.model.lower() == 'claude-3-5-sonnet':
         set_claude_model()


+def handle_configurations_errors(config_errors, git_provider):
+    try:
+        if not any(config_errors):
+            return
+
+        for err in config_errors:
+            if err:
+                configuration_file_content = err['settings'].decode()
+                err_message = err['error']
+                config_type = err['category']
+                header = f"❌ **PR-Agent failed to apply '{config_type}' repo settings**"
+                body = f"{header}\n\nThe configuration file needs to be a valid [TOML](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/), please fix it.\n\n"
+                body += f"___\n\n**Error message:**\n`{err_message}`\n\n"
+                if git_provider.is_supported("gfm_markdown"):
+                    body += f"\n\n<details><summary>Configuration content:</summary>\n\n```toml\n{configuration_file_content}\n```\n\n</details>"
+                else:
+                    body += f"\n\n**Configuration content:**\n\n```toml\n{configuration_file_content}\n```\n\n"
+                get_logger().warning(f"Sending a 'configuration error' comment to the PR", artifact={'body': body})
+                # git_provider.publish_comment(body)
+                if hasattr(git_provider, 'publish_persistent_comment'):
+                    git_provider.publish_persistent_comment(body,
+                                                            initial_header=header,
+                                                            update_header=False,
+                                                            final_update_message=False)
+                else:
+                    git_provider.publish_comment(body)
+    except Exception as e:
+        get_logger().exception(f"Failed to handle configurations errors", e)
+
+
 def set_claude_model():
     """
     set the claude-sonnet-3.5 model easily (even by users), just by stating: --config.model='claude-3-5-sonnet'
@@ -1,5 +1,6 @@
 from pr_agent.config_loader import get_settings
-from pr_agent.identity_providers.default_identity_provider import DefaultIdentityProvider
+from pr_agent.identity_providers.default_identity_provider import \
+    DefaultIdentityProvider

 _IDENTITY_PROVIDERS = {
     'default': DefaultIdentityProvider
@@ -1,4 +1,5 @@
-from pr_agent.identity_providers.identity_provider import Eligibility, IdentityProvider
+from pr_agent.identity_providers.identity_provider import (Eligibility,
+                                                           IdentityProvider)


 class DefaultIdentityProvider(IdentityProvider):
@@ -8,12 +8,10 @@ def get_secret_provider():
     provider_id = get_settings().config.secret_provider
     if provider_id == 'google_cloud_storage':
        try:
            from pr_agent.secret_providers.google_cloud_storage_secret_provider import \
                GoogleCloudStorageSecretProvider
            return GoogleCloudStorageSecretProvider()
        except Exception as e:
            raise ValueError(f"Failed to initialize google_cloud_storage secret provider {provider_id}") from e
     else:
         raise ValueError("Unknown SECRET_PROVIDER")

@@ -16,8 +16,9 @@
         },
         "scopes": [
             "account",
-            "repository",
-            "pullrequest"
+            "repository:write",
+            "pullrequest:write",
+            "wiki"
         ],
         "contexts": [
             "account"
@@ -9,9 +9,9 @@ import secrets
 from urllib.parse import unquote

 import uvicorn
-from fastapi import APIRouter, Depends, FastAPI, HTTPException
-from fastapi.security import HTTPBasic, HTTPBasicCredentials
+from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
 from fastapi.encoders import jsonable_encoder
+from fastapi.security import HTTPBasic, HTTPBasicCredentials
 from starlette import status
 from starlette.background import BackgroundTasks
 from starlette.middleware import Middleware
@@ -23,9 +23,6 @@ from pr_agent.agent.pr_agent import PRAgent, command2class
 from pr_agent.algo.utils import update_settings_from_args
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.utils import apply_repo_settings
-from pr_agent.log import get_logger
-from fastapi import Request, Depends
-from fastapi.security import HTTPBasic, HTTPBasicCredentials
 from pr_agent.log import LoggingFormat, get_logger, setup_logger

 setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
@@ -75,8 +75,11 @@ async def handle_manifest(request: Request, response: Response):
     return JSONResponse(manifest_obj)


-async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
+async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict, data: dict):
     apply_repo_settings(api_url)
+    if data.get("event", "") == "pullrequest:created":
+        if not should_process_pr_logic(data):
+            return
     commands = get_settings().get(f"bitbucket_app.{commands_conf}", {})
     get_settings().set("config.is_auto_command", True)
     for command in commands:
@@ -95,11 +98,14 @@ async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_ur

 def is_bot_user(data) -> bool:
     try:
-        if data["data"]["actor"]["type"] != "user":
-            get_logger().info(f"BitBucket actor type is not 'user': {data['data']['actor']['type']}")
+        actor = data.get("data", {}).get("actor", {})
+        # allow actor type: user . if it's "AppUser" or "team" then it is a bot user
+        allowed_actor_types = {"user"}
+        if actor and actor["type"].lower() not in allowed_actor_types:
+            get_logger().info(f"BitBucket actor type is not 'user', skipping: {actor}")
             return True
     except Exception as e:
-        get_logger().error("Failed 'is_bot_user' logic: {e}")
+        get_logger().error(f"Failed 'is_bot_user' logic: {e}")
     return False

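The rewritten check reads the actor defensively and allows only plain `user` actors through. Extracted as a standalone sketch, minus the logging:

```python
# Standalone sketch of the bot-filtering change above.
def is_bot_user(data) -> bool:
    try:
        actor = data.get("data", {}).get("actor", {})
        allowed_actor_types = {"user"}
        if actor and actor["type"].lower() not in allowed_actor_types:
            return True  # "AppUser", "team", etc. are treated as bot users
    except Exception:
        return False
    return False

print(is_bot_user({"data": {"actor": {"type": "AppUser"}}}))  # True
print(is_bot_user({"data": {"actor": {"type": "user"}}}))     # False
```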
@@ -158,16 +164,18 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
             return "OK"

         # Get the username of the sender
-        try:
-            username = data["data"]["actor"]["username"]
-        except KeyError:
+        actor = data.get("data", {}).get("actor", {})
+        if actor:
             try:
-                username = data["data"]["actor"]["display_name"]
+                username = actor["username"]
             except KeyError:
-                username = data["data"]["actor"]["nickname"]
-        log_context["sender"] = username
+                try:
+                    username = actor["display_name"]
+                except KeyError:
+                    username = actor["nickname"]
+            log_context["sender"] = username

-        sender_id = data["data"]["actor"]["account_id"]
+        sender_id = data.get("data", {}).get("actor", {}).get("account_id", "")
         log_context["sender_id"] = sender_id
         jwt_parts = input_jwt.split(".")
         claim_part = jwt_parts[1]
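The nested `try`/`except KeyError` chain is equivalent to taking the first key that exists. A compact sketch of the same lookup order, offered only as a reading aid, not as a replacement used by the project:

```python
# "First available key wins" - equivalent to the fallback chain above.
def extract_username(actor: dict) -> str:
    for key in ("username", "display_name", "nickname"):
        if key in actor:
            return actor[key]
    return ""

print(extract_username({"nickname": "jdoe"}))        # jdoe
print(extract_username({"display_name": "J. Doe"}))  # J. Doe
```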
@@ -193,7 +201,7 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
             if get_identity_provider().verify_eligibility("bitbucket",
                                                           sender_id, pr_url) is not Eligibility.NOT_ELIGIBLE:
                 if get_settings().get("bitbucket_app.pr_commands"):
-                    await _perform_commands_bitbucket("pr_commands", PRAgent(), pr_url, log_context)
+                    await _perform_commands_bitbucket("pr_commands", PRAgent(), pr_url, log_context, data)
         elif event == "pullrequest:comment_created":
             pr_url = data["data"]["pullrequest"]["links"]["html"]["href"]
             log_context["api_url"] = pr_url
|
@ -6,20 +6,20 @@ from typing import List
|
|||||||
import uvicorn
|
import uvicorn
|
||||||
from fastapi import APIRouter, FastAPI
|
from fastapi import APIRouter, FastAPI
|
||||||
from fastapi.encoders import jsonable_encoder
|
from fastapi.encoders import jsonable_encoder
|
||||||
|
from fastapi.responses import RedirectResponse
|
||||||
from starlette import status
|
from starlette import status
|
||||||
from starlette.background import BackgroundTasks
|
from starlette.background import BackgroundTasks
|
||||||
from starlette.middleware import Middleware
|
from starlette.middleware import Middleware
|
||||||
from starlette.requests import Request
|
from starlette.requests import Request
|
||||||
from starlette.responses import JSONResponse
|
from starlette.responses import JSONResponse
|
||||||
from starlette_context.middleware import RawContextMiddleware
|
from starlette_context.middleware import RawContextMiddleware
|
||||||
|
|
||||||
from pr_agent.agent.pr_agent import PRAgent
|
from pr_agent.agent.pr_agent import PRAgent
|
||||||
from pr_agent.algo.utils import update_settings_from_args
|
from pr_agent.algo.utils import update_settings_from_args
|
||||||
from pr_agent.config_loader import get_settings
|
from pr_agent.config_loader import get_settings
|
||||||
from pr_agent.git_providers.utils import apply_repo_settings
|
from pr_agent.git_providers.utils import apply_repo_settings
|
||||||
from pr_agent.log import LoggingFormat, get_logger, setup_logger
|
from pr_agent.log import LoggingFormat, get_logger, setup_logger
|
||||||
from pr_agent.servers.utils import verify_signature
|
from pr_agent.servers.utils import verify_signature
|
||||||
from fastapi.responses import RedirectResponse
|
|
||||||
|
|
||||||
|
|
||||||
setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
|
setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
|
||||||
router = APIRouter()
|
router = APIRouter()
|
||||||
|
@@ -15,7 +15,8 @@ from starlette_context.middleware import RawContextMiddleware
 from pr_agent.agent.pr_agent import PRAgent
 from pr_agent.algo.utils import update_settings_from_args
 from pr_agent.config_loader import get_settings, global_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
+from pr_agent.git_providers import (get_git_provider,
+                                    get_git_provider_with_context)
 from pr_agent.git_providers.git_provider import IncrementalPR
 from pr_agent.git_providers.utils import apply_repo_settings
 from pr_agent.identity_providers import get_identity_provider
@@ -249,7 +250,7 @@ def is_bot_user(sender, sender_type):
     return False


-def should_process_pr_logic(sender_type, sender, body) -> bool:
+def should_process_pr_logic(body) -> bool:
     try:
         pull_request = body.get("pull_request", {})
         title = pull_request.get("title", "")
@@ -306,10 +307,10 @@ async def handle_request(body: Dict[str, Any], event: str):
     log_context, sender, sender_id, sender_type = get_log_context(body, event, action, build_number)

     # logic to ignore PRs opened by bot, PRs with specific titles, labels, source branches, or target branches
-    if is_bot_user(sender, sender_type):
+    if is_bot_user(sender, sender_type) and 'check_run' not in body:
         return {}
     if action != 'created' and 'check_run' not in body:
-        if not should_process_pr_logic(sender_type, sender, body):
+        if not should_process_pr_logic(body):
             return {}

     if 'check_run' in body:  # handle failed checks
@@ -373,6 +374,8 @@ def _check_pull_request_event(action: str, body: dict, log_context: dict) -> Tup
 async def _perform_auto_commands_github(commands_conf: str, agent: PRAgent, body: dict, api_url: str,
                                         log_context: dict):
     apply_repo_settings(api_url)
+    if not should_process_pr_logic(body):  # Here we already updated the configuration with the repo settings
+        return {}
     commands = get_settings().get(f"github_app.{commands_conf}")
     if not commands:
         get_logger().info(f"New PR, but no auto commands configured")
@@ -1,11 +1,12 @@
 import asyncio
 import multiprocessing
-from collections import deque
-import traceback
-from datetime import datetime, timezone
 import time
-import requests
+import traceback
+from collections import deque
+from datetime import datetime, timezone

 import aiohttp
+import requests

 from pr_agent.agent.pr_agent import PRAgent
 from pr_agent.config_loader import get_settings
|
@ -1,6 +1,6 @@
|
|||||||
import copy
|
import copy
|
||||||
import re
|
|
||||||
import json
|
import json
|
||||||
|
import re
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
import uvicorn
|
import uvicorn
|
||||||
@@ -59,8 +59,10 @@ async def handle_request(api_url: str, body: str, log_context: dict, sender_id:


 async def _perform_commands_gitlab(commands_conf: str, agent: PRAgent, api_url: str,
-                                   log_context: dict):
+                                   log_context: dict, data: dict):
     apply_repo_settings(api_url)
+    if not should_process_pr_logic(data):  # Here we already updated the configurations
+        return
     commands = get_settings().get(f"gitlab.{commands_conf}", {})
     get_settings().set("config.is_auto_command", True)
     for command in commands:
@@ -90,8 +92,12 @@ def is_bot_user(data) -> bool:
     return False


-def should_process_pr_logic(data, title) -> bool:
+def should_process_pr_logic(data) -> bool:
     try:
+        if not data.get('object_attributes', {}):
+            return False
+        title = data['object_attributes'].get('title')
+
         # logic to ignore MRs for titles, labels and source, target branches.
         ignore_mr_title = get_settings().get("CONFIG.IGNORE_PR_TITLE", [])
         ignore_mr_labels = get_settings().get("CONFIG.IGNORE_PR_LABELS", [])
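The new guard bails out when `object_attributes` is missing before reading the title. A sketch of the guard together with a typical title filter, assuming the ignore list holds regular expressions (as `CONFIG.IGNORE_PR_TITLE` entries usually are):

```python
# Sketch of the guard above plus a title filter; the regex-list assumption
# is ours, mirroring how such ignore settings are commonly configured.
import re

def should_process_mr(data, ignore_mr_title) -> bool:
    if not data.get('object_attributes', {}):
        return False
    title = data['object_attributes'].get('title') or ""
    for regex in ignore_mr_title:
        if re.search(regex, title):
            return False
    return True

payload = {'object_attributes': {'title': '[DRAFT] try things'}}
print(should_process_mr(payload, [r"^\[DRAFT\]"]))  # False
```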
@@ -133,6 +139,7 @@ def should_process_pr_logic(data, title) -> bool:
 async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
     start_time = datetime.now()
     request_json = await request.json()
+    context["settings"] = copy.deepcopy(global_settings)

     async def inner(data: dict):
         log_context = {"server_type": "gitlab_app"}
@@ -148,7 +155,6 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
             secret_dict = json.loads(secret)
             gitlab_token = secret_dict["gitlab_token"]
             log_context["token_id"] = secret_dict.get("token_name", secret_dict.get("id", "unknown"))
-            context["settings"] = copy.deepcopy(global_settings)
             context["settings"].gitlab.personal_access_token = gitlab_token
         except Exception as e:
             get_logger().error(f"Failed to validate secret {request_token}: {e}")
@@ -173,9 +179,9 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
         # ignore bot users
         if is_bot_user(data):
             return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))
-        if data.get('event_type') != 'note' and data.get('object_attributes', {}):  # not a comment
+        if data.get('event_type') != 'note':  # not a comment
             # ignore MRs based on title, labels, source and target branches
-            if not should_process_pr_logic(data, data['object_attributes'].get('title')):
+            if not should_process_pr_logic(data):
                 return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))

         log_context["sender"] = sender
@@ -188,7 +194,7 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
                 get_logger().info(f"Skipping draft MR: {url}")
                 return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))

-            await _perform_commands_gitlab("pr_commands", PRAgent(), url, log_context)
+            await _perform_commands_gitlab("pr_commands", PRAgent(), url, log_context, data)
         elif data.get('object_kind') == 'note' and data.get('event_type') == 'note':  # comment on MR
             if 'merge_request' in data:
                 mr = data['merge_request']
@@ -220,7 +226,7 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
                                     content=jsonable_encoder({"message": "success"}))

             get_logger().debug(f'A push event has been received: {url}')
-            await _perform_commands_gitlab("push_commands", PRAgent(), url, log_context)
+            await _perform_commands_gitlab("push_commands", PRAgent(), url, log_context, data)
         except Exception as e:
             get_logger().error(f"Failed to handle push event: {e}")
@@ -5,7 +5,6 @@ from starlette_context.middleware import RawContextMiddleware

 from pr_agent.servers.github_app import router
-

 middleware = [Middleware(RawContextMiddleware)]
 app = FastAPI(middleware=middleware)
 app.include_router(router)
@@ -2,7 +2,7 @@ import hashlib
 import hmac
 import time
 from collections import defaultdict
-from typing import Callable, Any
+from typing import Any, Callable

 from fastapi import HTTPException

@@ -43,6 +43,9 @@ api_base = "" # the base url for your local Llama 2, Code Llama, and other model
 vertex_project = "" # the google cloud platform project name for your vertexai deployment
 vertex_location = "" # the google cloud platform location for your vertexai deployment

+[google_ai_studio]
+gemini_api_key = "" # the google AI Studio API key
+
 [github]
 # ---- Set the following only for deployment type == "user"
 user_token = "" # A GitHub personal access token with 'repo' scope.
@@ -60,6 +63,7 @@ webhook_secret = "<WEBHOOK SECRET>" # Optional, may be commented out.
 [gitlab]
 # Gitlab personal access token
 personal_access_token = ""
+shared_secret = "" # webhook secret

 [bitbucket]
 # For Bitbucket personal/repository bearer token
@@ -1,12 +1,13 @@
 [config]
 # models
 model="gpt-4-turbo-2024-04-09"
-model_turbo="gpt-4o-2024-08-06"
+model_turbo="gpt-4o-2024-11-20"
-fallback_models=["gpt-4o-2024-05-13"]
+fallback_models=["gpt-4o-2024-08-06"]
 # CLI
 git_provider="github"
 publish_output=true
 publish_output_progress=true
+publish_output_no_suggestions=true
 verbosity_level=0 # 0,1,2
 use_extra_bad_extensions=false
 # Configurations
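The hunk above bumps `model_turbo` to `gpt-4o-2024-11-20` and advances the fallback list by one model generation, and introduces the `publish_output_no_suggestions` flag used later in `pr_code_suggestions.py`. A minimal sketch (assuming the standard `get_settings()` accessor from `pr_agent.config_loader`, which is used throughout this PR) of how these keys are read and overridden at runtime:

```python
from pr_agent.config_loader import get_settings

settings = get_settings()
print(settings.config.model_turbo)       # "gpt-4o-2024-11-20" with the defaults above
print(settings.config.fallback_models)   # ["gpt-4o-2024-08-06"]

# Keys can also be overridden programmatically for a one-off run,
# mirroring the settings.set(...) calls seen elsewhere in this diff:
settings.set("config.publish_output_no_suggestions", False)
```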
@@ -51,9 +52,7 @@ require_tests_review=true
 require_estimate_effort_to_review=true
 require_can_be_split_review=false
 require_security_review=true
-# soc2
-require_soc2_ticket=false
-soc2_ticket_prompt="Does the PR description include a link to ticket in a project management system (e.g., Jira, Asana, Trello, etc.) ?"
+require_ticket_analysis_review=true
 # general options
 num_code_suggestions=0
 inline_code_comments = false
@@ -77,7 +76,7 @@ maximal_review_effort=5


 [pr_description] # /describe #
-publish_labels=true
+publish_labels=false
 add_original_user_description=true
 generate_ai_title=false
 use_bullet_points=true
@@ -108,10 +107,11 @@ enable_help_text=false


 [pr_code_suggestions] # /improve #
-max_context_tokens=14000
+max_context_tokens=16000
 #
 commitable_code_suggestions = false
 dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
+focus_only_on_problems=true
 #
 extra_instructions = ""
 rank_suggestions = false
@@ -123,7 +123,6 @@ max_history_len=4
 # enable to apply suggestion 💎
 apply_suggestions_checkbox=true
 # suggestions scoring
-self_reflect_on_suggestions=true
 suggestions_score_threshold=0 # [0-10]| recommend not to set this value above 8, since above it may clip highly relevant suggestions
 # params for '/improve --extended' mode
 auto_extended_mode=true
@@ -136,8 +135,10 @@ final_clip_factor = 0.8
 demand_code_suggestions_self_review=false # add a checkbox for the author to self-review the code suggestions
 code_suggestions_self_review_text= "**Author self-review**: I have reviewed the PR code suggestions, and addressed the relevant ones."
 approve_pr_on_self_review=false # Pro feature. if true, the PR will be auto-approved after the author clicks on the self-review checkbox
-# Suggestion impact
+fold_suggestions_on_self_review=true # Pro feature. if true, the code suggestions will be folded after the author clicks on the self-review checkbox
+# Suggestion impact 💎
 publish_post_process_suggestion_impact=true
+wiki_page_accepted_suggestions=true

 [pr_custom_prompt] # /custom_prompt #
 prompt = """\
@@ -1,7 +1,10 @@
 [pr_code_suggestions_prompt]
 system="""You are PR-Reviewer, an AI specializing in Pull Request (PR) code analysis and suggestions.
-Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality, readability, and performance.
+{%- if not focus_only_on_problems %}
+Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality and performance.
+{%- else %}
+Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix critical bugs and problems.
+{%- endif %}

 The PR code diff will be in the following structured format:
 ======
@@ -14,10 +17,10 @@ The PR code diff will be in the following structured format:

 @@ ... @@ def func1():
 __new hunk__
-11 unchanged code line0 in the PR
-12 unchanged code line1 in the PR
-13 +new code line2 added in the PR
-14 unchanged code line3 in the PR
+unchanged code line0 in the PR
+unchanged code line1 in the PR
++new code line2 added in the PR
+unchanged code line3 in the PR
 __old hunk__
 unchanged code line0
 unchanged code line1
@@ -26,17 +29,15 @@ __old hunk__

 @@ ... @@ def func2():
 __new hunk__
-...
-__old hunk__
-...
+unchanged code line4
++new code line5 removed in the PR
+unchanged code line6


 ## File: 'src/file2.py'
 ...
 ======

-- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was added or removed in a specific chunk, the corresponding section will be omitted.
+- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was removed in a specific chunk, the __old hunk__ section will be omitted.
-- Line numbers were added for the '__new hunk__' sections to help referencing specific lines in the code suggestions. These numbers are for reference only and are not part of the actual code.
 - Code lines are prefixed with symbols: '+' for new code added in the PR, '-' for code removed, and ' ' for unchanged code.
 {%- if is_ai_metadata %}
 - When available, an AI-generated summary will precede each file's diff, with a high-level overview of the changes. Note that this summary may not be fully accurate or complete.
@@ -44,12 +45,20 @@ __old hunk__


 Specific guidelines for generating code suggestions:
+{%- if not focus_only_on_problems %}
 - Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions.
-- Focus solely on enhancing new code introduced in the PR, identified by '+' prefixes in '__new hunk__' sections (after the line numbers).
+{%- else %}
+- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions. Return less suggestions if no pertinent ones are applicable.
+{%- endif %}
+- Focus solely on enhancing new code introduced in the PR, identified by '+' prefixes in '__new hunk__' sections.
+{%- if not focus_only_on_problems %}
 - Prioritize suggestions that address potential issues, critical problems, and bugs in the PR code. Avoid repeating changes already implemented in the PR. If no pertinent suggestions are applicable, return an empty list.
-- Avoid proposing additions of docstrings, type hints, or comments, or the removal of unused imports.
+{%- else %}
+- Only give suggestions that address critical problems and bugs in the PR code. If no relevant suggestions are applicable, return an empty list.
+{%- endif %}
+- Don't suggest to add docstring, type hints, or comments, to remove unused imports, or to use more specific exception types.
 - When referencing variables or names from the code, enclose them in backticks (`). Example: "ensure that `variable_name` is..."
-- Be mindful you are viewing a partial PR code diff, not the full codebase. Avoid suggestions that might conflict with unseen code or alerting on variables not declared in the visible scope, as the context is incomplete.
+- Be mindful you are viewing a partial PR code diff, not the full codebase. Avoid suggestions that might conflict with unseen code or alerting variables not declared in the visible scope, as the context is incomplete.


 {%- if extra_instructions %}
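The hunk above branches the system prompt on a new `focus_only_on_problems` template variable. A small sketch of how such a Jinja2 conditional renders, mirroring the `Environment(undefined=StrictUndefined)` pattern used elsewhere in this PR; the template string here is abbreviated, not the full prompt:

```python
from jinja2 import Environment, StrictUndefined

# Abbreviated stand-in for the branched prompt text above.
template = (
    "{%- if not focus_only_on_problems %}"
    "Offer suggestions to fix possible bugs and enhance quality and performance."
    "{%- else %}"
    "Offer suggestions to fix critical bugs and problems only."
    "{%- endif %}"
)

env = Environment(undefined=StrictUndefined)
print(env.from_string(template).render(focus_only_on_problems=True))
# -> "Offer suggestions to fix critical bugs and problems only."
```

With `StrictUndefined`, forgetting to pass `focus_only_on_problems` raises an error instead of silently rendering one branch, which is presumably why the variable is also added to the tool's `vars` dict later in this diff.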
@@ -68,12 +77,14 @@ class CodeSuggestion(BaseModel):
     relevant_file: str = Field(description="Full path of the relevant file")
     language: str = Field(description="Programming language used by the relevant file")
     suggestion_content: str = Field(description="An actionable suggestion to enhance, improve or fix the new code introduced in the PR. Don't present here actual code snippets, just the suggestion. Be short and concise")
-    existing_code: str = Field(description="A short code snippet from a '__new hunk__' section that the suggestion aims to enhance or fix. Include only complete code lines, without line numbers. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
+    existing_code: str = Field(description="A short code snippet from a '__new hunk__' section that the suggestion aims to enhance or fix. Include only complete code lines. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
     improved_code: str = Field(description="A refined code snippet that replaces the 'existing_code' snippet after implementing the suggestion.")
     one_sentence_summary: str = Field(description="A concise, single-sentence overview of the suggested improvement. Focus on the 'what'. Be general, and avoid method or variable names.")
-    relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the beginning of the 'existing code' snippet above")
-    relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the end of the 'existing code' snippet above")
-    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability'. Other relevant labels are also acceptable.")
+{%- if not focus_only_on_problems %}
+    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', 'typo'. Other relevant labels are also acceptable.")
+{%- else %}
+    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'critical bug', 'general'. The 'general' section should be used for suggestions that address a major issue, but are necessarily on a critical level.")
+{%- endif %}


 class PRCodeSuggestions(BaseModel):
@@ -96,8 +107,6 @@ code_suggestions:
       ...
     one_sentence_summary: |
       ...
-    relevant_lines_start: 12
-    relevant_lines_end: 13
     label: |
       ...
 ```
@@ -113,7 +122,7 @@ Title: '{{title}}'

 The PR Diff:
 ======
-{{ diff|trim }}
+{{ diff_no_line_numbers|trim }}
 ======

@@ -15,8 +15,8 @@ Be particularly vigilant for suggestions that:
 - Contradict or ignore parts of the PR's modifications
 In such cases, assign the suggestion a score of 0.

-For valid suggestions, your role is to provide an impartial and precise score assessment that accurately reflects each suggestion's potential impact on the PR's correctness, quality and functionality.
+Evaluate each valid suggestion by scoring its potential impact on the PR's correctness, quality and functionality.
+In addition, you should also detect the line numbers in the '__new hunk__' section that correspond to the 'existing_code' snippet.

 Key guidelines for evaluation:
 - Thoroughly examine both the suggestion content and the corresponding PR code diff. Be vigilant for potential errors in each suggestion, ensuring they are logically sound, accurate, and directly derived from the PR code diff.
@@ -29,6 +29,14 @@ Key guidelines for evaluation:
 - Avoid inflating scores for suggestions that, while correct, offer only marginal improvements or optimizations.
 - Maintain the original order of suggestions in your feedback, corresponding to their input sequence.

+Additional scoring considerations:
+- If the suggestion is not actionable, and only asks the user to verify or ensure a change, reduce its score by 1-2 points.
+- Assign a score of 0 to suggestions aiming at:
+  - Adding docstring, type hints, or comments
+  - Remove unused imports or variables
+  - Using more specific exception types.
+


 The PR code diff will be presented in the following structured format:
 ======
@@ -74,6 +82,8 @@ The output must be a YAML object equivalent to type $PRCodeSuggestionsFeedback,
 class CodeSuggestionFeedback(BaseModel):
     suggestion_summary: str = Field(description="Repeated from the input")
     relevant_file: str = Field(description="Repeated from the input")
+    relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the beginning of the relevant 'existing code' snippet")
+    relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the end of the relevant 'existing code' snippet")
     suggestion_score: int = Field(description="Evaluate the suggestion and assign a score from 0 to 10. Give 0 if the suggestion is wrong. For valid suggestions, score from 1 (lowest impact/importance) to 10 (highest impact/importance).")
     why: str = Field(description="Briefly explain the score given in 1-2 sentences, focusing on the suggestion's impact, relevance, and accuracy.")

@@ -88,6 +98,8 @@ code_suggestions:
 - suggestion_summary: |
     Use a more descriptive variable name here
   relevant_file: "src/file1.py"
+  relevant_lines_start: 13
+  relevant_lines_end: 14
   suggestion_score: 6
   why: |
     The variable name 't' is not descriptive enough
@@ -78,9 +78,9 @@ pr_files:
       ...
     ...
 {%- endif %}
-description: |-
+description: |
   ...
-title: |-
+title: |
   ...
 {%- if enable_custom_labels %}
 labels:
@@ -94,7 +94,26 @@ labels:
 Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
 """

-user="""PR Info:
+user="""
+{%- if related_tickets %}
+Related Ticket Info:
+{% for ticket in related_tickets %}
+=====
+Ticket Title: '{{ ticket.title }}'
+{%- if ticket.labels %}
+Ticket Labels: {{ ticket.labels }}
+{%- endif %}
+{%- if ticket.body %}
+Ticket Description:
+#####
+{{ ticket.body }}
+#####
+{%- endif %}
+=====
+{% endfor %}
+{%- endif %}
+
+PR Info:
+
 Previous title: '{{title}}'

@@ -1,20 +1,24 @@
 [pr_help_prompts]
-system="""You are Doc-helper, a language models designed to answer questions about a documentation website for an open-soure project called "PR-Agent".
-You will recieve a question, and a list of snippets that were collected for a documentation site using RAG as the retrieval method.
-Your goal is to provide the best answer to the question using the snippets provided.
+system="""You are Doc-helper, a language models designed to answer questions about a documentation website for an open-soure project called "PR-Agent" (recently renamed to "Qodo Merge").
+You will recieve a question, and the full documentation website content.
+Your goal is to provide the best answer to the question using the documentation provided.

 Additional instructions:
-- Try to be short and concise in your answers. Give examples if needed.
-- It is possible some of the snippets may not be relevant to the question. In that case, you should ignore them and focus on the ones that are relevant.
-- The main tools of pr-agent are 'describe', 'review', 'improve'. If there is ambiguity to which tool the user is referring to, prioritize snippets of these tools over others.
+- Try to be short and concise in your answers. Try to give examples if needed.
+- The main tools of PR-Agent are 'describe', 'review', 'improve'. If there is ambiguity to which tool the user is referring to, prioritize snippets of these tools over others.
+- If the question has ambiguity and can relate to different tools or platfroms, provide the best answer possible based on what is available, but also state in your answer what additional information would be needed to give a more accurate answer.


 The output must be a YAML object equivalent to type $DocHelper, according to the following Pydantic definitions:
 =====
+class relevant_section(BaseModel):
+    file_name: str = Field(description="The name of the relevant file")
+    relevant_section_header_string: str = Field(description="From the relevant file, exact text of the relevant section heading. If no markdown heading is relevant, return empty string")
+
 class DocHelper(BaseModel):
     user_question: str = Field(description="The user's question")
     response: str = Field(description="The response to the user's question")
-    relevant_snippets: List[int] = Field(description="One-based index of the relevant snippets in the list of snippets provided. Order the by relevance, with the most relevant first. If a snippet was not relevant, do not include it in the list.")
+    relevant_sections: List[relevant_section] = Field(description="A list of the relevant markdown sections in the documentation that answer the user's question, ordered by importance (most relevant first)")
 =====

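With the hunk above, the help tool's expected answer changes from bare snippet indices to structured `relevant_sections` entries. A sketch of parsing such a YAML answer on the consumer side; it uses PyYAML directly for self-containment rather than the project's own `load_yaml` wrapper, and the answer text is invented for illustration:

```python
import yaml

# Hypothetical model answer in the new $DocHelper shape.
answer = """\
user_question: |
  How do I enable the improve tool?
response: |
  Run /improve on the PR ...
relevant_sections:
- file_name: "docs/tools/improve.md"
  relevant_section_header_string: |
    ## Usage
"""

data = yaml.safe_load(answer)
for section in data["relevant_sections"]:
    # Each entry now carries a file name plus the exact section heading,
    # instead of an opaque one-based snippet index.
    print(section["file_name"], "->", section["relevant_section_header_string"].strip())
```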
@@ -23,11 +27,12 @@ Example output:
 user_question: |
   ...
 response: |
+  ...
+relevant_sections:
+- file_name: "src/file1.py"
+  relevant_section_header_string: |
   ...
-relevant_snippets:
-- 1
-- 2
-- 4
+- ...
 """

 user="""\
@@ -37,7 +42,7 @@ User's Question:
 =====


-Relevant doc snippets retrieved:
+Documentation website content:
 =====
 {{ snippets|trim }}
 =====
@@ -32,16 +32,15 @@ __old hunk__

 @@ ... @@ def func2():
 __new hunk__
-...
-__old hunk__
-...
+unchanged code line4
++new code line5 removed in the PR
+unchanged code line6


 ## File: 'src/file2.py'
 ...
 ======

-- In this format, we separated each hunk of diff code to '__new hunk__' and '__old hunk__' sections. The '__new hunk__' section contains the new code of the chunk, and the '__old hunk__' section contains the old code, that was removed. If no new code was added in a specific hunk, '__new hunk__' section will not be presented. If no code was removed, '__old hunk__' section will not be presented.
+- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was removed in a specific chunk, the __old hunk__ section will be omitted.
 - We also added line numbers for the '__new hunk__' code, to help you refer to the code lines in your suggestions. These line numbers are not part of the actual code, and should only used for reference.
 - Code lines are prefixed with symbols ('+', '-', ' '). The '+' symbol indicates new code added in the PR, the '-' symbol indicates code removed in the PR, and the ' ' symbol indicates unchanged code. \
 The review should address new code added in the PR code diff (lines starting with '+')
@@ -86,7 +85,20 @@ class KeyIssuesComponentLink(BaseModel):
     start_line: int = Field(description="The start line that corresponds to this issue in the relevant file")
     end_line: int = Field(description="The end line that corresponds to this issue in the relevant file")

+{%- if related_tickets %}
+
+class TicketCompliance(BaseModel):
+    ticket_url: str = Field(description="Ticket URL or ID")
+    ticket_requirements: str = Field(description="Repeat, in your own words, all ticket requirements, in bullet points")
+    fully_compliant_requirements: str = Field(description="A list, in bullet points, of which requirements are met by the PR code. Don't explain how the requirements are met, just list them shortly. Can be empty")
+    not_compliant_requirements: str = Field(description="A list, in bullet points, of which requirements are not met by the PR code. Don't explain how the requirements are not met, just list them shortly. Can be empty")
+    overall_compliance_level: str = Field(description="Overall give this PR one of these three values in relation to the ticket: 'Fully compliant', 'Partially compliant', or 'Not compliant'")
+{%- endif %}
+
 class Review(BaseModel):
+{%- if related_tickets %}
+    ticket_compliance_check: List[TicketCompliance] = Field(description="A list of compliance checks for the related tickets")
+{%- endif %}
 {%- if require_estimate_effort_to_review %}
     estimated_effort_to_review_[1-5]: int = Field(description="Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review , 5 means long and hard review. Take into account the size, complexity, quality, and the needed changes of the PR code diff.")
 {%- endif %}
@@ -131,6 +143,19 @@ class PRReview(BaseModel):
 Example output:
 ```yaml
 review:
+{%- if related_tickets %}
+  ticket_compliance_check:
+    - ticket_url: |
+        ...
+      ticket_requirements: |
+        ...
+      fully_compliant_requirements: |
+        ...
+      not_compliant_requirements: |
+        ...
+      overall_compliance_level: |
+        ...
+{%- endif %}
 {%- if require_estimate_effort_to_review %}
   estimated_effort_to_review_[1-5]: |
     3
@@ -177,7 +202,33 @@ code_feedback:
 Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
 """

-user="""--PR Info--
+user="""
+{%- if related_tickets %}
+--PR Ticket Info--
+{%- for ticket in related_tickets %}
+=====
+Ticket URL: '{{ ticket.ticket_url }}'
+
+Ticket Title: '{{ ticket.title }}'
+
+{%- if ticket.labels %}
+
+Ticket Labels: {{ ticket.labels }}
+
+{%- endif %}
+{%- if ticket.body %}
+
+Ticket Description:
+#####
+{{ ticket.body }}
+#####
+{%- endif %}
+=====
+{% endfor %}
+{%- endif %}
+
+
+--PR Info--
+
 Title: '{{title}}'

@@ -1,25 +1,30 @@
 import asyncio
 import copy
+import difflib
+import re
 import textwrap
+import traceback
 from functools import partial
 from typing import Dict, List

 from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models, \
-    add_ai_metadata_to_diff_files
+from pr_agent.algo.pr_processing import (add_ai_metadata_to_diff_files,
+                                         get_pr_diff, get_pr_multi_diffs,
+                                         retry_with_fallback_models)
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, replace_code_tags, ModelType, show_relevant_configurations
+from pr_agent.algo.utils import (ModelType, load_yaml, replace_code_tags,
+                                 show_relevant_configurations)
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context, GithubProvider, GitLabProvider, \
-    AzureDevopsProvider
+from pr_agent.git_providers import (AzureDevopsProvider, GithubProvider,
+                                    GitLabProvider, get_git_provider,
+                                    get_git_provider_with_context)
 from pr_agent.git_providers.git_provider import get_main_pr_language
 from pr_agent.log import get_logger
 from pr_agent.servers.help import HelpMessage
 from pr_agent.tools.pr_description import insert_br_after_x_chars
-import difflib
-import re


 class PRCodeSuggestions:
@@ -44,7 +49,7 @@ class PRCodeSuggestions:
             self.is_extended = self._get_is_extended(args or [])
         except:
             self.is_extended = False
-        num_code_suggestions = get_settings().pr_code_suggestions.num_code_suggestions_per_chunk
+        num_code_suggestions = int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk)


         self.ai_handler = ai_handler()
@@ -69,11 +74,13 @@ class PRCodeSuggestions:
             "description": self.pr_description,
             "language": self.main_language,
             "diff": "",  # empty diff for initial calculation
+            "diff_no_line_numbers": "",  # empty diff for initial calculation
             "num_code_suggestions": num_code_suggestions,
             "extra_instructions": get_settings().pr_code_suggestions.extra_instructions,
            "commit_messages_str": self.git_provider.get_commit_messages(),
             "relevant_best_practices": "",
             "is_ai_metadata": get_settings().get("config.enable_ai_metadata", False),
+            "focus_only_on_problems": get_settings().get("pr_code_suggestions.focus_only_on_problems", False),
         }
         self.pr_code_suggestions_prompt_system = get_settings().pr_code_suggestions_prompt.system

@@ -110,15 +117,17 @@ class PRCodeSuggestions:
             if not data:
                 data = {"code_suggestions": []}

-            if (data is None or 'code_suggestions' not in data or not data['code_suggestions']
-                    and get_settings().config.publish_output):
-                get_logger().warning('No code suggestions found for the PR.')
+            if (data is None or 'code_suggestions' not in data or not data['code_suggestions']):
                 pr_body = "## PR Code Suggestions ✨\n\nNo code suggestions found for the PR."
-                get_logger().debug(f"PR output", artifact=pr_body)
-                if self.progress_response:
-                    self.git_provider.edit_comment(self.progress_response, body=pr_body)
+                get_logger().warning('No code suggestions found for the PR.')
+                if get_settings().config.publish_output and get_settings().config.publish_output_no_suggestions:
+                    get_logger().debug(f"PR output", artifact=pr_body)
+                    if self.progress_response:
+                        self.git_provider.edit_comment(self.progress_response, body=pr_body)
+                    else:
+                        self.git_provider.publish_comment(pr_body)
                 else:
-                    self.git_provider.publish_comment(pr_body)
+                    get_settings().data = {"artifact": ""}
                 return

             if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
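The hunk above also fixes an operator-precedence bug: in the old condition, `and get_settings().config.publish_output` bound only to the last `or` operand, so the publish check leaked into the emptiness test. The new code separates the two decisions: "is the result empty?" and "should the empty-result comment be published?". A condensed restatement of the new gating, with an illustrative wrapper function that is not part of the diff:

```python
def decide_no_suggestions_action(publish_output: bool, publish_output_no_suggestions: bool) -> str:
    # Condensed restatement of the branch above, for readability only:
    # the "no suggestions" comment is published only when both flags are on;
    # otherwise an empty artifact is stored in settings and nothing is posted.
    if publish_output and publish_output_no_suggestions:
        return "publish 'No code suggestions found' comment"
    return "skip comment, store empty artifact in settings"

assert decide_no_suggestions_action(True, True).startswith("publish")
assert decide_no_suggestions_action(True, False).startswith("skip")
assert decide_no_suggestions_action(False, True).startswith("skip")
```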
@@ -195,8 +204,11 @@ class PRCodeSuggestions:
                     self.git_provider.remove_comment(self.progress_response)
             else:
                 get_logger().info('Code suggestions generated for PR, but not published since publish_output is False.')
+                get_settings().data = {"artifact": data}
+                return
         except Exception as e:
-            get_logger().error(f"Failed to generate code suggestions for PR, error: {e}")
+            get_logger().error(f"Failed to generate code suggestions for PR, error: {e}",
+                               artifact={"traceback": traceback.format_exc()})
             if get_settings().config.publish_output:
                 if self.progress_response:
                     self.progress_response.delete()
@@ -325,10 +337,12 @@ class PRCodeSuggestions:
                                           model,
                                           add_line_numbers_to_hunks=True,
                                           disable_extra_lines=False)
+        self.patches_diff_list = [self.patches_diff]
+        self.patches_diff_no_line_number = self.remove_line_numbers([self.patches_diff])[0]

         if self.patches_diff:
             get_logger().debug(f"PR diff", artifact=self.patches_diff)
-            self.prediction = await self._get_prediction(model, self.patches_diff)
+            self.prediction = await self._get_prediction(model, self.patches_diff, self.patches_diff_no_line_number)
         else:
             get_logger().warning(f"Empty PR diff")
             self.prediction = None
@@ -336,42 +350,76 @@ class PRCodeSuggestions:
             data = self.prediction
         return data

-    async def _get_prediction(self, model: str, patches_diff: str) -> dict:
+    async def _get_prediction(self, model: str, patches_diff: str, patches_diff_no_line_number: str) -> dict:
         variables = copy.deepcopy(self.vars)
         variables["diff"] = patches_diff  # update diff
+        variables["diff_no_line_numbers"] = patches_diff_no_line_number  # update diff
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(self.pr_code_suggestions_prompt_system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
+        if not get_settings().config.publish_output:
+            get_settings().system_prompt = system_prompt
+            get_settings().user_prompt = user_prompt

         # load suggestions from the AI response
         data = self._prepare_pr_code_suggestions(response)

-        # self-reflect on suggestions
-        if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
-            model_turbo = get_settings().config.model_turbo  # use turbo model for self-reflection, since it is an easier task
-            response_reflect = await self.self_reflect_on_suggestions(data["code_suggestions"],
-                                                                      patches_diff, model=model_turbo)
-            if response_reflect:
-                response_reflect_yaml = load_yaml(response_reflect)
-                code_suggestions_feedback = response_reflect_yaml["code_suggestions"]
-                if len(code_suggestions_feedback) == len(data["code_suggestions"]):
-                    for i, suggestion in enumerate(data["code_suggestions"]):
-                        try:
-                            suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
-                            suggestion["score_why"] = code_suggestions_feedback[i]["why"]
-                        except Exception as e:  #
-                            get_logger().error(f"Error processing suggestion score {i}",
-                                               artifact={"suggestion": suggestion,
-                                                         "code_suggestions_feedback": code_suggestions_feedback[i]})
-                            suggestion["score"] = 7
-                            suggestion["score_why"] = ""
-            else:
-                # get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
+        # self-reflect on suggestions (mandatory, since line numbers are generated now here)
+        model_reflection = get_settings().config.model
+        response_reflect = await self.self_reflect_on_suggestions(data["code_suggestions"],
+                                                                  patches_diff, model=model_reflection)
+        if response_reflect:
+            response_reflect_yaml = load_yaml(response_reflect)
+            code_suggestions_feedback = response_reflect_yaml["code_suggestions"]
+            if len(code_suggestions_feedback) == len(data["code_suggestions"]):
                 for i, suggestion in enumerate(data["code_suggestions"]):
-                    suggestion["score"] = 7
-                    suggestion["score_why"] = ""
+                    try:
+                        suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
+                        suggestion["score_why"] = code_suggestions_feedback[i]["why"]
+
+                        if 'relevant_lines_start' not in suggestion:
+                            relevant_lines_start = code_suggestions_feedback[i].get('relevant_lines_start', -1)
+                            relevant_lines_end = code_suggestions_feedback[i].get('relevant_lines_end', -1)
+                            suggestion['relevant_lines_start'] = relevant_lines_start
+                            suggestion['relevant_lines_end'] = relevant_lines_end
+                            if relevant_lines_start < 0 or relevant_lines_end < 0:
+                                suggestion["score"] = 0
+
+                        try:
+                            if get_settings().config.publish_output:
+                                suggestion_statistics_dict = {'score': int(suggestion["score"]),
+                                                              'label': suggestion["label"].lower().strip()}
+                                get_logger().info(f"PR-Agent suggestions statistics",
+                                                  statistics=suggestion_statistics_dict, analytics=True)
+                        except Exception as e:
+                            get_logger().error(f"Failed to log suggestion statistics, error: {e}")
+                            pass
+
+                    except Exception as e:  #
+                        get_logger().error(f"Error processing suggestion score {i}",
+                                           artifact={"suggestion": suggestion,
+                                                     "code_suggestions_feedback": code_suggestions_feedback[i]})
+                        suggestion["score"] = 7
+                        suggestion["score_why"] = ""
+
+                    # if the before and after code is the same, clear one of them
+                    try:
+                        if suggestion['existing_code'] == suggestion['improved_code']:
+                            get_logger().debug(
+                                f"edited improved suggestion {i + 1}, because equal to existing code: {suggestion['existing_code']}")
+                            if get_settings().pr_code_suggestions.commitable_code_suggestions:
+                                suggestion['improved_code'] = ""  # we need 'existing_code' to locate the code in the PR
+                            else:
+                                suggestion['existing_code'] = ""
+                    except Exception as e:
+                        get_logger().error(f"Error processing suggestion {i + 1}, error: {e}")
+        else:
+            # get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
+            for i, suggestion in enumerate(data["code_suggestions"]):
+                suggestion["score"] = 7
+                suggestion["score_why"] = ""

         return data

@@ -381,10 +429,10 @@ class PRCodeSuggestions:
         suggestion_truncation_message = get_settings().get("PR_CODE_SUGGESTIONS.SUGGESTION_TRUNCATION_MESSAGE", "")
         if max_code_suggestion_length > 0:
             if len(suggestion['improved_code']) > max_code_suggestion_length:
-                suggestion['improved_code'] = suggestion['improved_code'][:max_code_suggestion_length]
-                suggestion['improved_code'] += f"\n{suggestion_truncation_message}"
                 get_logger().info(f"Truncated suggestion from {len(suggestion['improved_code'])} "
                                   f"characters to {max_code_suggestion_length} characters")
+                suggestion['improved_code'] = suggestion['improved_code'][:max_code_suggestion_length]
+                suggestion['improved_code'] += f"\n{suggestion_truncation_message}"
         return suggestion

     def _prepare_pr_code_suggestions(self, predictions: str) -> Dict:
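The reordering above fixes the log message: previously `improved_code` was clipped first, so the logged "from" length always reflected the already-truncated string. A sketch isolating the corrected order; the standalone helper name is invented for illustration:

```python
def truncate_improved_code(improved_code: str, max_len: int, truncation_message: str) -> str:
    # Log with the original length *before* clipping, as the fixed code does;
    # the old order measured len() after truncation, so the "from" value was wrong.
    if 0 < max_len < len(improved_code):
        print(f"Truncated suggestion from {len(improved_code)} characters "
              f"to {max_len} characters")
        improved_code = improved_code[:max_len] + f"\n{truncation_message}"
    return improved_code
```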
@@ -399,8 +447,7 @@ class PRCodeSuggestions:
         one_sentence_summary_list = []
         for i, suggestion in enumerate(data['code_suggestions']):
             try:
-                needed_keys = ['one_sentence_summary', 'label', 'relevant_file', 'relevant_lines_start',
-                               'relevant_lines_end']
+                needed_keys = ['one_sentence_summary', 'label', 'relevant_file']
                 is_valid_keys = True
                 for key in needed_keys:
                     if key not in suggestion:
@@ -411,6 +458,11 @@ class PRCodeSuggestions:
                 if not is_valid_keys:
                     continue

+                if get_settings().get("pr_code_suggestions.focus_only_on_problems", False):
+                    CRITICAL_LABEL = 'critical'
+                    if CRITICAL_LABEL in suggestion['label'].lower():  # we want the published labels to be less declarative
+                        suggestion['label'] = 'possible issue'
+
                 if suggestion['one_sentence_summary'] in one_sentence_summary_list:
                     get_logger().debug(f"Skipping suggestion {i + 1}, because it is a duplicate: {suggestion}")
                     continue
@@ -422,13 +474,6 @@ class PRCodeSuggestions:
                     continue

                 if ('existing_code' in suggestion) and ('improved_code' in suggestion):
-                    if suggestion['existing_code'] == suggestion['improved_code']:
-                        get_logger().debug(
-                            f"edited improved suggestion {i + 1}, because equal to existing code: {suggestion['existing_code']}")
-                        if get_settings().pr_code_suggestions.commitable_code_suggestions:
-                            suggestion['improved_code'] = ""  # we need 'existing_code' to locate the code in the PR
-                        else:
-                            suggestion['existing_code'] = ""
                     suggestion = self._truncate_if_needed(suggestion)
                     one_sentence_summary_list.append(suggestion['one_sentence_summary'])
                     suggestion_list.append(suggestion)
@@ -531,9 +576,33 @@ class PRCodeSuggestions:
             return True
         return False

+    def remove_line_numbers(self, patches_diff_list: List[str]) -> List[str]:
+        # create a copy of the patches_diff_list, without line numbers for '__new hunk__' sections
+        try:
+            self.patches_diff_list_no_line_numbers = []
+            for patches_diff in self.patches_diff_list:
+                patches_diff_lines = patches_diff.splitlines()
+                for i, line in enumerate(patches_diff_lines):
+                    if line.strip():
+                        if line[0].isdigit():
+                            # find the first letter in the line that starts with a valid letter
+                            for j, char in enumerate(line):
+                                if not char.isdigit():
+                                    patches_diff_lines[i] = line[j + 1:]
+                                    break
+                self.patches_diff_list_no_line_numbers.append('\n'.join(patches_diff_lines))
+            return self.patches_diff_list_no_line_numbers
+        except Exception as e:
+            get_logger().error(f"Error removing line numbers from patches_diff_list, error: {e}")
+            return patches_diff_list
+
     async def _prepare_prediction_extended(self, model: str) -> dict:
         self.patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
                                                     max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
+
+        # create a copy of the patches_diff_list, without line numbers for '__new hunk__' sections
+        self.patches_diff_list_no_line_numbers = self.remove_line_numbers(self.patches_diff_list)
+
         if self.patches_diff_list:
             get_logger().info(f"Number of PR chunk calls: {len(self.patches_diff_list)}")
             get_logger().debug(f"PR diff:", artifact=self.patches_diff_list)
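The new `remove_line_numbers` helper strips the line numbers that the '__new hunk__' prompt format prepends to each diff line, so a second, number-free copy of every chunk can also be sent to the model. The per-line logic, demonstrated on a made-up hunk:

```python
def strip_leading_line_number(line: str) -> str:
    # If the line starts with digits, drop the digits plus the single
    # separator character after them, mirroring the loop above.
    if line.strip() and line[0].isdigit():
        for j, char in enumerate(line):
            if not char.isdigit():
                return line[j + 1:]
    return line

hunk = "12 +def foo():\n13 +    return 1"
print('\n'.join(strip_leading_line_number(l) for l in hunk.splitlines()))
# +def foo():
# +    return 1
```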
@@ -541,12 +610,14 @@ class PRCodeSuggestions:
             # parallelize calls to AI:
             if get_settings().pr_code_suggestions.parallel_calls:
                 prediction_list = await asyncio.gather(
-                    *[self._get_prediction(model, patches_diff) for patches_diff in self.patches_diff_list])
+                    *[self._get_prediction(model, patches_diff, patches_diff_no_line_numbers) for
+                      patches_diff, patches_diff_no_line_numbers in
+                      zip(self.patches_diff_list, self.patches_diff_list_no_line_numbers)])
                 self.prediction_list = prediction_list
             else:
                 prediction_list = []
-                for i, patches_diff in enumerate(self.patches_diff_list):
-                    prediction = await self._get_prediction(model, patches_diff)
+                for patches_diff, patches_diff_no_line_numbers in zip(self.patches_diff_list, self.patches_diff_list_no_line_numbers):
+                    prediction = await self._get_prediction(model, patches_diff, patches_diff_no_line_numbers)
                     prediction_list.append(prediction)

             data = {"code_suggestions": []}
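Both branches now walk the two diff lists in lockstep. A minimal sketch of the gather-over-zip pattern used in the parallel branch (the prediction function is a stub standing in for the real AI call):

```python
import asyncio

async def get_prediction(model: str, diff: str, diff_no_nums: str) -> str:
    return f"prediction for {diff}"  # stub in place of the real AI call

async def main():
    diffs = ["chunk-1", "chunk-2"]
    diffs_no_nums = ["chunk-1'", "chunk-2'"]
    # One coroutine per (diff, diff_no_line_numbers) pair, run concurrently.
    predictions = await asyncio.gather(
        *[get_prediction("model", d, dn) for d, dn in zip(diffs, diffs_no_nums)])
    print(predictions)

asyncio.run(main())
```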
@@ -555,18 +626,16 @@ class PRCodeSuggestions:
                     score_threshold = max(1, int(get_settings().pr_code_suggestions.suggestions_score_threshold))
                     for i, prediction in enumerate(predictions["code_suggestions"]):
                         try:
-                            if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
-                                score = int(prediction.get("score", 1))
-                                if score >= score_threshold:
-                                    data["code_suggestions"].append(prediction)
-                                else:
-                                    get_logger().info(
-                                        f"Removing suggestions {i} from call {j}, because score is {score}, and score_threshold is {score_threshold}",
-                                        artifact=prediction)
-                            else:
+                            score = int(prediction.get("score", 1))
+                            if score >= score_threshold:
                                 data["code_suggestions"].append(prediction)
+                            else:
+                                get_logger().info(
+                                    f"Removing suggestions {i} from call {j}, because score is {score}, and score_threshold is {score_threshold}",
+                                    artifact=prediction)
                         except Exception as e:
-                            get_logger().error(f"Error getting PR diff for suggestion {i} in call {j}, error: {e}")
+                            get_logger().error(f"Error getting PR diff for suggestion {i} in call {j}, error: {e}",
+                                               artifact={"prediction": prediction})
                     self.data = data
             else:
                 get_logger().warning(f"Empty PR diff list")
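With self-reflection no longer optional here, every prediction is score-gated. The filter reduces to a few lines (sketch):

```python
def filter_by_score(predictions: list, threshold: int) -> list:
    # Keep predictions whose score meets the threshold; missing scores
    # default to 1, as in prediction.get("score", 1) above.
    threshold = max(1, int(threshold))
    return [p for p in predictions if int(p.get("score", 1)) >= threshold]

print(filter_by_score([{"score": 7}, {"score": 3}], 5))  # [{'score': 7}]
```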
@@ -617,7 +686,7 @@ class PRCodeSuggestions:
         if get_settings().pr_code_suggestions.final_clip_factor != 1:
             max_len = max(
                 len(data_sorted),
-                get_settings().pr_code_suggestions.num_code_suggestions_per_chunk,
+                int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk),
             )
             new_len = int(0.5 + max_len * get_settings().pr_code_suggestions.final_clip_factor)
             if new_len < len(data_sorted):
@@ -650,10 +719,7 @@ class PRCodeSuggestions:
             header = f"Suggestion"
             delta = 66
             header += " " * delta
-            if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
-                pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td><td align=center>Score</td></tr>"""
-            else:
-                pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td></tr>"""
+            pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td><td align=center>Score</td></tr>"""
             pr_body += """<tbody>"""
             suggestions_labels = dict()
             # add all suggestions related to each label
@@ -664,12 +730,11 @@ class PRCodeSuggestions:
                 suggestions_labels[label].append(suggestion)

             # sort suggestions_labels by the suggestion with the highest score
-            if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
-                suggestions_labels = dict(
-                    sorted(suggestions_labels.items(), key=lambda x: max([s['score'] for s in x[1]]), reverse=True))
-                # sort the suggestions inside each label group by score
-                for label, suggestions in suggestions_labels.items():
-                    suggestions_labels[label] = sorted(suggestions, key=lambda x: x['score'], reverse=True)
+            suggestions_labels = dict(
+                sorted(suggestions_labels.items(), key=lambda x: max([s['score'] for s in x[1]]), reverse=True))
+            # sort the suggestions inside each label group by score
+            for label, suggestions in suggestions_labels.items():
+                suggestions_labels[label] = sorted(suggestions, key=lambda x: x['score'], reverse=True)

             counter_suggestions = 0
             for label, suggestions in suggestions_labels.items():
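The now-unconditional two-level sort orders label groups by their best suggestion, then suggestions within each group. The same idiom on sample data:

```python
groups = {'possible bug': [{'score': 4}, {'score': 9}], 'typo': [{'score': 6}]}
# Label groups ordered by their highest-scoring suggestion...
groups = dict(sorted(groups.items(),
                     key=lambda x: max(s['score'] for s in x[1]), reverse=True))
# ...then suggestions inside each group ordered by score.
for label, suggestions in groups.items():
    groups[label] = sorted(suggestions, key=lambda x: x['score'], reverse=True)
print(groups)  # 'possible bug' first, with scores [9, 4]
```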
@@ -728,16 +793,14 @@ class PRCodeSuggestions:

{example_code.rstrip()}
"""
-                        if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
-                            pr_body += f"<details><summary>Suggestion importance[1-10]: {suggestion['score']}</summary>\n\n"
-                            pr_body += f"Why: {suggestion['score_why']}\n\n"
-                            pr_body += f"</details>"
+                        pr_body += f"<details><summary>Suggestion importance[1-10]: {suggestion['score']}</summary>\n\n"
+                        pr_body += f"Why: {suggestion['score_why']}\n\n"
+                        pr_body += f"</details>"

                         pr_body += f"</details>"

                         # # add another column for 'score'
-                        if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
-                            pr_body += f"</td><td align=center>{suggestion['score']}\n\n"
+                        pr_body += f"</td><td align=center>{suggestion['score']}\n\n"

                         pr_body += f"</td></tr>"
                         counter_suggestions += 1
@@ -778,4 +841,3 @@ class PRCodeSuggestions:
             get_logger().info(f"Could not reflect on suggestions, error: {e}")
             return ""
         return response_reflect
-
pr_agent/tools/pr_description.py
@@ -9,17 +9,24 @@ from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_diff_multiple_patchs, \
-    OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD
+from pr_agent.algo.pr_processing import (OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD,
+                                         get_pr_diff,
+                                         get_pr_diff_multiple_patchs,
+                                         retry_with_fallback_models)
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import set_custom_labels
-from pr_agent.algo.utils import load_yaml, get_user_labels, ModelType, show_relevant_configurations, get_max_tokens, \
-    clip_tokens
+from pr_agent.algo.utils import (ModelType, PRDescriptionHeader, clip_tokens,
+                                 get_max_tokens, get_user_labels, load_yaml,
+                                 set_custom_labels,
+                                 show_relevant_configurations)
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, GithubProvider, get_git_provider_with_context
+from pr_agent.git_providers import (GithubProvider, get_git_provider,
+                                    get_git_provider_with_context)
 from pr_agent.git_providers.git_provider import get_main_pr_language
 from pr_agent.log import get_logger
 from pr_agent.servers.help import HelpMessage
+from pr_agent.tools.ticket_pr_compliance_check import (
+    extract_and_cache_pr_tickets, extract_ticket_links_from_pr_description,
+    extract_tickets)


 class PRDescription:
@@ -38,6 +45,7 @@ class PRDescription:
             self.git_provider.get_languages(), self.git_provider.get_files()
         )
         self.pr_id = self.git_provider.get_pr_id()
+        self.keys_fix = ["filename:", "language:", "changes_summary:", "changes_title:", "description:", "title:"]

         if get_settings().pr_description.enable_semantic_files_types and not self.git_provider.is_supported(
                 "gfm_markdown"):
@@ -60,6 +68,7 @@ class PRDescription:
             "enable_custom_labels": get_settings().config.enable_custom_labels,
             "custom_labels_class": "",  # will be filled if necessary in 'set_custom_labels' function
             "enable_semantic_files_types": get_settings().pr_description.enable_semantic_files_types,
+            "related_tickets": "",
         }

         self.user_description = self.git_provider.get_user_description()
@@ -87,6 +96,9 @@ class PRDescription:
             if get_settings().config.publish_output and not get_settings().config.get('is_auto_command', False):
                 self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)

+            # ticket extraction if exists
+            await extract_and_cache_pr_tickets(self.git_provider, self.vars)
+
             await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)

             if self.prediction:
@@ -126,7 +138,7 @@ class PRDescription:

             if get_settings().config.publish_output:
                 # publish labels
-                if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
+                if get_settings().pr_description.publish_labels and pr_labels and self.git_provider.is_supported("get_labels"):
                     original_labels = self.git_provider.get_pr_labels(update=True)
                     get_logger().debug(f"original labels", artifact=original_labels)
                     user_labels = get_user_labels(original_labels)
@@ -226,7 +238,7 @@ class PRDescription:
         file_description_str_list = []
         for i, result in enumerate(results):
             prediction_files = result.strip().removeprefix('```yaml').strip('`').strip()
-            if load_yaml(prediction_files) and prediction_files.startswith('pr_files'):
+            if load_yaml(prediction_files, keys_fix_yaml=self.keys_fix) and prediction_files.startswith('pr_files'):
                 prediction_files = prediction_files.removeprefix('pr_files:').strip()
                 file_description_str_list.append(prediction_files)
             else:
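`load_yaml` with `keys_fix_yaml` is the repo's guard against near-valid YAML from the model. A rough sketch of the general idea behind such a fallback (this is an assumption about the approach, not the library's actual implementation):

```python
import yaml

def load_yaml_with_fallback(text: str, keys_fix: list):
    # Sketch only: try a strict parse, then re-quote the value after each
    # known key and parse again. The real keys_fix_yaml logic may differ.
    try:
        return yaml.safe_load(text)
    except yaml.YAMLError:
        fixed = []
        for line in text.splitlines():
            for key in keys_fix:
                if line.strip().startswith(key) and not line.strip().endswith(key):
                    prefix, value = line.split(key, 1)
                    line = f'{prefix}{key} "{value.strip()}"'
            fixed.append(line)
        return yaml.safe_load('\n'.join(fixed))

print(load_yaml_with_fallback('title: review: pending', ['title:']))
# {'title': 'review: pending'}
```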
@@ -304,16 +316,16 @@ extra_file_yaml =

         # final processing
         self.prediction = prediction_headers + "\n" + "pr_files:\n" + files_walkthrough
-        if not load_yaml(self.prediction):
+        if not load_yaml(self.prediction, keys_fix_yaml=self.keys_fix):
             get_logger().error(f"Error getting valid YAML in large PR handling for describe {self.pr_id}")
-            if load_yaml(prediction_headers):
+            if load_yaml(prediction_headers, keys_fix_yaml=self.keys_fix):
                 get_logger().debug(f"Using only headers for describe {self.pr_id}")
                 self.prediction = prediction_headers

     async def extend_additional_files(self, remaining_files_list) -> str:
         prediction = self.prediction
         try:
-            original_prediction_dict = load_yaml(self.prediction)
+            original_prediction_dict = load_yaml(self.prediction, keys_fix_yaml=self.keys_fix)
             prediction_extra = "pr_files:"
             for file in remaining_files_list:
                 extra_file_yaml = f"""\
@@ -327,12 +339,12 @@ extra_file_yaml =
 additional files (token-limit)
 """
                 prediction_extra = prediction_extra + "\n" + extra_file_yaml.strip()
-            prediction_extra_dict = load_yaml(prediction_extra)
+            prediction_extra_dict = load_yaml(prediction_extra, keys_fix_yaml=self.keys_fix)
             # merge the two dictionaries
             if isinstance(original_prediction_dict, dict) and isinstance(prediction_extra_dict, dict):
                 original_prediction_dict["pr_files"].extend(prediction_extra_dict["pr_files"])
                 new_yaml = yaml.dump(original_prediction_dict)
-                if load_yaml(new_yaml):
+                if load_yaml(new_yaml, keys_fix_yaml=self.keys_fix):
                     prediction = new_yaml
             return prediction
         except Exception as e:
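The merge above splices overflow file entries into the original prediction and keeps the result only if it still parses. The same pattern with plain PyYAML on made-up documents:

```python
import yaml

original = yaml.safe_load("pr_files:\n- filename: a.py\n")
extra = yaml.safe_load("pr_files:\n- filename: b.py\n")
if isinstance(original, dict) and isinstance(extra, dict):
    original["pr_files"].extend(extra["pr_files"])
    new_yaml = yaml.dump(original)
    # Keep the merged document only if it round-trips through the parser.
    if yaml.safe_load(new_yaml):
        print(new_yaml)  # pr_files now lists a.py and b.py
```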
@@ -361,7 +373,7 @@ extra_file_yaml =

     def _prepare_data(self):
         # Load the AI prediction data into a dictionary
-        self.data = load_yaml(self.prediction.strip())
+        self.data = load_yaml(self.prediction.strip(), keys_fix_yaml=self.keys_fix)

         if get_settings().pr_description.add_original_user_description and self.user_description:
             self.data["User Description"] = self.user_description
@@ -494,7 +506,7 @@ extra_file_yaml =
                 pr_body += "</details>\n"
             elif 'pr_files' in key.lower() and get_settings().pr_description.enable_semantic_files_types:
                 changes_walkthrough, pr_file_changes = self.process_pr_files_prediction(changes_walkthrough, value)
-                changes_walkthrough = f"### **Changes walkthrough** 📝\n{changes_walkthrough}"
+                changes_walkthrough = f"{PRDescriptionHeader.CHANGES_WALKTHROUGH.value}\n{changes_walkthrough}"
             else:
                 # if the value is a list, join its items by comma
                 if isinstance(value, list):
pr_agent/tools/pr_generate_labels.py
@@ -9,7 +9,7 @@ from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, set_custom_labels, get_user_labels
+from pr_agent.algo.utils import get_user_labels, load_yaml, set_custom_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
pr_agent/tools/pr_help_message.py
@@ -1,20 +1,18 @@
-import os
-import traceback
-import zipfile
-import tempfile
 import copy
 from functools import partial
+from pathlib import Path

 from jinja2 import Environment, StrictUndefined

+from pr_agent.algo import MAX_TOKENS
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
 from pr_agent.algo.pr_processing import retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import ModelType, load_yaml
+from pr_agent.algo.utils import ModelType, clip_tokens, load_yaml
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, GithubProvider, BitbucketServerProvider, \
-    get_git_provider_with_context
+from pr_agent.git_providers import (BitbucketServerProvider, GithubProvider,
+                                    get_git_provider_with_context)
 from pr_agent.log import get_logger
@@ -67,83 +65,6 @@ class PRHelpMessage:
             question_str = ""
         return question_str

-    def get_sim_results_from_s3_db(self, embeddings):
-        get_logger().info("Loading the S3 index...")
-        sim_results = []
-        try:
-            from langchain_chroma import Chroma
-            from urllib import request
-            with tempfile.TemporaryDirectory() as temp_dir:
-                # Define the local file path within the temporary directory
-                local_file_path = os.path.join(temp_dir, 'chroma_db.zip')
-
-                bucket = 'pr-agent'
-                file_name = 'chroma_db.zip'
-                s3_url = f'https://{bucket}.s3.amazonaws.com/{file_name}'
-                request.urlretrieve(s3_url, local_file_path)
-
-                # # Download the file from S3 to the temporary directory
-                # s3 = boto3.client('s3')
-                # s3.download_file(bucket, file_name, local_file_path)
-
-                # Extract the contents of the zip file
-                with zipfile.ZipFile(local_file_path, 'r') as zip_ref:
-                    zip_ref.extractall(temp_dir)
-
-                vectorstore = Chroma(persist_directory=temp_dir + "/chroma_db",
-                                     embedding_function=embeddings)
-                sim_results = vectorstore.similarity_search_with_score(self.question_str, k=self.num_retrieved_snippets)
-        except Exception as e:
-            get_logger().error(f"Error while getting sim from S3: {e}",
-                               artifact={"traceback": traceback.format_exc()})
-        return sim_results
-
-    def get_sim_results_from_local_db(self, embeddings):
-        get_logger().info("Loading the local index...")
-        sim_results = []
-        try:
-            from langchain_chroma import Chroma
-            get_logger().info("Loading the Chroma index...")
-            db_path = "./docs/chroma_db.zip"
-            if not os.path.exists(db_path):
-                db_path = "/app/docs/chroma_db.zip"
-                if not os.path.exists(db_path):
-                    get_logger().error("Local db not found")
-                    return sim_results
-            with tempfile.TemporaryDirectory() as temp_dir:
-
-                # Extract the ZIP file
-                with zipfile.ZipFile(db_path, 'r') as zip_ref:
-                    zip_ref.extractall(temp_dir)
-
-                vectorstore = Chroma(persist_directory=temp_dir + "/chroma_db",
-                                     embedding_function=embeddings)
-
-                # Do similarity search
-                sim_results = vectorstore.similarity_search_with_score(self.question_str, k=self.num_retrieved_snippets)
-        except Exception as e:
-            get_logger().error(f"Error while getting sim from local db: {e}",
-                               artifact={"traceback": traceback.format_exc()})
-        return sim_results
-
-    def get_sim_results_from_pinecone_db(self, embeddings):
-        get_logger().info("Loading the Pinecone index...")
-        sim_results = []
-        try:
-            from langchain_pinecone import PineconeVectorStore
-            INDEX_NAME = "pr-agent-docs"
-            vectorstore = PineconeVectorStore(
-                index_name=INDEX_NAME, embedding=embeddings,
-                pinecone_api_key=get_settings().pinecone.api_key
-            )
-
-            # Do similarity search
-            sim_results = vectorstore.similarity_search_with_score(self.question_str, k=self.num_retrieved_snippets)
-        except Exception as e:
-            get_logger().error(f"Error while getting sim from Pinecone db: {e}",
-                               artifact={"traceback": traceback.format_exc()})
-        return sim_results
-
     async def run(self):
         try:
             if self.question_str:
@@ -157,38 +78,49 @@ class PRHelpMessage:
                 get_logger().error("The `Help` tool chat feature requires an OpenAI API key for calculating embeddings")
                 return

-            # Initialize embeddings
-            from langchain_openai import OpenAIEmbeddings
-            embeddings = OpenAIEmbeddings(model="text-embedding-3-small",
-                                          api_key=get_settings().openai.key)
-
-            # Get similar snippets via similarity search
-            if get_settings().pr_help.force_local_db:
-                sim_results = self.get_sim_results_from_local_db(embeddings)
-            elif get_settings().get('pinecone.api_key'):
-                sim_results = self.get_sim_results_from_pinecone_db(embeddings)
-            else:
-                sim_results = self.get_sim_results_from_s3_db(embeddings)
-                if not sim_results:
-                    get_logger().info("Failed to load the S3 index. Loading the local index...")
-                    sim_results = self.get_sim_results_from_local_db(embeddings)
-            if not sim_results:
-                get_logger().error("Failed to retrieve similar snippets. Exiting...")
-                return
-
-            # Prepare relevant snippets
-            relevant_pages_full, relevant_snippets_full_header, relevant_snippets_str = \
-                await self.prepare_relevant_snippets(sim_results)
-            self.vars['snippets'] = relevant_snippets_str.strip()
+            # current path
+            docs_path = Path(__file__).parent.parent.parent / 'docs' / 'docs'
+            # get all the 'md' files inside docs_path and its subdirectories
+            md_files = list(docs_path.glob('**/*.md'))
+            folders_to_exclude = ['/finetuning_benchmark/']
+            files_to_exclude = {'EXAMPLE_BEST_PRACTICE.md', 'compression_strategy.md', '/docs/overview/index.md'}
+            md_files = [file for file in md_files if not any(folder in str(file) for folder in folders_to_exclude) and not any(file.name == file_to_exclude for file_to_exclude in files_to_exclude)]
+
+            # sort the 'md_files' so that 'priority_files' will be at the top
+            priority_files_strings = ['/docs/index.md', '/usage-guide', 'tools/describe.md', 'tools/review.md',
+                                      'tools/improve.md', '/faq']
+            md_files_priority = [file for file in md_files if
+                                 any(priority_string in str(file) for priority_string in priority_files_strings)]
+            md_files_not_priority = [file for file in md_files if file not in md_files_priority]
+            md_files = md_files_priority + md_files_not_priority
+
+            docs_prompt = ""
+            for file in md_files:
+                try:
+                    with open(file, 'r') as f:
+                        file_path = str(file).replace(str(docs_path), '')
+                        docs_prompt += f"\n==file name==\n\n{file_path}\n\n==file content==\n\n{f.read().strip()}\n=========\n\n"
+                except Exception as e:
+                    get_logger().error(f"Error while reading the file {file}: {e}")
+            token_count = self.token_handler.count_tokens(docs_prompt)
+            get_logger().debug(f"Token count of full documentation website: {token_count}")
+
+            model = get_settings().config.model
+            max_tokens_full = MAX_TOKENS[model]  # note - here we take the actual max tokens, without any reductions. we do aim to get the full documentation website in the prompt
+            delta_output = 2000
+            if token_count > max_tokens_full - delta_output:
+                get_logger().info(f"Token count {token_count} exceeds the limit {max_tokens_full - delta_output}. Skipping the PR Help message.")
+                docs_prompt = clip_tokens(docs_prompt, max_tokens_full - delta_output)
+            self.vars['snippets'] = docs_prompt.strip()

             # run the AI model
             response = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)
             response_yaml = load_yaml(response)
             response_str = response_yaml.get('response')
-            relevant_snippets_numbers = response_yaml.get('relevant_snippets')
+            relevant_sections = response_yaml.get('relevant_sections')

-            if not relevant_snippets_numbers:
-                get_logger().info(f"Could not find relevant snippets for the question: {self.question_str}")
+            if not relevant_sections:
+                get_logger().info(f"Could not find relevant answer for the question: {self.question_str}")
                 if get_settings().config.publish_output:
                     answer_str = f"### Question: \n{self.question_str}\n\n"
                     answer_str += f"### Answer:\n\n"
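The rewritten `run` drops vector retrieval entirely: it concatenates the whole documentation site into one prompt and clips it to the model's window. A compact sketch of that flow, with a stub token counter standing in for the repo's TokenHandler and clip_tokens:

```python
from pathlib import Path

def count_tokens(text: str) -> int:
    return len(text.split())  # stub; the real code uses a tokenizer

def build_docs_prompt(docs_path: Path, max_tokens_full: int, delta_output: int = 2000) -> str:
    prompt = ""
    for md_file in sorted(docs_path.glob('**/*.md')):
        rel = str(md_file).replace(str(docs_path), '')
        prompt += f"\n==file name==\n\n{rel}\n\n==file content==\n\n{md_file.read_text().strip()}\n=========\n\n"
    budget = max_tokens_full - delta_output
    if count_tokens(prompt) > budget:
        # crude stand-in for clip_tokens: keep what fits in the budget
        prompt = ' '.join(prompt.split()[:budget])
    return prompt.strip()
```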
@@ -202,16 +134,15 @@ class PRHelpMessage:
                 answer_str += f"### Question: \n{self.question_str}\n\n"
                 answer_str += f"### Answer:\n{response_str.strip()}\n\n"
                 answer_str += f"#### Relevant Sources:\n\n"
-                paged_published = []
-                for page in relevant_snippets_numbers:
-                    page = int(page - 1)
-                    if page < len(relevant_pages_full) and page >= 0:
-                        if relevant_pages_full[page] in paged_published:
-                            continue
-                        link = f"{relevant_pages_full[page]}{relevant_snippets_full_header[page]}"
-                        # answer_str += f"> - [{relevant_pages_full[page]}]({link})\n"
-                        answer_str += f"> - {link}\n"
-                        paged_published.append(relevant_pages_full[page])
+                base_path = "https://qodo-merge-docs.qodo.ai/"
+                for section in relevant_sections:
+                    file = section.get('file_name').strip().removesuffix('.md')
+                    if str(section['relevant_section_header_string']).strip():
+                        markdown_header = section['relevant_section_header_string'].strip().strip('#').strip().lower().replace(' ', '-').replace("'", '').replace('(', '').replace(')', '').replace(',', '').replace('.', '').replace('?', '').replace('!', '')
+                        answer_str += f"> - {base_path}{file}#{markdown_header}\n"
+                    else:
+                        answer_str += f"> - {base_path}{file}\n"

             # publish the answer
             if get_settings().config.publish_output:
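The chain of `replace` calls above rebuilds the URL fragment that MkDocs-style sites generate for a section header. Isolated, the transformation looks like this:

```python
def header_to_anchor(header: str) -> str:
    # Strip '#' markers, lowercase, hyphenate spaces, drop punctuation,
    # matching the chained replaces in the diff above.
    slug = header.strip().strip('#').strip().lower().replace(' ', '-')
    for ch in ("'", '(', ')', ',', '.', '?', '!'):
        slug = slug.replace(ch, '')
    return slug

print(header_to_anchor("## How it works?"))  # how-it-works
```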
pr_agent/tools/pr_line_questions.py
@@ -6,8 +6,8 @@ from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, \
-    extract_hunk_lines_from_patch
+from pr_agent.algo.git_patch_processing import (
+    convert_to_hunks_with_lines_numbers, extract_hunk_lines_from_patch)
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.algo.utils import ModelType
pr_agent/tools/pr_reviewer.py
@@ -1,20 +1,30 @@
 import copy
 import datetime
+import traceback
 from collections import OrderedDict
 from functools import partial
 from typing import List, Tuple

 from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, add_ai_metadata_to_diff_files
+from pr_agent.algo.pr_processing import (add_ai_metadata_to_diff_files,
+                                         get_pr_diff,
+                                         retry_with_fallback_models)
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import github_action_output, load_yaml, ModelType, \
-    show_relevant_configurations, convert_to_markdown_v2, PRReviewHeader
+from pr_agent.algo.utils import (ModelType, PRReviewHeader,
+                                 convert_to_markdown_v2, github_action_output,
+                                 load_yaml, show_relevant_configurations)
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
-from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
+from pr_agent.git_providers import (get_git_provider,
+                                    get_git_provider_with_context)
+from pr_agent.git_providers.git_provider import (IncrementalPR,
+                                                 get_main_pr_language)
 from pr_agent.log import get_logger
 from pr_agent.servers.help import HelpMessage
+from pr_agent.tools.ticket_pr_compliance_check import (
+    extract_and_cache_pr_tickets, extract_tickets)


 class PRReviewer:
@@ -84,6 +94,7 @@ class PRReviewer:
             "custom_labels": "",
             "enable_custom_labels": get_settings().config.enable_custom_labels,
             "is_ai_metadata": get_settings().get("config.enable_ai_metadata", False),
+            "related_tickets": get_settings().get('related_tickets', []),
         }

         self.token_handler = TokenHandler(
@@ -121,6 +132,9 @@ class PRReviewer:
                               'config': dict(get_settings().config)}
             get_logger().debug("Relevant configs", artifacts=relevant_configs)

+            # ticket extraction if exists
+            await extract_and_cache_pr_tickets(self.git_provider, self.vars)
+
             if self.incremental.is_incremental and hasattr(self.git_provider, "unreviewed_files_set") and not self.git_provider.unreviewed_files_set:
                 get_logger().info(f"Incremental review is enabled for {self.pr_url} but there are no new files")
                 previous_review_url = ""
@@ -207,7 +221,7 @@ class PRReviewer:
         first_key = 'review'
         last_key = 'security_concerns'
         data = load_yaml(self.prediction.strip(),
-                         keys_fix_yaml=["estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
+                         keys_fix_yaml=["ticket_compliance_check", "estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
                                         "relevant_file:", "relevant_line:", "suggestion:"],
                          first_key=first_key, last_key=last_key)
         github_action_output(data, 'review')
@@ -282,7 +296,7 @@ class PRReviewer:
         first_key = 'review'
         last_key = 'security_concerns'
         data = load_yaml(self.prediction.strip(),
-                         keys_fix_yaml=["estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
+                         keys_fix_yaml=["ticket_compliance_check", "estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
                                         "relevant_file:", "relevant_line:", "suggestion:"],
                          first_key=first_key, last_key=last_key)
         comments: List[str] = []
@@ -401,7 +415,16 @@ class PRReviewer:
         review_labels = []
         if get_settings().pr_reviewer.enable_review_labels_effort:
             estimated_effort = data['review']['estimated_effort_to_review_[1-5]']
-            estimated_effort_number = int(estimated_effort.split(',')[0])
+            estimated_effort_number = 0
+            if isinstance(estimated_effort, str):
+                try:
+                    estimated_effort_number = int(estimated_effort.split(',')[0])
+                except ValueError:
+                    get_logger().warning(f"Invalid estimated_effort value: {estimated_effort}")
+            elif isinstance(estimated_effort, int):
+                estimated_effort_number = estimated_effort
+            else:
+                get_logger().warning(f"Unexpected type for estimated_effort: {type(estimated_effort)}")
             if 1 <= estimated_effort_number <= 5:  # 1, because ...
                 review_labels.append(f'Review effort [1-5]: {estimated_effort_number}')
         if get_settings().pr_reviewer.enable_review_labels_security and get_settings().pr_reviewer.require_security_review:
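The expanded branch makes the effort label tolerant of the model returning either an int or a string like "3, because ...". The same guard in isolation:

```python
def parse_effort(estimated_effort) -> int:
    # Returns 0 for anything unparseable, which fails the later 1..5
    # range check and skips the label, as in the diff above.
    if isinstance(estimated_effort, str):
        try:
            return int(estimated_effort.split(',')[0])
        except ValueError:
            return 0
    if isinstance(estimated_effort, int):
        return estimated_effort
    return 0

print(parse_effort("3, because the PR is small"))  # 3
print(parse_effort(4))                             # 4
print(parse_effort(None))                          # 0
```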
pr_agent/tools/pr_similar_issue.py
@@ -34,9 +34,9 @@ class PRSimilarIssue:

         if get_settings().pr_similar_issue.vectordb == "pinecone":
             try:
+                import pandas as pd
                 import pinecone
                 from pinecone_datasets import Dataset, DatasetMetadata
-                import pandas as pd
             except:
                 raise Exception("Please install 'pinecone' and 'pinecone_datasets' to use pinecone as vectordb")
             # assuming pinecone api key and environment are set in secrets file
@@ -111,7 +111,7 @@ class PRSimilarIssue:

         elif get_settings().pr_similar_issue.vectordb == "lancedb":
             try:
                 import lancedb  # import lancedb only if needed
             except:
                 raise Exception("Please install lancedb to use lancedb as vectordb")
             self.db = lancedb.connect(get_settings().lancedb.uri)
pr_agent/tools/pr_update_changelog.py
@@ -3,14 +3,16 @@ from datetime import date
 from functools import partial
 from time import sleep
 from typing import Tuple

 from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.algo.utils import ModelType, show_relevant_configurations
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, GithubProvider
+from pr_agent.git_providers import GithubProvider, get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
 from pr_agent.log import get_logger
pr_agent/tools/ticket_pr_compliance_check.py (new file, 126 lines)
@@ -0,0 +1,126 @@
import re
import traceback

from pr_agent.config_loader import get_settings
from pr_agent.git_providers import GithubProvider
from pr_agent.log import get_logger

# Compile the regex pattern once, outside the function
GITHUB_TICKET_PATTERN = re.compile(
    r'(https://github[^/]+/[^/]+/[^/]+/issues/\d+)|(\b(\w+)/(\w+)#(\d+)\b)|(#\d+)'
)


def find_jira_tickets(text):
    # Regular expression patterns for JIRA tickets
    patterns = [
        r'\b[A-Z]{2,10}-\d{1,7}\b',  # Standard JIRA ticket format (e.g., PROJ-123)
        r'(?:https?://[^\s/]+/browse/)?([A-Z]{2,10}-\d{1,7})\b'  # JIRA URL or just the ticket
    ]

    tickets = set()
    for pattern in patterns:
        matches = re.findall(pattern, text)
        for match in matches:
            if isinstance(match, tuple):
                # If it's a tuple (from the URL pattern), take the last non-empty group
                ticket = next((m for m in reversed(match) if m), None)
            else:
                ticket = match
            if ticket:
                tickets.add(ticket)

    return list(tickets)


def extract_ticket_links_from_pr_description(pr_description, repo_path, base_url_html='https://github.com'):
    """
    Extract all ticket links from PR description
    """
    github_tickets = set()
    try:
        # Use the updated pattern to find matches
        matches = GITHUB_TICKET_PATTERN.findall(pr_description)

        for match in matches:
            if match[0]:  # Full URL match
                github_tickets.add(match[0])
            elif match[1]:  # Shorthand notation match: owner/repo#issue_number
                owner, repo, issue_number = match[2], match[3], match[4]
                github_tickets.add(f'{base_url_html.strip("/")}/{owner}/{repo}/issues/{issue_number}')
            else:  # #123 format
                issue_number = match[5][1:]  # remove #
                if issue_number.isdigit() and len(issue_number) < 5 and repo_path:
                    github_tickets.add(f'{base_url_html.strip("/")}/{repo_path}/issues/{issue_number}')
    except Exception as e:
        get_logger().error(f"Error extracting tickets error= {e}",
                           artifact={"traceback": traceback.format_exc()})

    return list(github_tickets)


async def extract_tickets(git_provider):
    MAX_TICKET_CHARACTERS = 10000
    try:
        if isinstance(git_provider, GithubProvider):
            user_description = git_provider.get_user_description()
            tickets = extract_ticket_links_from_pr_description(user_description, git_provider.repo, git_provider.base_url_html)
            tickets_content = []
            if tickets:
                for ticket in tickets:
                    # extract ticket number and repo name
                    repo_name, original_issue_number = git_provider._parse_issue_url(ticket)

                    # get the ticket object
                    try:
                        issue_main = git_provider.repo_obj.get_issue(original_issue_number)
                    except Exception as e:
                        get_logger().error(f"Error getting issue_main error= {e}",
                                           artifact={"traceback": traceback.format_exc()})
                        continue

                    # clip issue_main.body max length
                    issue_body_str = issue_main.body
                    if not issue_body_str:
                        issue_body_str = ""
                    if len(issue_body_str) > MAX_TICKET_CHARACTERS:
                        issue_body_str = issue_body_str[:MAX_TICKET_CHARACTERS] + "..."

                    # extract labels
                    labels = []
                    try:
                        for label in issue_main.labels:
                            if isinstance(label, str):
                                labels.append(label)
                            else:
                                labels.append(label.name)
                    except Exception as e:
                        get_logger().error(f"Error extracting labels error= {e}",
                                           artifact={"traceback": traceback.format_exc()})
                    tickets_content.append(
                        {'ticket_id': issue_main.number,
                         'ticket_url': ticket, 'title': issue_main.title, 'body': issue_body_str,
                         'labels': ", ".join(labels)})
            return tickets_content

    except Exception as e:
        get_logger().error(f"Error extracting tickets error= {e}",
                           artifact={"traceback": traceback.format_exc()})


async def extract_and_cache_pr_tickets(git_provider, vars):
    if not get_settings().get('pr_reviewer.require_ticket_analysis_review', False):
        return
    related_tickets = get_settings().get('related_tickets', [])
    if not related_tickets:
        tickets_content = await extract_tickets(git_provider)
        if tickets_content:
            get_logger().info("Extracted tickets from PR description", artifact={"tickets": tickets_content})
            vars['related_tickets'] = tickets_content
            get_settings().set('related_tickets', tickets_content)
    else:  # if tickets are already cached
        get_logger().info("Using cached tickets", artifact={"tickets": related_tickets})
        vars['related_tickets'] = related_tickets


def check_tickets_relevancy():
    return True
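A quick usage sketch of the new extractor, exercising all three branches of `GITHUB_TICKET_PATTERN` (the sample description and repo path are made up):

```python
from pr_agent.tools.ticket_pr_compliance_check import \
    extract_ticket_links_from_pr_description

description = ("Fixes https://github.com/qodo-ai/pr-agent/issues/1, "
               "relates to owner/repo#12, see also #123")
links = extract_ticket_links_from_pr_description(description, repo_path="qodo-ai/pr-agent")
print(sorted(links))
# ['https://github.com/owner/repo/issues/12',
#  'https://github.com/qodo-ai/pr-agent/issues/1',
#  'https://github.com/qodo-ai/pr-agent/issues/123']
```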
pyproject.toml
@@ -6,19 +6,19 @@ build-backend = "setuptools.build_meta"
 name = "pr-agent"
 version = "0.2.4"

-authors = [{name= "CodiumAI", email = "tal.r@codium.ai"}]
+authors = [{ name = "CodiumAI", email = "tal.r@codium.ai" }]

 maintainers = [
-    {name = "Tal Ridnik", email = "tal.r@codium.ai"},
-    {name = "Ori Kotek", email = "ori.k@codium.ai"},
-    {name = "Hussam Lawen", email = "hussam.l@codium.ai"},
+    { name = "Tal Ridnik", email = "tal.r@codium.ai" },
+    { name = "Ori Kotek", email = "ori.k@codium.ai" },
+    { name = "Hussam Lawen", email = "hussam.l@codium.ai" },
 ]

 description = "CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedbacks and suggestions."
 readme = "README.md"
 requires-python = ">=3.10"
 keywords = ["AI", "Agents", "Pull Request", "Automation", "Code Review"]
-license = {name = "Apache 2.0", file = "LICENSE"}
+license = { name = "Apache 2.0", file = "LICENSE" }

 classifiers = [
     "Intended Audience :: Developers",
@@ -28,7 +28,7 @@ dynamic = ["dependencies"]


 [tool.setuptools.dynamic]
-dependencies = {file = ["requirements.txt"]}
+dependencies = { file = ["requirements.txt"] }

 [project.urls]
 "Homepage" = "https://github.com/Codium-ai/pr-agent"
@@ -40,41 +40,43 @@ license-files = ["LICENSE"]

 [tool.setuptools.packages.find]
 where = ["."]
-include = ["pr_agent*"] # include pr_agent and any sub-packages it finds under it.
+include = [
+    "pr_agent*",
+] # include pr_agent and any sub-packages it finds under it.

 [project.scripts]
 pr-agent = "pr_agent.cli:run"


 [tool.ruff]

 line-length = 120

-select = [
+lint.select = [
     "E", # Pyflakes
     "F", # Pyflakes
     "B", # flake8-bugbear
     "I001", # isort basic checks
     "I002", # isort missing-required-import
 ]

 # First commit - only fixing isort
-fixable = [
+lint.fixable = [
     "I001", # isort basic checks
 ]

-unfixable = [
+lint.unfixable = [
     "B", # Avoid trying to fix flake8-bugbear (`B`) violations.
 ]

-exclude = [
-    "api/code_completions",
-]
-
-ignore = [
-    "E999", "B008"
-]
+lint.exclude = ["api/code_completions"]

-[tool.ruff.per-file-ignores]
-"__init__.py" = ["E402"] # Ignore `E402` (import violations) in all `__init__.py` files, and in `path/to/file.py`.
-# TODO: should decide if maybe not to ignore these.
+lint.ignore = ["E999", "B008"]
+
+[tool.ruff.lint.per-file-ignores]
+"__init__.py" = [
+    "E402",
+] # Ignore `E402` (import violations) in all `__init__.py` files, and in `path/to/file.py`.
+
+[tool.bandit]
+exclude_dirs = ["tests"]
+skips = ["B101"]
+tests = []
requirements-dev.txt
@@ -1,3 +1,4 @@
 pytest==7.4.0
 poetry
 twine
+pre-commit>=4,<5
requirements.txt
@@ -1,26 +1,28 @@
 aiohttp==3.9.5
-anthropic[vertex]==0.21.3
+anthropic[vertex]==0.39.0
 atlassian-python-api==3.41.4
 azure-devops==7.1.0b3
 azure-identity==1.15.0
 boto3==1.33.6
+certifi==2024.8.30
 dynaconf==3.2.4
 fastapi==0.111.0
 GitPython==3.1.41
 google-cloud-aiplatform==1.38.0
+google-generativeai==0.8.3
 google-cloud-storage==2.10.0
 Jinja2==3.1.2
-litellm==1.43.13
+litellm==1.52.12
 loguru==0.7.2
 msrest==0.7.1
-openai==1.46.0
+openai==1.55.3
 pytest==7.4.0
 PyGithub==1.59.*
 PyYAML==6.0.1
 python-gitlab==3.15.0
 retry==0.9.2
 starlette-context==0.3.6
-tiktoken==0.7.0
+tiktoken==0.8.0
 ujson==5.8.0
 uvicorn==0.22.0
 tenacity==8.2.3
@@ -28,12 +30,6 @@ gunicorn==22.0.0
 pytest-cov==5.0.0
 pydantic==2.8.2
 html2text==2024.2.26
-# help bot
-langchain==0.3.0
-langchain-openai==0.2.0
-langchain-pinecone==0.2.0
-langchain-chroma==0.1.4
-chromadb==0.5.7
 # Uncomment the following lines to enable the 'similar issue' tool
 # pinecone-client
 # pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
@@ -32,4 +32,3 @@ def main():
 if __name__ == '__main__':
     main()
 """
-
tests/e2e_tests/test_bitbucket_app.py
@@ -5,16 +5,16 @@ import time
 from datetime import datetime

 import jwt
-from atlassian.bitbucket import Cloud
-
 import requests
+from atlassian.bitbucket import Cloud
 from requests.auth import HTTPBasicAuth

 from pr_agent.config_loader import get_settings
-from pr_agent.log import setup_logger, get_logger
-from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
-    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES
+from pr_agent.log import get_logger, setup_logger
+from tests.e2e_tests.e2e_utils import (FILE_PATH,
+                                       IMPROVE_START_WITH_REGEX_PATTERN,
+                                       NEW_FILE_CONTENT, NUM_MINUTES,
+                                       PR_HEADER_START_WITH, REVIEW_START_WITH)

 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)
tests/e2e_tests/test_github_app.py
@@ -5,9 +5,11 @@ from datetime import datetime

 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
-from pr_agent.log import setup_logger, get_logger
-from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
-    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES
+from pr_agent.log import get_logger, setup_logger
+from tests.e2e_tests.e2e_utils import (FILE_PATH,
+                                       IMPROVE_START_WITH_REGEX_PATTERN,
+                                       NEW_FILE_CONTENT, NUM_MINUTES,
+                                       PR_HEADER_START_WITH, REVIEW_START_WITH)

 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)
tests/e2e_tests/test_gitlab_webhook.py
@@ -7,9 +7,11 @@ import gitlab

 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
-from pr_agent.log import setup_logger, get_logger
-from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
-    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES
+from pr_agent.log import get_logger, setup_logger
+from tests.e2e_tests.e2e_utils import (FILE_PATH,
+                                       IMPROVE_START_WITH_REGEX_PATTERN,
+                                       NEW_FILE_CONTENT, NUM_MINUTES,
+                                       PR_HEADER_START_WITH, REVIEW_START_WITH)

 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)
tests/unittest/test_bitbucket_provider.py
@@ -1,8 +1,10 @@
+from unittest.mock import MagicMock
+
+from atlassian.bitbucket import Bitbucket
+
+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.git_providers import BitbucketServerProvider
 from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
-from unittest.mock import MagicMock
-from atlassian.bitbucket import Bitbucket
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo


 class TestBitbucketProvider:
@@ -1,4 +1,5 @@
 from unittest.mock import MagicMock
+
 from pr_agent.git_providers.codecommit_client import CodeCommitClient


@@ -1,9 +1,11 @@
-import pytest
 from unittest.mock import patch
-from pr_agent.git_providers.codecommit_provider import CodeCommitFile
-from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
-from pr_agent.git_providers.codecommit_provider import PullRequestCCMimic
+
+import pytest
+
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.git_providers.codecommit_provider import (CodeCommitFile,
+                                                        CodeCommitProvider,
+                                                        PullRequestCCMimic)


 class TestCodeCommitFile:
Some files were not shown because too many files have changed in this diff.