Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-08 23:00:43 +08:00)

Compare commits: ok/json_lo ... v0.10 (149 commits)
Commits (SHA1):
416a5495da
a2b27dcac8
d8e4e2e8fd
896a81d173
b216af8f04
388cc740b6
6214494c84
762a6981e1
b362c406bc
7a342d3312
2e95988741
9478447141
082293b48c
e1d92206f3
557ec72bfe
6b4b16dcf9
c4899a6c54
24d82e65cb
2567a6cf27
94cb6b9795
e878bbbe36
7d89b82967
c5f9bbbf92
a5e5a82952
ccbb62b50a
1df36c6a44
9e5e9afe92
5e43c202dd
37e6608e68
f64d5f1e2a
8fdf174dec
29d4f98b19
737792d83c
7e5889061c
755e04cf65
44d6c95714
14610d5375
f9c832d6cb
c2bec614e5
49725e92f2
a1e32d8331
0293412a42
10ec0a1812
69b68b78f5
c5bc4b44ff
39e5102a2e
6c82bc9a3e
54f41dd603
094f641fb5
a35a75eb34
5a7c118b56
cf9e0fbbc5
ef9af261ed
ff79776410
ec3f2fb485
94a2a5e527
ea4bc548fc
1eefd3365b
db37ee819a
e352c98ce8
e96b03da57
1d2aedf169
4c484f8e86
8a79114ed9
cd69f43c77
6d6d864417
b286c8ed20
7238c81f0c
62412f8cd4
5d2bdadb45
06d030637c
8e3fa3926a
92071fcf1c
fed1c160eb
e37daf6987
8fc663911f
bb2760ae41
3548b88463
c917e48098
e6ef123ce5
194bfe1193
e456cf36aa
fe3527de3c
b99c769b53
60bdfb78df
c0b3c76884
e1370a8385
c623c3baf4
d0f3a4139d
3ddc7e79d1
3e14edfd4e
15573e2286
ce64877063
6666a128ee
9fbf89670d
ad1c51c536
9ab7ccd20d
c907f93ab8
29a8cf8357
7b6a6c7164
cf4d007737
a751bb0ef0
26d6280a20
32a19fdab6
775ccb3f25
a1c6c57f7b
73bb70fef4
dcac6c145c
4bda9dfe04
66644f0224
e74bb80668
e06fb534d3
71a341855e
7d949ad6e2
4b5f86fcf0
cd11f51df0
b40c0b9b23
816ddeeb9e
11f01a226c
b57ec301e8
71da20ea7e
c895657310
eda20ccca9
aed113cd79
0ab07a46c6
5f32e28933
7538c4dd2f
e3845283f8
a85921d3c5
27b64fbcaf
8d50f2ae82
e97a03f522
2e3344b5b0
e1b51eace7
49e3d5ec5f
afa78ed3fb
72d5e4748e
61d3e1ebf4
055b5ea700
3434296792
ae375c2ff0
3d5efdf4f3
e83747300d
013a689b33
e6bea76eee
414f2b6767
6541575a0e
02570ea797
65bb70a1dd
.github/workflows/pr-agent-review.yaml (vendored, 1 line changed)

@@ -26,5 +26,6 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           PINECONE.API_KEY: ${{ secrets.PINECONE_API_KEY }}
           PINECONE.ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
+          GITHUB_ACTION.AUTO_REVIEW: true
 
 
.pr_agent.toml (new file, 2 lines)

@@ -0,0 +1,2 @@
+[pr_reviewer]
+enable_review_labels_effort = true
INSTALL.md (38 lines changed)

@@ -29,39 +29,43 @@ There are several ways to use PR-Agent:
 
 ### Use Docker image (no installation required)
 
-To request a review for a PR, or ask a question about a PR, you can run directly from the Docker image. Here's how:
+A list of the relevant tools can be found in the [tools guide](./docs/TOOLS_GUIDE.md).
 
-For GitHub:
+To invoke a tool (for example `review`), you can run directly from the Docker image. Here's how:
+
+- For GitHub:
 ```
 docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
 ```
-For GitLab:
+
+- For GitLab:
 ```
 docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
 ```
-For BitBucket:
+
+Note: If you have a dedicated GitLab instance, you need to specify the custom url as variable:
+```
+docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> GITLAB.URL=<your gitlab instance url> codiumai/pr-agent:latest --pr_url <pr_url> review
+```
+
+- For BitBucket:
 ```
 docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
 ```
 
 For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the environment variables expected names and values.
 
+---
-Similarly, to ask a question about a PR, run the following command:
-```
-docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent --pr_url <pr_url> ask "<your question>"
-```
-
-A list of the relevant tools can be found in the [tools guide](./docs/TOOLS_GUIDE.md).
-
-Note: If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
+If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
 ```bash
 docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent@sha256:71b5ee15df59c745d352d84752d01561ba64b6d51327f97d46152f0c58a5f678 --pr_url <pr_url> review
 ```
-in addition, you can run a [specific released versions](./RELEASE_NOTES.md) of pr-agent, for example:
+Or you can run a [specific released versions](./RELEASE_NOTES.md) of pr-agent, for example:
 ```
-codiumai/pr-agent@v0.8
+codiumai/pr-agent@v0.9
 ```
 
 ---

@@ -368,7 +372,7 @@ PYTHONPATH="/PATH/TO/PROJECTS/pr-agent" python pr_agent/cli.py \
 ```
 WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
 ```
-3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [Method 5](#method-5-run-as-a-github-app) steps 4-7.
+3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [Method 5](#run-as-a-github-app) steps 4-7.
 4. In the secrets file, fill in the following:
 - Your OpenAI key.
 - In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.

@@ -406,9 +410,9 @@ BITBUCKET_BEARER_TOKEN: <your token>
 You can get a Bitbucket token for your repository by following Repository Settings -> Security -> Access Tokens.
 
 
-### Run on a hosted Bitbucket app
+### Run using CodiumAI-hosted Bitbucket app
 
-Please contact <support@codium.ai> if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implmentation.
+Please contact <support@codium.ai> or visit [CodiumAI pricing page](https://www.codium.ai/pricing/) if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implementation.
README.md

@@ -31,6 +31,8 @@ CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull
 ‣ **Find Similar Issue ([`/similar_issue`](./docs/SIMILAR_ISSUE.md))**: Automatically retrieves and presents similar issues
 \
 ‣ **Add Documentation ([`/add_docs`](./docs/ADD_DOCUMENTATION.md))**: Automatically adds documentation to un-documented functions/classes in the PR.
+\
+‣ **Generate Custom Labels ([`/generate_labels`](./docs/GENERATE_CUSTOM_LABELS.md))**: Automatically suggests custom labels based on the PR code changes.
 
 See the [Installation Guide](./INSTALL.md) for instructions how to install and run the tool on different platforms.
 

@@ -115,6 +117,7 @@ See the [Tools Guide](./docs/TOOLS_GUIDE.md) for detailed description of the dif
 | | Update CHANGELOG.md | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | |
 | | Find similar issue | :white_check_mark: | | | | | |
 | | Add Documentation | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | :white_check_mark: |
+| | Generate Labels | :white_check_mark: | :white_check_mark: | | | | |
 | | | | | | | |
 | USAGE | CLI | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
 | | App / webhook | :white_check_mark: | :white_check_mark: | | | |
RELEASE_NOTES.md

@@ -1,3 +1,50 @@
+## [Version 0.10] - 2023-11-15
+- codiumai/pr-agent:0.10
+- codiumai/pr-agent:0.10-github_app
+- codiumai/pr-agent:0.10-bitbucket-app
+- codiumai/pr-agent:0.10-gitlab_webhook
+- codiumai/pr-agent:0.10-github_polling
+- codiumai/pr-agent:0.10-github_action
+
+### Added::Algo
+- Review tool now works with [persistent comments](https://github.com/Codium-ai/pr-agent/pull/451) by default
+- Bitbucket now publishes review suggestions with [code links](https://github.com/Codium-ai/pr-agent/pull/428)
+- Enabling to limit [max number of tokens](https://github.com/Codium-ai/pr-agent/pull/437/files)
+- Support ['gpt-4-1106-preview'](https://github.com/Codium-ai/pr-agent/pull/437/files) model
+- Support for Google's [Vertex AI](https://github.com/Codium-ai/pr-agent/pull/436)
+- Implementing [thresholds](https://github.com/Codium-ai/pr-agent/pull/423) for incremental PR reviews
+- Decoupled custom labels from [PR type](https://github.com/Codium-ai/pr-agent/pull/431)
+
+### Fixed
+- Fixed bug in [parsing quotes](https://github.com/Codium-ai/pr-agent/pull/446) in CLI
+- Preserve [user-added labels](https://github.com/Codium-ai/pr-agent/pull/433) in pull requests
+- Bug fixes in GitLab and BitBucket
+
+## [Version 0.9] - 2023-10-29
+- codiumai/pr-agent:0.9
+- codiumai/pr-agent:0.9-github_app
+- codiumai/pr-agent:0.9-bitbucket-app
+- codiumai/pr-agent:0.9-gitlab_webhook
+- codiumai/pr-agent:0.9-github_polling
+- codiumai/pr-agent:0.9-github_action
+
+### Added::Algo
+- New tool - [generate_labels](https://github.com/Codium-ai/pr-agent/blob/main/docs/GENERATE_CUSTOM_LABELS.md)
+- New ability to use [customize labels](https://github.com/Codium-ai/pr-agent/blob/main/docs/GENERATE_CUSTOM_LABELS.md#how-to-enable-custom-labels) on the `review` and `describe` tools.
+- New tool - [add_docs](https://github.com/Codium-ai/pr-agent/blob/main/docs/ADD_DOCUMENTATION.md)
+- GitHub Action: Can now use a `.pr_agent.toml` file to control configuration parameters (see [Usage Guide](./Usage.md#working-with-github-action)).
+- GitHub App: Added ability to trigger tools on [push events](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#github-app-automatic-tools-for-new-code-pr-push)
+- Support custom domain URLs for Azure devops integration (see [link](https://github.com/Codium-ai/pr-agent/pull/381)).
+- PR Description default mode is now in [bullet points](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L35).
+
+### Added::Documentation
+Significant documentation updates (see [Installation Guide](https://github.com/Codium-ai/pr-agent/blob/main/INSTALL.md), [Usage Guide](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md), and [Tools Guide](https://github.com/Codium-ai/pr-agent/blob/main/docs/TOOLS_GUIDE.md))
+
+### Fixed
+- Fixed support for BitBucket pipeline (see [link](https://github.com/Codium-ai/pr-agent/pull/386))
+- Fixed a bug in `review -i` tool
+- Added blacklist for specific file extensions in `add_docs` tool (see [link](https://github.com/Codium-ai/pr-agent/pull/385/))
+
 ## [Version 0.8] - 2023-09-27
 - codiumai/pr-agent:0.8
 - codiumai/pr-agent:0.8-github_app
Usage.md (94 lines changed)

@@ -108,19 +108,41 @@ Any configuration value in [configuration file](pr_agent/settings/configuration.
 
 
 ### Working with GitHub App
-When running PR-Agent from [GitHub App](INSTALL.md#method-5-run-as-a-github-app), the default configurations from a pre-built docker will be initially loaded.
+When running PR-Agent from GitHub App, the default [configuration file](pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.
+
+By uploading a local `.pr_agent.toml` file, you can edit and customize any configuration parameter.
+
+For example, if you set in `.pr_agent.toml`:
+
+```
+[pr_reviewer]
+num_code_suggestions=1
+```
+
+Then you will override the default number of code suggestions to be 1.
+
 #### GitHub app automatic tools
-The [github_app](pr_agent/settings/configuration.toml#L56) section defines GitHub app specific configurations.
-An important parameter is `pr_commands`, which is a list of tools that will be **run automatically** when a new PR is opened:
+The [github_app](pr_agent/settings/configuration.toml#L76) section defines GitHub app-specific configurations.
+In this section you can define configurations to control the conditions for which tools will **run automatically**.
+
+##### GitHub app automatic tools for PR actions
+The GitHub app can respond to the following actions on a PR:
+1. `opened` - Opening a new PR
+2. `reopened` - Reopening a closed PR
+3. `ready_for_review` - Moving a PR from Draft to Open
+4. `review_requested` - Specifically requesting review (in the PR reviewers list) from the `github-actions[bot]` user
+
+The configuration parameter `handle_pr_actions` defines the list of actions for which the GitHub app will trigger the PR-Agent.
+The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when one of the above actions happens (e.g. a new PR is opened):
 ```
 [github_app]
+handle_pr_actions = ['opened', 'reopened', 'ready_for_review', 'review_requested']
 pr_commands = [
     "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
     "/auto_review",
 ]
 ```
-This means that when a new PR is opened, PR-Agent will run the `describe` and `auto_review` tools.
+This means that when a new PR is opened/reopened or marked as ready for review, PR-Agent will run the `describe` and `auto_review` tools.
 For the describe tool, the `add_original_user_description` and `keep_original_user_title` parameters will be set to true.
 
 You can override the default tool parameters by uploading a local configuration file called `.pr_agent.toml` to the root of your repo.
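To make the flow above concrete, here is a hedged sketch (not code from this PR) of how a GitHub-app handler could act on the `handle_pr_actions` and `pr_commands` settings shown in the hunk. `PRAgent.handle_request` and `get_settings` are real PR-Agent APIs; `on_pr_event` and its wiring are illustrative assumptions.

```python
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings

async def on_pr_event(action: str, pr_url: str) -> None:
    """Illustrative glue only: dispatch the configured tools for a PR action."""
    settings = get_settings()
    # skip actions that are not enabled in the [github_app] section
    if action not in settings.github_app.handle_pr_actions:
        return
    agent = PRAgent()
    for command in settings.github_app.pr_commands:
        # each entry is a CLI-style string, e.g. "/auto_review"
        await agent.handle_request(pr_url, command)
```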
@@ -135,11 +157,27 @@ When a new PR is opened, PR-Agent will run the `describe` tool with the above pa
 To cancel the automatic run of all the tools, set:
 ```
 [github_app]
-pr_commands = ""
+handle_pr_actions = []
 ```
 
+##### GitHub app automatic tools for new code (PR push)
+In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.
+
-Note that a local `.pr_agent.toml` file enables you to edit and customize the default parameters of any tool, not just the ones that are run automatically.
+The configuration toggle `handle_push_trigger` can be used to enable this feature.
+The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the PR.
+```
+[github_app]
+handle_push_trigger = true
+push_commands = [
+    "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
+    "/auto_review -i --pr_reviewer.remove_previous_review_comment=true",
+]
+```
+This means that when new code is pushed to the PR, the PR-Agent will run the `describe` and incremental `auto_review` tools.
+For the describe tool, the `add_original_user_description` and `keep_original_user_title` parameters will be set to true.
+For the `auto_review` tool, it will run in incremental mode, and the `remove_previous_review_comment` parameter will be set to true.
+
+Much like the configurations for `pr_commands`, you can override the default tool parameters by uploading a local configuration file to the root of your repo.
+
 #### Editing the prompts
 The prompts for the various PR-Agent tools are defined in the `pr_agent/settings` folder.
@@ -159,21 +197,28 @@ user="""
 Note that the new prompt will need to generate an output compatible with the relevant [post-process function](./pr_agent/tools/pr_description.py#L137).
 
 ### Working with GitHub Action
-You can configure settings in GitHub action by adding environment variables under the env section in `.github/workflows/pr_agent.yml` file. Some examples:
+You can configure settings in GitHub action by adding environment variables under the env section in `.github/workflows/pr_agent.yml` file.
+Specifically, start by setting the following environment variables:
 ```yaml
 env:
-    # ... previous environment values
-    OPENAI.ORG: "<Your organization name under your OpenAI account>"
-    PR_REVIEWER.REQUIRE_TESTS_REVIEW: "false" # Disable tests review
-    PR_CODE_SUGGESTIONS.NUM_CODE_SUGGESTIONS: 6 # Increase number of code suggestions
-    github_action.auto_review: "true" # Enable auto review
-    github_action.auto_describe: "true" # Enable auto describe
-    github_action.auto_improve: "false" # Disable auto improve
+    OPENAI_KEY: ${{ secrets.OPENAI_KEY }} # Make sure to add your OpenAI key to your repo secrets
+    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Make sure to add your GitHub token to your repo secrets
+    github_action.auto_review: "true" # enable\disable auto review
+    github_action.auto_describe: "true" # enable\disable auto describe
+    github_action.auto_improve: "false" # enable\disable auto improve
 ```
-specifically, `github_action.auto_review`, `github_action.auto_describe` and `github_action.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
+`github_action.auto_review`, `github_action.auto_describe` and `github_action.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
 
 If not set, the default option is that only the `review` tool will run automatically when a new PR is opened.
+
+Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` file in the root of your repo, similar to the GitHub App usage.
+
+For example, you can set an environment variable: `pr_description.add_original_user_description=false`, or add a `.pr_agent.toml` file with the following content:
+```
+[pr_description]
+add_original_user_description = false
+```
 
 ### Changing a model
 
 See [here](pr_agent/algo/__init__.py) for the list of available models.
@@ -258,6 +303,23 @@ key = ...
 
 Also review the [AiHandler](pr_agent/algo/ai_handler.py) file for instructions on how to set keys for other models.
 
+#### Vertex AI
+
+To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:
+
+```
+[config] # in configuration.toml
+model = "vertex_ai/codechat-bison"
+
+[vertexai] # in .secrets.toml
+vertex_project = "my-google-cloud-project"
+vertex_location = ""
+```
+
+Your [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) will be used for authentication so there is no need to set explicit credentials in most environments.
+
+If you do want to set explicit credentials then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
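As a rough illustration of what those Vertex AI settings drive at the litellm layer (a sketch under assumptions: the model name, project and location are placeholders, and the response is accessed in the OpenAI-style shape litellm returns):

```python
import litellm

# placeholders; in PR-Agent these come from configuration.toml / .secrets.toml
litellm.vertex_project = "my-google-cloud-project"
litellm.vertex_location = "us-central1"

response = litellm.completion(
    model="vertex_ai/codechat-bison",
    messages=[{"role": "user", "content": "Summarize this diff: ..."}],
)
print(response["choices"][0]["message"]["content"])
```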
 
 ### Working with large PRs
 
 The default mode of CodiumAI is to have a single call per tool, using GPT-4, which has a token limit of 8000 tokens.
docs/DESCRIBE.md

@@ -26,7 +26,9 @@ Under the section 'pr_description', the [configuration file](./../pr_agent/setti
 - `keep_original_user_title`: if set to true, the tool will keep the original PR title, and won't change it. Default is false.
 
 - `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
+- To enable `custom labels`, apply the configuration changes described [here](./GENERATE_CUSTOM_LABELS.md#configuration-changes)
+- `enable_pr_type`: if set to false, it will not show the `PR type` as a text value in the description content. Default is true.
 
 ### Markers template
 
 markers enable you to easily integrate user's content and auto-generated content, with a template-like mechanism.
docs/GENERATE_CUSTOM_LABELS.md

@@ -1,5 +1,5 @@
 # Generate Custom Labels
-The `generte_labels` tool scans the PR code changes, and given a list of labels and their descriptions, it automatically suggests labels that match the PR code changes.
+The `generate_labels` tool scans the PR code changes, and given a list of labels and their descriptions, it automatically suggests labels that match the PR code changes.
 
 It can be invoked manually by commenting on any PR:
 ```

@@ -10,26 +10,32 @@ For example:
 If we wish to detect changes to SQL queries in a given PR, we can add the following custom label along with its description:
 
 <kbd><img src=./../pics/custom_labels_list.png width="768"></kbd>
-When running the `generte_labels` tool on a PR that includes changes in SQL queries, it will automatically suggest the custom label:
+
+When running the `generate_labels` tool on a PR that includes changes in SQL queries, it will automatically suggest the custom label:
 <kbd><img src=./../pics/custom_label_published.png width="768"></kbd>
 
-### Configuration options
-To enable custom labels, you need to add the following configuration to the [custom_labels file](./../pr_agent/settings/custom_labels.toml):
+### How to enable custom labels
+Note that in addition to the dedicated tool `generate_labels`, the custom labels will also be used by the `review` and `describe` tools.
+
+#### CLI
+To enable custom labels, you need to apply the [configuration changes](#configuration-changes) to the [custom_labels file](./../pr_agent/settings/custom_labels.toml):
+
+#### GitHub Action and GitHub App
+To enable custom labels, you need to apply the [configuration changes](#configuration-changes) to the local `.pr_agent.toml` file in your repository.
+
+#### Configuration changes
 - Change `enable_custom_labels` to True: This will turn off the default labels and enable the custom labels provided in the custom_labels.toml file.
-- Add the custom labels to the custom_labels.toml file. It should be formatted as follows:
+- Add the custom labels. It should be formatted as follows:
 ```
+[config]
+enable_custom_labels=true
+
 [custom_labels."Custom Label Name"]
 description = "Description of when AI should suggest this label"
-```
-- You can add modify the list to include all the custom labels you wish to use in your repository.
-
-#### Github Action
-To use the `generte_labels` tool with Github Action:
-
-- Add the following file to your repository under `env` section in `.github/workflows/pr_agent.yml`
-- Comma separated list of custom labels and their descriptions
-- The number of labels and descriptions should be the same and in the same order (empty descriptions are allowed):
+[custom_labels."Custom Label 2"]
+description = "Description of when AI should suggest this label 2"
 ```
-CUSTOM_LABELS: "label1, label2, ..."
-CUSTOM_LABELS_DESCRIPTION: "label1 description, label2 description, ..."
-```
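For orientation, a hedged sketch (not code from this PR) of how a `[custom_labels]` table like the one above is consumed; it mirrors the `set_custom_labels()` change shown near the end of this compare in `pr_agent/algo/utils.py`, with a toy dict standing in for the parsed TOML:

```python
# toy stand-in for the parsed [custom_labels] table from custom_labels.toml
labels = {
    "Custom Label Name": {"description": "Description of when AI should suggest this label"},
    "Custom Label 2": {"description": "Description of when AI should suggest this label 2"},
}

final_labels = ""
for k, v in labels.items():
    # same formatting as set_custom_labels() in pr_agent/algo/utils.py
    final_labels += f" - {k} ({v['description']})\n"
print(final_labels)
```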
docs/REVIEW.md

@@ -16,24 +16,46 @@ The `review` tool can also be triggered automatically every time a new PR is ope
 
 Under the section 'pr_reviewer', the [configuration file](./../pr_agent/settings/configuration.toml#L16) contains options to customize the 'review' tool:
 
+#### enable\\disable features
 - `require_focused_review`: if set to true, the tool will add a section - 'is the PR a focused one'. Default is false.
 - `require_score_review`: if set to true, the tool will add a section that scores the PR. Default is false.
 - `require_tests_review`: if set to true, the tool will add a section that checks if the PR contains tests. Default is true.
 - `require_security_review`: if set to true, the tool will add a section that checks if the PR contains security issues. Default is true.
 - `require_estimate_effort_to_review`: if set to true, the tool will add a section that estimates the effort needed to review the PR. Default is true.
+#### general options
 - `num_code_suggestions`: number of code suggestions provided by the 'review' tool. Default is 4.
 - `inline_code_comments`: if set to true, the tool will publish the code suggestions as comments on the code diff. Default is false.
 - `automatic_review`: if set to false, no automatic reviews will be done. Default is true.
+- `remove_previous_review_comment`: if set to true, the tool will remove the previous review comment before adding a new one. Default is false.
+- `persistent_comment`: if set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.
 - `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
+#### review labels
+- `enable_review_labels_security`: if set to true, the tool will publish a 'possible security issue' label if it detects a security issue. Default is true.
+- `enable_review_labels_effort`: if set to true, the tool will publish a 'Review effort [1-5]: x' label. Default is false.
+- To enable `custom labels`, apply the configuration changes described [here](./GENERATE_CUSTOM_LABELS.md#configuration-changes)
 #### Incremental Mode
-For an incremental review, which only considers changes since the last PR-Agent review, this can be useful when working on the PR in an iterative manner, and you want to focus on the changes since the last review instead of reviewing the entire PR again, the following command can be used:
+For an incremental review, which only considers changes since the last PR-Agent review (useful when working on the PR in an iterative manner, when you want to focus on the changes since the last review instead of reviewing the entire PR again), the following command can be used:
 ```
-/improve -i
+/review -i
 ```
 Note that the incremental mode is only available for GitHub.
 
 <kbd><img src=./../pics/incremental_review.png width="768"></kbd>
 
+Under the section 'pr_reviewer', the [configuration file](./../pr_agent/settings/configuration.toml#L16) contains options to customize the 'review -i' tool.
+These configurations can be used to control the rate at which the incremental review tool will create new review comments when invoked automatically, to prevent making too much noise in the PR.
+- `minimal_commits_for_incremental_review`: Minimal number of commits since the last review that are required to create incremental review.
+If there are fewer than the specified number of commits since the last review, the tool will not perform any action.
+Default is 0 - the tool will always run, no matter how many commits since the last review.
+- `minimal_minutes_for_incremental_review`: Minimal number of minutes that need to pass since the last reviewed commit to create incremental review.
+If less than the specified number of minutes have passed between the last reviewed commit and running this command, the tool will not perform any action.
+Default is 0 - the tool will always run, no matter how much time has passed since the last reviewed commit.
+- `require_all_thresholds_for_incremental_review`: If set to true, all the previous thresholds must be met for incremental review to run. If false, only one is enough to run the tool.
+For example, if `minimal_commits_for_incremental_review=2` and `minimal_minutes_for_incremental_review=2`, and we have 3 commits since the last review, but the last reviewed commit is from 1 minute ago:
+When `require_all_thresholds_for_incremental_review=true` the incremental review __will not__ run, because only 1 out of 2 conditions were met (we have enough commits but the last review is too recent),
+but when `require_all_thresholds_for_incremental_review=false` the incremental review __will__ run, because one condition is enough (we have 3 commits, which is more than the configured 2).
+Default is false - the tool will run as long as at least one condition is met.
 
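A hedged sketch of the threshold logic those three settings describe (illustrative only: the parameter names mirror the configuration keys, but this function is not from the PR itself):

```python
def should_run_incremental_review(
    commits_since_last_review: int,
    minutes_since_last_commit: float,
    minimal_commits: int = 0,
    minimal_minutes: int = 0,
    require_all_thresholds: bool = False,
) -> bool:
    """Illustrative re-statement of the documented threshold rules."""
    enough_commits = commits_since_last_review >= minimal_commits
    enough_minutes = minutes_since_last_commit >= minimal_minutes
    if require_all_thresholds:
        return enough_commits and enough_minutes  # every condition must hold
    return enough_commits or enough_minutes       # one condition is enough

# the worked example from the text: 3 commits, last reviewed commit 1 minute ago
print(should_run_incremental_review(3, 1, minimal_commits=2, minimal_minutes=2,
                                    require_all_thresholds=True))   # False
print(should_run_incremental_review(3, 1, minimal_commits=2, minimal_minutes=2,
                                    require_all_thresholds=False))  # True
```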
 #### PR Reflection
 By invoking:
 ```
docs/TOOLS_GUIDE.md

@@ -6,5 +6,6 @@
 - [SIMILAR_ISSUE](./SIMILAR_ISSUE.md)
 - [UPDATE CHANGELOG](./UPDATE_CHANGELOG.md)
 - [ADD DOCUMENTATION](./ADD_DOCUMENTATION.md)
+- [GENERATE CUSTOM LABELS](./GENERATE_CUSTOM_LABELS.md)
 
 See the **[installation guide](/INSTALL.md)** for instructions on how to setup PR-Agent.
pr_agent/agent/pr_agent.py

@@ -46,10 +46,13 @@ class PRAgent:
         apply_repo_settings(pr_url)
 
         # Then, apply user specific settings if exists
-        request = request.replace("'", "\\'")
-        lexer = shlex.shlex(request, posix=True)
-        lexer.whitespace_split = True
-        action, *args = list(lexer)
+        if isinstance(request, str):
+            request = request.replace("'", "\\'")
+            lexer = shlex.shlex(request, posix=True)
+            lexer.whitespace_split = True
+            action, *args = list(lexer)
+        else:
+            action, *args = request
         args = update_settings_from_args(args)
 
         action = action.lstrip("/").lower()
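A hedged usage sketch of the change above: `handle_request` now accepts either a CLI-style string (parsed with shlex) or a pre-split list. The PR URL and arguments below are placeholders, and running this for real requires configured provider/model credentials.

```python
import asyncio
from pr_agent.agent.pr_agent import PRAgent

agent = PRAgent()
pr_url = "https://github.com/<owner>/<repo>/pull/<number>"  # placeholder

# CLI-style string request: parsed with shlex, quotes handled
asyncio.run(agent.handle_request(pr_url, "/review --pr_reviewer.num_code_suggestions=1"))

# pre-split list request: unpacked directly as (action, *args)
asyncio.run(agent.handle_request(pr_url, ["review", "--pr_reviewer.num_code_suggestions=1"]))
```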
pr_agent/algo/__init__.py

@@ -8,9 +8,14 @@ MAX_TOKENS = {
     'gpt-4': 8000,
     'gpt-4-0613': 8000,
     'gpt-4-32k': 32000,
+    'gpt-4-1106-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
     'claude-instant-1': 100000,
     'claude-2': 100000,
     'command-nightly': 4096,
     'replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1': 4096,
-    'meta-llama/Llama-2-7b-chat-hf': 4096
+    'meta-llama/Llama-2-7b-chat-hf': 4096,
+    'vertex_ai/codechat-bison': 6144,
+    'vertex_ai/codechat-bison-32k': 32000,
+    'codechat-bison': 6144,
+    'codechat-bison-32k': 32000,
 }
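The comment on `gpt-4-1106-preview` refers to the new `get_max_tokens` helper (imported from `pr_agent/algo/utils.py` in the `pr_processing.py` hunks below). A hedged sketch of what that helper plausibly does, based on how it is used throughout this changeset:

```python
from pr_agent.algo import MAX_TOKENS
from pr_agent.config_loader import get_settings

def get_max_tokens(model: str) -> int:
    """Sketch: cap the model's context size by config.max_model_tokens, if set."""
    max_tokens_model = MAX_TOKENS[model]
    settings = get_settings()
    if settings.config.get("max_model_tokens"):
        # a user-configured ceiling wins over the model's nominal limit
        max_tokens_model = min(settings.config.max_model_tokens, max_tokens_model)
    return max_tokens_model
```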
pr_agent/algo/ai_handler.py

@@ -23,39 +23,43 @@ class AiHandler:
         Initializes the OpenAI API key and other settings from a configuration file.
         Raises a ValueError if the OpenAI key is missing.
         """
-        try:
-            openai.api_key = get_settings().openai.key
-            litellm.openai_key = get_settings().openai.key
-            if get_settings().get("litellm.use_client"):
-                litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
-                assert litellm_token, "LITELLM_TOKEN is required"
-                os.environ["LITELLM_TOKEN"] = litellm_token
-                litellm.use_client = True
-            self.azure = False
-            if get_settings().get("OPENAI.ORG", None):
-                litellm.organization = get_settings().openai.org
-            if get_settings().get("OPENAI.API_TYPE", None):
-                if get_settings().openai.api_type == "azure":
-                    self.azure = True
-                    litellm.azure_key = get_settings().openai.key
-            if get_settings().get("OPENAI.API_VERSION", None):
-                litellm.api_version = get_settings().openai.api_version
-            if get_settings().get("OPENAI.API_BASE", None):
-                litellm.api_base = get_settings().openai.api_base
-            if get_settings().get("ANTHROPIC.KEY", None):
-                litellm.anthropic_key = get_settings().anthropic.key
-            if get_settings().get("COHERE.KEY", None):
-                litellm.cohere_key = get_settings().cohere.key
-            if get_settings().get("REPLICATE.KEY", None):
-                litellm.replicate_key = get_settings().replicate.key
-            if get_settings().get("REPLICATE.KEY", None):
-                litellm.replicate_key = get_settings().replicate.key
-            if get_settings().get("HUGGINGFACE.KEY", None):
-                litellm.huggingface_key = get_settings().huggingface.key
-            if get_settings().get("HUGGINGFACE.API_BASE", None):
-                litellm.api_base = get_settings().huggingface.api_base
-        except AttributeError as e:
-            raise ValueError("OpenAI key is required") from e
+        self.azure = False
+
+        if get_settings().get("OPENAI.KEY", None):
+            openai.api_key = get_settings().openai.key
+            litellm.openai_key = get_settings().openai.key
+        if get_settings().get("litellm.use_client"):
+            litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
+            assert litellm_token, "LITELLM_TOKEN is required"
+            os.environ["LITELLM_TOKEN"] = litellm_token
+            litellm.use_client = True
+        if get_settings().get("OPENAI.ORG", None):
+            litellm.organization = get_settings().openai.org
+        if get_settings().get("OPENAI.API_TYPE", None):
+            if get_settings().openai.api_type == "azure":
+                self.azure = True
+                litellm.azure_key = get_settings().openai.key
+        if get_settings().get("OPENAI.API_VERSION", None):
+            litellm.api_version = get_settings().openai.api_version
+        if get_settings().get("OPENAI.API_BASE", None):
+            litellm.api_base = get_settings().openai.api_base
+        if get_settings().get("ANTHROPIC.KEY", None):
+            litellm.anthropic_key = get_settings().anthropic.key
+        if get_settings().get("COHERE.KEY", None):
+            litellm.cohere_key = get_settings().cohere.key
+        if get_settings().get("REPLICATE.KEY", None):
+            litellm.replicate_key = get_settings().replicate.key
+        if get_settings().get("REPLICATE.KEY", None):
+            litellm.replicate_key = get_settings().replicate.key
+        if get_settings().get("HUGGINGFACE.KEY", None):
+            litellm.huggingface_key = get_settings().huggingface.key
+        if get_settings().get("HUGGINGFACE.API_BASE", None):
+            litellm.api_base = get_settings().huggingface.api_base
+        if get_settings().get("VERTEXAI.VERTEX_PROJECT", None):
+            litellm.vertex_project = get_settings().vertexai.vertex_project
+            litellm.vertex_location = get_settings().get(
+                "VERTEXAI.VERTEX_LOCATION", None
+            )
 
     @property
     def deployment_id(self):
pr_agent/algo/file_filter.py

@@ -23,7 +23,7 @@ def filter_ignored(files):
 
         # keep filenames that _don't_ match the ignore regex
         for r in compiled_patterns:
-            files = [f for f in files if not r.match(f.filename)]
+            files = [f for f in files if (f.filename and not r.match(f.filename))]
 
     except Exception as e:
        print(f"Could not filter file list: {e}")
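A minimal illustration (with hypothetical data; `FakeFile` is a stand-in for the real `FilePatchInfo`) of why the guard added above matters: without `f.filename and ...`, a file whose filename is `None` would make `re.match` raise a `TypeError` inside the list comprehension.

```python
import re

class FakeFile:  # stand-in for FilePatchInfo in this sketch
    def __init__(self, filename):
        self.filename = filename

files = [FakeFile("src/app.py"), FakeFile(None), FakeFile("docs/README.md")]
pattern = re.compile(r"^docs/")

# guarded comprehension: None filenames are dropped instead of crashing
kept = [f for f in files if (f.filename and not pattern.match(f.filename))]
print([f.filename for f in kept])  # ['src/app.py']
```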
pr_agent/algo/git_patch_processing.py

@@ -3,6 +3,7 @@ from __future__ import annotations
 import re
 
 from pr_agent.config_loader import get_settings
+from pr_agent.git_providers.git_provider import EDIT_TYPE
 from pr_agent.log import get_logger
 
 

@@ -115,7 +116,7 @@ def omit_deletion_hunks(patch_lines) -> str:
 
 
 def handle_patch_deletions(patch: str, original_file_content_str: str,
-                           new_file_content_str: str, file_name: str) -> str:
+                           new_file_content_str: str, file_name: str, edit_type: EDIT_TYPE = EDIT_TYPE.UNKNOWN) -> str:
     """
     Handle entire file or deletion patches.

@@ -132,7 +133,7 @@ def handle_patch_deletions(patch: str, original_file_content_str: str,
         str: The modified patch with deletion hunks omitted.
 
     """
-    if not new_file_content_str:
+    if not new_file_content_str and edit_type != EDIT_TYPE.ADDED:
         # logic for handling deleted files - don't show patch, just show that the file was deleted
         if get_settings().config.verbosity_level > 0:
             get_logger().info(f"Processing file: {file_name}, minimizing deletion file")
@ -7,18 +7,20 @@ from typing import Any, Callable, List, Tuple
|
|||||||
|
|
||||||
from github import RateLimitExceededException
|
from github import RateLimitExceededException
|
||||||
|
|
||||||
from pr_agent.algo import MAX_TOKENS
|
|
||||||
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
|
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
|
||||||
from pr_agent.algo.language_handler import sort_files_by_main_languages
|
from pr_agent.algo.language_handler import sort_files_by_main_languages
|
||||||
from pr_agent.algo.file_filter import filter_ignored
|
from pr_agent.algo.file_filter import filter_ignored
|
||||||
from pr_agent.algo.token_handler import TokenHandler, get_token_encoder
|
from pr_agent.algo.token_handler import TokenHandler, get_token_encoder
|
||||||
|
from pr_agent.algo.utils import get_max_tokens
|
||||||
from pr_agent.config_loader import get_settings
|
from pr_agent.config_loader import get_settings
|
||||||
from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider
|
from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider, EDIT_TYPE
|
||||||
from pr_agent.log import get_logger
|
from pr_agent.log import get_logger
|
||||||
|
|
||||||
DELETED_FILES_ = "Deleted files:\n"
|
DELETED_FILES_ = "Deleted files:\n"
|
||||||
|
|
||||||
MORE_MODIFIED_FILES_ = "More modified files:\n"
|
MORE_MODIFIED_FILES_ = "Additional modified files (insufficient token budget to process):\n"
|
||||||
|
|
||||||
|
ADDED_FILES_ = "Additional added files (insufficient token budget to process):\n"
|
||||||
|
|
||||||
OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD = 1000
|
OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD = 1000
|
||||||
OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD = 600
|
OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD = 600
|
||||||
@ -64,14 +66,17 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
|
|||||||
pr_languages, token_handler, add_line_numbers_to_hunks, patch_extra_lines=PATCH_EXTRA_LINES)
|
pr_languages, token_handler, add_line_numbers_to_hunks, patch_extra_lines=PATCH_EXTRA_LINES)
|
||||||
|
|
||||||
# if we are under the limit, return the full diff
|
# if we are under the limit, return the full diff
|
||||||
if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < MAX_TOKENS[model]:
|
if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < get_max_tokens(model):
|
||||||
return "\n".join(patches_extended)
|
return "\n".join(patches_extended)
|
||||||
|
|
||||||
# if we are over the limit, start pruning
|
# if we are over the limit, start pruning
|
||||||
patches_compressed, modified_file_names, deleted_file_names = \
|
patches_compressed, modified_file_names, deleted_file_names, added_file_names = \
|
||||||
pr_generate_compressed_diff(pr_languages, token_handler, model, add_line_numbers_to_hunks)
|
pr_generate_compressed_diff(pr_languages, token_handler, model, add_line_numbers_to_hunks)
|
||||||
|
|
||||||
final_diff = "\n".join(patches_compressed)
|
final_diff = "\n".join(patches_compressed)
|
||||||
|
if added_file_names:
|
||||||
|
added_list_str = ADDED_FILES_ + "\n".join(added_file_names)
|
||||||
|
final_diff = final_diff + "\n\n" + added_list_str
|
||||||
if modified_file_names:
|
if modified_file_names:
|
||||||
modified_list_str = MORE_MODIFIED_FILES_ + "\n".join(modified_file_names)
|
modified_list_str = MORE_MODIFIED_FILES_ + "\n".join(modified_file_names)
|
||||||
final_diff = final_diff + "\n\n" + modified_list_str
|
final_diff = final_diff + "\n\n" + modified_list_str
|
||||||
@ -122,7 +127,7 @@ def pr_generate_extended_diff(pr_languages: list,
|
|||||||
|
|
||||||
|
|
||||||
def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, model: str,
|
def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, model: str,
|
||||||
convert_hunks_to_line_numbers: bool) -> Tuple[list, list, list]:
|
convert_hunks_to_line_numbers: bool) -> Tuple[list, list, list, list]:
|
||||||
"""
|
"""
|
||||||
Generate a compressed diff string for a pull request, using diff minimization techniques to reduce the number of
|
Generate a compressed diff string for a pull request, using diff minimization techniques to reduce the number of
|
||||||
tokens used.
|
tokens used.
|
||||||
@ -148,6 +153,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
patches = []
|
patches = []
|
||||||
|
added_files_list = []
|
||||||
modified_files_list = []
|
modified_files_list = []
|
||||||
deleted_files_list = []
|
deleted_files_list = []
|
||||||
# sort each one of the languages in top_langs by the number of tokens in the diff
|
# sort each one of the languages in top_langs by the number of tokens in the diff
|
||||||
@ -165,7 +171,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
|
|||||||
|
|
||||||
# removing delete-only hunks
|
# removing delete-only hunks
|
||||||
patch = handle_patch_deletions(patch, original_file_content_str,
|
patch = handle_patch_deletions(patch, original_file_content_str,
|
||||||
new_file_content_str, file.filename)
|
new_file_content_str, file.filename, file.edit_type)
|
||||||
if patch is None:
|
if patch is None:
|
||||||
if not deleted_files_list:
|
if not deleted_files_list:
|
||||||
total_tokens += token_handler.count_tokens(DELETED_FILES_)
|
total_tokens += token_handler.count_tokens(DELETED_FILES_)
|
||||||
@ -179,21 +185,26 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
|
|||||||
new_patch_tokens = token_handler.count_tokens(patch)
|
new_patch_tokens = token_handler.count_tokens(patch)
|
||||||
|
|
||||||
# Hard Stop, no more tokens
|
# Hard Stop, no more tokens
|
||||||
if total_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
|
if total_tokens > get_max_tokens(model) - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
|
||||||
get_logger().warning(f"File was fully skipped, no more tokens: {file.filename}.")
|
get_logger().warning(f"File was fully skipped, no more tokens: {file.filename}.")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# If the patch is too large, just show the file name
|
# If the patch is too large, just show the file name
|
||||||
if total_tokens + new_patch_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD:
|
+        if total_tokens + new_patch_tokens > get_max_tokens(model) - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD:
             # Current logic is to skip the patch if it's too large
             # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
             #  until we meet the requirements
             if get_settings().config.verbosity_level >= 2:
                 get_logger().warning(f"Patch too large, minimizing it, {file.filename}")
-            if not modified_files_list:
-                total_tokens += token_handler.count_tokens(MORE_MODIFIED_FILES_)
-            modified_files_list.append(file.filename)
-            total_tokens += token_handler.count_tokens(file.filename) + 1
+            if file.edit_type == EDIT_TYPE.ADDED:
+                if not added_files_list:
+                    total_tokens += token_handler.count_tokens(ADDED_FILES_)
+                added_files_list.append(file.filename)
+            else:
+                if not modified_files_list:
+                    total_tokens += token_handler.count_tokens(MORE_MODIFIED_FILES_)
+                modified_files_list.append(file.filename)
+            total_tokens += token_handler.count_tokens(file.filename) + 1
             continue

         if patch:
@@ -206,7 +217,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
             if get_settings().config.verbosity_level >= 2:
                 get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")

-    return patches, modified_files_list, deleted_files_list
+    return patches, modified_files_list, deleted_files_list, added_files_list


 async def retry_with_fallback_models(f: Callable):
@@ -271,7 +282,7 @@ def find_line_number_of_relevant_line_in_file(diff_files: List[FilePatchInfo],
         r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

     for file in diff_files:
-        if file.filename.strip() == relevant_file:
+        if file.filename and (file.filename.strip() == relevant_file):
             patch = file.patch
             patch_lines = patch.splitlines()

@@ -397,13 +408,13 @@ def get_pr_multi_diffs(git_provider: GitProvider,
             continue

         # Remove delete-only hunks
-        patch = handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file.filename)
+        patch = handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file.filename, file.edit_type)
         if patch is None:
             continue

         patch = convert_to_hunks_with_lines_numbers(patch, file)
         new_patch_tokens = token_handler.count_tokens(patch)
-        if patch and (total_tokens + new_patch_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD):
+        if patch and (total_tokens + new_patch_tokens > get_max_tokens(model) - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD):
             final_diff = "\n".join(patches)
             final_diff_list.append(final_diff)
             patches = []
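The skip-if-too-large logic above budgets patches against the model's context window. A minimal standalone sketch of the same budgeting rule; count_tokens, model_limit, and buffer are illustrative stand-ins, not the pr-agent API:

def pack_patches(patches, count_tokens, model_limit, buffer=1000):
    # Append patches only while the running token count stays under the
    # model limit minus an output buffer; oversized patches are skipped.
    packed, total = [], 0
    for _name, patch in patches:
        cost = count_tokens(patch)
        if total + cost > model_limit - buffer:
            continue
        packed.append(patch)
        total += cost
    return packed, total

toy = [("a.py", "def a(): pass"), ("b.py", "x = 1\n" * 500)]
packed, used = pack_patches(toy, count_tokens=lambda s: len(s.split()), model_limit=200)
print(len(packed), used)  # only the small patch fits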
@@ -9,6 +9,8 @@ from typing import Any, List

 import yaml
 from starlette_context import context

+from pr_agent.algo import MAX_TOKENS
 from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.log import get_logger

@@ -101,7 +103,8 @@ def parse_code_suggestion(code_suggestions: dict, gfm_supported: bool=True) -> s
             markdown_text += f" **{sub_key}:** {sub_value}\n"
             if not gfm_supported:
                 if "relevant line" not in sub_key.lower(): # nicer presentation
-                    markdown_text = markdown_text.rstrip('\n') + "\\\n"
+                    # markdown_text = markdown_text.rstrip('\n') + "\\\n" # works for gitlab
+                    markdown_text = markdown_text.rstrip('\n') + "  \n" # works for gitlab and bitbucket

     markdown_text += "\n"
     return markdown_text
@@ -294,6 +297,21 @@ def load_yaml(review_text: str) -> dict:

 def try_fix_yaml(review_text: str) -> dict:
     review_text_lines = review_text.split('\n')

+    # first fallback - try to convert 'relevant line: ...' to 'relevant line: |-\n        ...'
+    review_text_lines_copy = review_text_lines.copy()
+    for i in range(0, len(review_text_lines_copy)):
+        if 'relevant line:' in review_text_lines_copy[i] and not '|-' in review_text_lines_copy[i]:
+            review_text_lines_copy[i] = review_text_lines_copy[i].replace('relevant line: ',
+                                                                          'relevant line: |-\n        ')
+    try:
+        data = yaml.load('\n'.join(review_text_lines_copy), Loader=yaml.SafeLoader)
+        get_logger().info(f"Successfully parsed AI prediction after adding |-\n to relevant line")
+        return data
+    except:
+        get_logger().debug(f"Failed to parse AI prediction after adding |-\n to relevant line")
+
+    # second fallback - try to remove last lines
     data = {}
     for i in range(1, len(review_text_lines)):
         review_text_lines_tmp = '\n'.join(review_text_lines[:-i])
@@ -307,6 +325,9 @@ def try_fix_yaml(review_text: str) -> dict:


 def set_custom_labels(variables):
+    if not get_settings().config.enable_custom_labels:
+        return
+
     labels = get_settings().custom_labels
     if not labels:
         # set default labels
@@ -320,3 +341,35 @@ def set_custom_labels(variables):
         final_labels += f" - {k} ({v['description']})\n"
     variables["custom_labels"] = final_labels
     variables["custom_labels_examples"] = f" - {list(labels.keys())[0]}"
+
+
+def get_user_labels(current_labels: List[str] = None):
+    """
+    Only keep labels that have been added by the user
+    """
+    try:
+        if current_labels is None:
+            current_labels = []
+        user_labels = []
+        for label in current_labels:
+            if label.lower() in ['bug fix', 'tests', 'refactoring', 'enhancement', 'documentation', 'other']:
+                continue
+            if get_settings().config.enable_custom_labels:
+                if label in get_settings().custom_labels:
+                    continue
+            user_labels.append(label)
+        if user_labels:
+            get_logger().info(f"Keeping user labels: {user_labels}")
+    except Exception as e:
+        get_logger().exception(f"Failed to get user labels: {e}")
+        return current_labels
+    return user_labels
+
+
+def get_max_tokens(model):
+    settings = get_settings()
+    max_tokens_model = MAX_TOKENS[model]
+    if settings.config.max_model_tokens:
+        max_tokens_model = min(settings.config.max_model_tokens, max_tokens_model)
+        # get_logger().debug(f"limiting max tokens to {max_tokens_model}")
+    return max_tokens_model
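The first fallback in try_fix_yaml rewrites 'relevant line:' values as YAML block scalars before retrying the parse, since unquoted colons in code lines break plain YAML. A self-contained demonstration (the input string is made up):

import yaml

broken = "relevant file: a.py\nrelevant line: if x: # unquoted colon breaks YAML"
fixed_lines = []
for line in broken.split('\n'):
    if 'relevant line:' in line and '|-' not in line:
        # rewrite the value as a block scalar so the colon is literal text
        line = line.replace('relevant line: ', 'relevant line: |-\n    ')
    fixed_lines.append(line)
data = yaml.safe_load('\n'.join(fixed_lines))
print(data['relevant line'])  # if x: # unquoted colon breaks YAML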
@@ -8,6 +8,8 @@ from pr_agent.log import setup_logger

 setup_logger()

+
+
 def run(inargs=None):
     parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage=
 """\
@@ -51,9 +53,9 @@ For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions
     command = args.command.lower()
     get_settings().set("CONFIG.CLI_MODE", True)
     if args.issue_url:
-        result = asyncio.run(PRAgent().handle_request(args.issue_url, command + " " + " ".join(args.rest)))
+        result = asyncio.run(PRAgent().handle_request(args.issue_url, [command] + args.rest))
     else:
-        result = asyncio.run(PRAgent().handle_request(args.pr_url, command + " " + " ".join(args.rest)))
+        result = asyncio.run(PRAgent().handle_request(args.pr_url, [command] + args.rest))
     if not result:
         parser.print_help()
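Passing the request as [command] + args.rest instead of a re-joined string preserves argument boundaries. A hypothetical illustration, not the actual PRAgent request contract:

args = ['--pr_reviewer.extra_instructions=focus on style issues']
joined = "review " + " ".join(args)
print(joined.split(" "))   # the single argument splinters into four tokens
print(["review"] + args)   # boundaries preserved as one list element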
@@ -9,7 +9,7 @@ from starlette_context import context
 from ..algo.pr_processing import find_line_number_of_relevant_line_in_file
 from ..config_loader import get_settings
 from ..log import get_logger
-from .git_provider import FilePatchInfo, GitProvider
+from .git_provider import FilePatchInfo, GitProvider, EDIT_TYPE


 class BitbucketProvider(GitProvider):
@@ -32,8 +32,10 @@ class BitbucketProvider(GitProvider):
         self.repo = None
         self.pr_num = None
         self.pr = None
+        self.pr_url = pr_url
         self.temp_comments = []
         self.incremental = incremental
+        self.diff_files = None
         if pr_url:
             self.set_pr(pr_url)
             self.bitbucket_comment_api_url = self.pr._BitbucketBase__data["links"]["comments"]["href"]
@@ -41,9 +43,12 @@ class BitbucketProvider(GitProvider):

     def get_repo_settings(self):
         try:
-            contents = self.repo_obj.get_contents(
-                ".pr_agent.toml", ref=self.pr.head.sha
-            ).decoded_content
+            url = (f"https://api.bitbucket.org/2.0/repositories/{self.workspace_slug}/{self.repo_slug}/src/"
+                   f"{self.pr.destination_branch}/.pr_agent.toml")
+            response = requests.request("GET", url, headers=self.headers)
+            if response.status_code == 404:  # not found
+                return ""
+            contents = response.text.encode('utf-8')
             return contents
         except Exception:
             return ""
@@ -113,6 +118,9 @@ class BitbucketProvider(GitProvider):
         return [diff.new.path for diff in self.pr.diffstat()]

     def get_diff_files(self) -> list[FilePatchInfo]:
+        if self.diff_files:
+            return self.diff_files
+
         diffs = self.pr.diffstat()
         diff_split = [
             "diff --git%s" % x for x in self.pr.diff().split("diff --git") if x.strip()
@@ -124,16 +132,56 @@ class BitbucketProvider(GitProvider):
                 diff.old.get_data("links")
             )
             new_file_content_str = self._get_pr_file_content(diff.new.get_data("links"))
-            diff_files.append(
-                FilePatchInfo(
-                    original_file_content_str,
-                    new_file_content_str,
-                    diff_split[index],
-                    diff.new.path,
-                )
-            )
+            file_patch_canonic_structure = FilePatchInfo(
+                original_file_content_str,
+                new_file_content_str,
+                diff_split[index],
+                diff.new.path,
+            )
+
+            if diff.data['status'] == 'added':
+                file_patch_canonic_structure.edit_type = EDIT_TYPE.ADDED
+            elif diff.data['status'] == 'removed':
+                file_patch_canonic_structure.edit_type = EDIT_TYPE.DELETED
+            elif diff.data['status'] == 'modified':
+                file_patch_canonic_structure.edit_type = EDIT_TYPE.MODIFIED
+            elif diff.data['status'] == 'renamed':
+                file_patch_canonic_structure.edit_type = EDIT_TYPE.RENAMED
+            diff_files.append(file_patch_canonic_structure)

+        self.diff_files = diff_files
         return diff_files

+    def get_latest_commit_url(self):
+        return self.pr.data['source']['commit']['links']['html']['href']
+
+    def get_comment_url(self, comment):
+        return comment.data['links']['html']['href']
+
+    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool = True):
+        try:
+            for comment in self.pr.comments():
+                body = comment.raw
+                if initial_header in body:
+                    latest_commit_url = self.get_latest_commit_url()
+                    comment_url = self.get_comment_url(comment)
+                    if update_header:
+                        updated_header = f"{initial_header}\n\n### (review updated until commit {latest_commit_url})\n"
+                        pr_comment_updated = pr_comment.replace(initial_header, updated_header)
+                    else:
+                        pr_comment_updated = pr_comment
+                    get_logger().info(f"Persistent mode - updating comment {comment_url} to latest review message")
+                    d = {"content": {"raw": pr_comment_updated}}
+                    response = comment._update_data(comment.put(None, data=d))
+                    self.publish_comment(
+                        f"**[Persistent review]({comment_url})** updated to latest commit {latest_commit_url}")
+                    return
+        except Exception as e:
+            get_logger().exception(f"Failed to update persistent review, error: {e}")
+            pass
+        self.publish_comment(pr_comment)
+
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
         comment = self.pr.comment(pr_comment)
         if is_temporary:
@@ -142,10 +190,15 @@ class BitbucketProvider(GitProvider):
     def remove_initial_comment(self):
         try:
             for comment in self.temp_comments:
-                self.pr.delete(f"comments/{comment}")
+                self.remove_comment(comment)
         except Exception as e:
             get_logger().exception(f"Failed to remove temp comments, error: {e}")

+    def remove_comment(self, comment):
+        try:
+            self.pr.delete(f"comments/{comment}")
+        except Exception as e:
+            get_logger().exception(f"Failed to remove comment, error: {e}")
+
     # function to create_inline_comment
     def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
@@ -175,9 +228,29 @@ class BitbucketProvider(GitProvider):
         )
         return response

+    def generate_link_to_relevant_line_number(self, suggestion) -> str:
+        try:
+            relevant_file = suggestion['relevant file'].strip('`').strip("'")
+            relevant_line_str = suggestion['relevant line']
+            if not relevant_line_str:
+                return ""
+
+            diff_files = self.get_diff_files()
+            position, absolute_position = find_line_number_of_relevant_line_in_file \
+                (diff_files, relevant_file, relevant_line_str)
+
+            if absolute_position != -1 and self.pr_url:
+                link = f"{self.pr_url}/#L{relevant_file}T{absolute_position}"
+                return link
+        except Exception as e:
+            if get_settings().config.verbosity_level >= 2:
+                get_logger().info(f"Failed adding line link, error: {e}")
+
+        return ""
+
     def publish_inline_comments(self, comments: list[dict]):
         for comment in comments:
-            self.publish_inline_comment(comment['body'], comment['start_line'], comment['path'])
+            self.publish_inline_comment(comment['body'], comment['position'], comment['path'])

     def get_title(self):
         return self.pr.title
@@ -254,6 +327,11 @@ class BitbucketProvider(GitProvider):
         })

         response = requests.request("PUT", self.bitbucket_pull_request_api_url, headers=self.headers, data=payload)
+        try:
+            if response.status_code != 200:
+                get_logger().info(f"Failed to update description, error code: {response.status_code}")
+        except:
+            pass
         return response

     # bitbucket does not support labels
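publish_persistent_comment implements an upsert: update the previous comment that carries a known header, or fall back to posting a new one. A generic sketch of that flow with plain dicts standing in for provider comment objects:

def upsert_comment(comments, new_body, header, edit, create):
    for comment in comments:
        if comment["body"].startswith(header):
            edit(comment, new_body)  # rewrite the existing review in place
            return comment
    return create(new_body)          # no previous review: post a fresh one

store = [{"body": "## PR Analysis\nold review"}]
upsert_comment(store, "## PR Analysis\nnew review", "## PR Analysis",
               edit=lambda c, b: c.update(body=b),
               create=lambda b: store.append({"body": b}))
print(store[0]["body"])  # the original comment now holds the new review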
@@ -221,6 +221,9 @@ class CodeCommitProvider(GitProvider):
     def remove_initial_comment(self):
         return ""  # not implemented yet

+    def remove_comment(self, comment):
+        return ""  # not implemented yet
+
     def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
         # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/post_comment_for_compared_commit.html
         raise NotImplementedError("CodeCommit provider does not support publishing inline comments yet")
@@ -396,5 +396,8 @@ class GerritProvider(GitProvider):
         # shutil.rmtree(self.repo_path)
         pass

+    def remove_comment(self, comment):
+        pass
+
     def get_pr_branch(self):
         return self.repo.head
@@ -13,6 +13,7 @@ class EDIT_TYPE(Enum):
     DELETED = 2
     MODIFIED = 3
     RENAMED = 4
+    UNKNOWN = 5


 @dataclass
@@ -22,7 +23,7 @@ class FilePatchInfo:
     patch: str
     filename: str
     tokens: int = -1
-    edit_type: EDIT_TYPE = EDIT_TYPE.MODIFIED
+    edit_type: EDIT_TYPE = EDIT_TYPE.UNKNOWN
     old_filename: str = None


@@ -39,38 +40,10 @@ class GitProvider(ABC):
     def publish_description(self, pr_title: str, pr_body: str):
         pass

-    @abstractmethod
-    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
-        pass
-
-    @abstractmethod
-    def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
-        pass
-
-    @abstractmethod
-    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
-        pass
-
-    @abstractmethod
-    def publish_inline_comments(self, comments: list[dict]):
-        pass
-
     @abstractmethod
     def publish_code_suggestions(self, code_suggestions: list) -> bool:
         pass

-    @abstractmethod
-    def publish_labels(self, labels):
-        pass
-
-    @abstractmethod
-    def get_labels(self):
-        pass
-
-    @abstractmethod
-    def remove_initial_comment(self):
-        pass
-
     @abstractmethod
     def get_languages(self):
         pass
@@ -90,16 +63,16 @@ class GitProvider(ABC):
     def get_pr_description(self, *, full: bool = True) -> str:
         from pr_agent.config_loader import get_settings
         from pr_agent.algo.pr_processing import clip_tokens
-        max_tokens = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
+        max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
         description = self.get_pr_description_full() if full else self.get_user_description()
-        if max_tokens:
-            return clip_tokens(description, max_tokens)
+        if max_tokens_description:
+            return clip_tokens(description, max_tokens_description)
         return description

     def get_user_description(self) -> str:
         description = (self.get_pr_description_full() or "").strip()
         # if the existing description wasn't generated by the pr-agent, just return it as-is
-        if not description.startswith("## PR Type"):
+        if not any(description.startswith(header) for header in ("## PR Type", "## PR Description")):
             return description
         # if the existing description was generated by the pr-agent, but it doesn't contain the user description,
         # return nothing (empty string) because it means there is no user description
@@ -109,11 +82,54 @@ class GitProvider(ABC):
         return description.split("## User Description:", 1)[1].strip()

     @abstractmethod
-    def get_issue_comments(self):
+    def get_repo_settings(self):
+        pass
+
+    def get_pr_id(self):
+        return ""
+
+    #### comments operations ####
+    @abstractmethod
+    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
+        pass
+
+    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool):
+        self.publish_comment(pr_comment)
+
+    @abstractmethod
+    def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
         pass

     @abstractmethod
-    def get_repo_settings(self):
+    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
+        pass
+
+    @abstractmethod
+    def publish_inline_comments(self, comments: list[dict]):
+        pass
+
+    @abstractmethod
+    def remove_initial_comment(self):
+        pass
+
+    @abstractmethod
+    def remove_comment(self, comment):
+        pass
+
+    @abstractmethod
+    def get_issue_comments(self):
+        pass
+
+    def get_comment_url(self, comment) -> str:
+        return ""
+
+    #### labels operations ####
+    @abstractmethod
+    def publish_labels(self, labels):
+        pass
+
+    @abstractmethod
+    def get_labels(self):
         pass

     @abstractmethod
@@ -124,11 +140,12 @@ class GitProvider(ABC):
     def remove_reaction(self, issue_comment_id: int, reaction_id: int) -> bool:
         pass

+    #### commits operations ####
     @abstractmethod
     def get_commit_messages(self):
         pass

-    def get_pr_id(self):
+    def get_latest_commit_url(self) -> str:
         return ""

 def get_main_pr_language(languages, files) -> str:
@@ -139,6 +156,9 @@ def get_main_pr_language(languages, files) -> str:
     if not languages:
         get_logger().info("No languages detected")
         return main_language_str
+    if not files:
+        get_logger().info("No files in diff")
+        return main_language_str

     try:
         top_language = max(languages, key=languages.get).lower()
@@ -146,6 +166,8 @@ def get_main_pr_language(languages, files) -> str:
         # validate that the specific commit uses the main language
         extension_list = []
         for file in files:
+            if not file:
+                continue
             if isinstance(file, str):
                 file = FilePatchInfo(base_file=None, head_file=None, patch=None, filename=file)
             extension_list.append(file.filename.rsplit('.')[-1])
@@ -183,6 +205,13 @@ class IncrementalPR:
     def __init__(self, is_incremental: bool = False):
         self.is_incremental = is_incremental
         self.commits_range = None
-        self.first_new_commit_sha = None
-        self.last_seen_commit_sha = None
+        self.first_new_commit = None
+        self.last_seen_commit = None
+
+    @property
+    def first_new_commit_sha(self):
+        return None if self.first_new_commit is None else self.first_new_commit.sha
+
+    @property
+    def last_seen_commit_sha(self):
+        return None if self.last_seen_commit is None else self.last_seen_commit.sha
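The sha fields on IncrementalPR become properties derived from stored commit objects, so existing callers of the old attribute names keep working. A minimal standalone illustration; IncrementalRange and Commit are stand-in names, not pr-agent types:

from dataclasses import dataclass

@dataclass
class Commit:
    sha: str

class IncrementalRange:
    def __init__(self):
        self.first_new_commit = None  # holds a full commit object, not a sha

    @property
    def first_new_commit_sha(self):
        # old attribute name preserved as a derived, read-only view
        return None if self.first_new_commit is None else self.first_new_commit.sha

r = IncrementalRange()
assert r.first_new_commit_sha is None
r.first_new_commit = Commit("abc123")
assert r.first_new_commit_sha == "abc123"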
@@ -13,7 +13,7 @@ from ..algo.utils import load_large_diff
 from ..config_loader import get_settings
 from ..log import get_logger
 from ..servers.utils import RateLimitExceeded
-from .git_provider import FilePatchInfo, GitProvider, IncrementalPR
+from .git_provider import FilePatchInfo, GitProvider, IncrementalPR, EDIT_TYPE


 class GithubProvider(GitProvider):
@@ -50,7 +50,7 @@ class GithubProvider(GitProvider):
     def get_incremental_commits(self):
         self.commits = list(self.pr.get_commits())

-        self.get_previous_review()
+        self.previous_review = self.get_previous_review(full=True, incremental=True)
         if self.previous_review:
             self.incremental.commits_range = self.get_commit_range()
             # Get all files changed during the commit range
@@ -63,23 +63,29 @@ class GithubProvider(GitProvider):

     def get_commit_range(self):
         last_review_time = self.previous_review.created_at
-        first_new_commit_index = 0
+        first_new_commit_index = None
         for index in range(len(self.commits) - 1, -1, -1):
             if self.commits[index].commit.author.date > last_review_time:
-                self.incremental.first_new_commit_sha = self.commits[index].sha
+                self.incremental.first_new_commit = self.commits[index]
                 first_new_commit_index = index
             else:
-                self.incremental.last_seen_commit_sha = self.commits[index].sha
+                self.incremental.last_seen_commit = self.commits[index]
                 break
-        return self.commits[first_new_commit_index:]
+        return self.commits[first_new_commit_index:] if first_new_commit_index is not None else []

-    def get_previous_review(self):
-        self.previous_review = None
-        self.comments = list(self.pr.get_issue_comments())
+    def get_previous_review(self, *, full: bool, incremental: bool):
+        if not (full or incremental):
+            raise ValueError("At least one of full or incremental must be True")
+        if not getattr(self, "comments", None):
+            self.comments = list(self.pr.get_issue_comments())
+        prefixes = []
+        if full:
+            prefixes.append("## PR Analysis")
+        if incremental:
+            prefixes.append("## Incremental PR Review")
         for index in range(len(self.comments) - 1, -1, -1):
-            if self.comments[index].body.startswith("## PR Analysis") or self.comments[index].body.startswith("## Incremental PR Review"):
-                self.previous_review = self.comments[index]
-                break
+            if any(self.comments[index].body.startswith(prefix) for prefix in prefixes):
+                return self.comments[index]

     def get_files(self):
         if self.incremental.is_incremental and self.file_set:
@@ -123,7 +129,20 @@ class GithubProvider(GitProvider):
             if not patch:
                 patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str)

-            diff_files.append(FilePatchInfo(original_file_content_str, new_file_content_str, patch, file.filename))
+            if file.status == 'added':
+                edit_type = EDIT_TYPE.ADDED
+            elif file.status == 'removed':
+                edit_type = EDIT_TYPE.DELETED
+            elif file.status == 'renamed':
+                edit_type = EDIT_TYPE.RENAMED
+            elif file.status == 'modified':
+                edit_type = EDIT_TYPE.MODIFIED
+            else:
+                get_logger().error(f"Unknown edit type: {file.status}")
+                edit_type = EDIT_TYPE.UNKNOWN
+            file_patch_canonical_structure = FilePatchInfo(original_file_content_str, new_file_content_str, patch,
+                                                           file.filename, edit_type=edit_type)
+            diff_files.append(file_patch_canonical_structure)

         self.diff_files = diff_files
         return diff_files
@@ -135,10 +154,36 @@ class GithubProvider(GitProvider):
     def publish_description(self, pr_title: str, pr_body: str):
         self.pr.edit(title=pr_title, body=pr_body)

+    def get_latest_commit_url(self) -> str:
+        return self.last_commit_id.html_url
+
+    def get_comment_url(self, comment) -> str:
+        return comment.html_url
+
+    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool = True):
+        prev_comments = list(self.pr.get_issue_comments())
+        for comment in prev_comments:
+            body = comment.body
+            if body.startswith(initial_header):
+                latest_commit_url = self.get_latest_commit_url()
+                comment_url = self.get_comment_url(comment)
+                if update_header:
+                    updated_header = f"{initial_header}\n\n### (review updated until commit {latest_commit_url})\n"
+                    pr_comment_updated = pr_comment.replace(initial_header, updated_header)
+                else:
+                    pr_comment_updated = pr_comment
+                get_logger().info(f"Persistent mode - updating comment {comment_url} to latest review message")
+                response = comment.edit(pr_comment_updated)
+                self.publish_comment(
+                    f"**[Persistent review]({comment_url})** updated to latest commit {latest_commit_url}")
+                return
+        self.publish_comment(pr_comment)
+
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
         if is_temporary and not get_settings().config.publish_output_progress:
             get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
             return

         response = self.pr.create_issue_comment(pr_comment)
         if hasattr(response, "user") and hasattr(response.user, "login"):
             self.github_user_id = response.user.login
@@ -218,10 +263,16 @@ class GithubProvider(GitProvider):
         try:
             for comment in getattr(self.pr, 'comments_list', []):
                 if comment.is_temporary:
-                    comment.delete()
+                    self.remove_comment(comment)
         except Exception as e:
             get_logger().exception(f"Failed to remove initial comment, error: {e}")

+    def remove_comment(self, comment):
+        try:
+            comment.delete()
+        except Exception as e:
+            get_logger().exception(f"Failed to remove comment, error: {e}")
+
     def get_title(self):
         return self.pr.title
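The status-to-EDIT_TYPE chain in get_diff_files could equally be a table lookup; a sketch of that alternative, with EDIT_TYPE re-declared so the snippet runs on its own:

from enum import Enum

class EDIT_TYPE(Enum):
    ADDED = 1
    DELETED = 2
    MODIFIED = 3
    RENAMED = 4
    UNKNOWN = 5

_STATUS_TO_EDIT_TYPE = {
    'added': EDIT_TYPE.ADDED,
    'removed': EDIT_TYPE.DELETED,
    'renamed': EDIT_TYPE.RENAMED,
    'modified': EDIT_TYPE.MODIFIED,
}

def edit_type_for(status: str) -> EDIT_TYPE:
    # unrecognized statuses fall back to UNKNOWN instead of raising
    return _STATUS_TO_EDIT_TYPE.get(status, EDIT_TYPE.UNKNOWN)

assert edit_type_for('added') is EDIT_TYPE.ADDED
assert edit_type_for('copied') is EDIT_TYPE.UNKNOWN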
@@ -136,6 +136,33 @@ class GitLabProvider(GitProvider):
         except Exception as e:
             get_logger().exception(f"Could not update merge request {self.id_mr} description: {e}")

+    def get_latest_commit_url(self):
+        return self.mr.commits().next().web_url
+
+    def get_comment_url(self, comment):
+        return f"{self.mr.web_url}#note_{comment.id}"
+
+    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool = True):
+        try:
+            for comment in self.mr.notes.list(get_all=True)[::-1]:
+                if comment.body.startswith(initial_header):
+                    latest_commit_url = self.get_latest_commit_url()
+                    comment_url = self.get_comment_url(comment)
+                    if update_header:
+                        updated_header = f"{initial_header}\n\n### (review updated until commit {latest_commit_url})\n"
+                        pr_comment_updated = pr_comment.replace(initial_header, updated_header)
+                    else:
+                        pr_comment_updated = pr_comment
+                    get_logger().info(f"Persistent mode - updating comment {comment_url} to latest review message")
+                    response = self.mr.notes.update(comment.id, {'body': pr_comment_updated})
+                    self.publish_comment(
+                        f"**[Persistent review]({comment_url})** updated to latest commit {latest_commit_url}")
+                    return
+        except Exception as e:
+            get_logger().exception(f"Failed to update persistent review, error: {e}")
+            pass
+        self.publish_comment(pr_comment)
+
     def publish_comment(self, mr_comment: str, is_temporary: bool = False):
         comment = self.mr.notes.create({'body': mr_comment})
         if is_temporary:
@@ -287,10 +314,16 @@ class GitLabProvider(GitProvider):
     def remove_initial_comment(self):
         try:
             for comment in self.temp_comments:
-                comment.delete()
+                self.remove_comment(comment)
         except Exception as e:
             get_logger().exception(f"Failed to remove temp comments, error: {e}")

+    def remove_comment(self, comment):
+        try:
+            comment.delete()
+        except Exception as e:
+            get_logger().exception(f"Failed to remove comment, error: {e}")
+
     def get_title(self):
         return self.mr.title

@@ -309,7 +342,7 @@ class GitLabProvider(GitProvider):

     def get_repo_settings(self):
         try:
-            contents = self.gl.projects.get(self.id_project).files.get(file_path='.pr_agent.toml', ref=self.mr.source_branch)
+            contents = self.gl.projects.get(self.id_project).files.get(file_path='.pr_agent.toml', ref=self.mr.target_branch).decode()
             return contents
         except Exception:
             return ""
@@ -140,6 +140,9 @@ class LocalGitProvider(GitProvider):
     def remove_initial_comment(self):
         pass  # Not applicable to the local git provider, but required by the interface

+    def remove_comment(self, comment):
+        pass  # Not applicable to the local git provider, but required by the interface
+
     def get_languages(self):
         """
         Calculate percentage of languages in repository. Used for hunk prioritisation.
@@ -27,7 +27,8 @@ def apply_repo_settings(pr_url):
                     get_settings().unset(section)
                     get_settings().set(section, section_dict, merge=False)
                 get_logger().info(f"Applying repo settings for section {section}, contents: {contents}")
+        except Exception as e:
+            get_logger().exception("Failed to apply repo settings", e)
         finally:
             if repo_settings_file:
                 try:
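apply_repo_settings overwrites each settings section with the repo's .pr_agent.toml contents (merge=False replaces rather than merges). A sketch with a plain dict standing in for the Dynaconf settings object; tomllib requires Python 3.11+ (the 'toml' package covers older versions):

import tomllib

settings = {"pr_reviewer": {"num_code_suggestions": 4}}
repo_toml = b"[pr_reviewer]\nenable_review_labels_effort = true\n"
for section, values in tomllib.loads(repo_toml.decode()).items():
    settings[section] = values  # replace the whole section, as merge=False does
print(settings["pr_reviewer"])  # only the repo-provided keys remain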
@@ -19,10 +19,6 @@ async def run_action():
     OPENAI_KEY = os.environ.get('OPENAI_KEY') or os.environ.get('OPENAI.KEY')
     OPENAI_ORG = os.environ.get('OPENAI_ORG') or os.environ.get('OPENAI.ORG')
     GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
-    CUSTOM_LABELS = os.environ.get('CUSTOM_LABELS')
-    CUSTOM_LABELS_DESCRIPTIONS = os.environ.get('CUSTOM_LABELS_DESCRIPTIONS')
-    # CUSTOM_LABELS is a comma separated list of labels (string), convert to list and strip spaces

     get_settings().set("CONFIG.PUBLISH_OUTPUT_PROGRESS", False)

     # Check if required environment variables are set
@@ -38,7 +34,6 @@ async def run_action():
     if not GITHUB_TOKEN:
         print("GITHUB_TOKEN not set")
         return
-    # CUSTOM_LABELS_DICT = handle_custom_labels(CUSTOM_LABELS, CUSTOM_LABELS_DESCRIPTIONS)

     # Set the environment variables in the settings
     get_settings().set("OPENAI.KEY", OPENAI_KEY)
@@ -46,7 +41,6 @@ async def run_action():
     get_settings().set("OPENAI.ORG", OPENAI_ORG)
     get_settings().set("GITHUB.USER_TOKEN", GITHUB_TOKEN)
     get_settings().set("GITHUB.DEPLOYMENT_TYPE", "user")
-    # get_settings().set("CUSTOM_LABELS", CUSTOM_LABELS_DICT)

     # Load the event payload
     try:
@@ -104,31 +98,5 @@ async def run_action():
             await PRAgent().handle_request(url, body)


-def handle_custom_labels(CUSTOM_LABELS, CUSTOM_LABELS_DESCRIPTIONS):
-    if CUSTOM_LABELS:
-        CUSTOM_LABELS = [x.strip() for x in CUSTOM_LABELS.split(',')]
-    else:
-        # Set default labels
-        CUSTOM_LABELS = ['Bug fix', 'Tests', 'Bug fix with tests', 'Refactoring', 'Enhancement', 'Documentation',
-                         'Other']
-        print(f"Using default labels: {CUSTOM_LABELS}")
-    if CUSTOM_LABELS_DESCRIPTIONS:
-        CUSTOM_LABELS_DESCRIPTIONS = [x.strip() for x in CUSTOM_LABELS_DESCRIPTIONS.split(',')]
-    else:
-        # Set default labels
-        CUSTOM_LABELS_DESCRIPTIONS = ['Fixes a bug in the code', 'Adds or modifies tests',
-                                      'Fixes a bug in the code and adds or modifies tests',
-                                      'Refactors the code without changing its functionality',
-                                      'Adds new features or functionality',
-                                      'Adds or modifies documentation',
-                                      'Other changes that do not fit in any of the above categories']
-        print(f"Using default labels: {CUSTOM_LABELS_DESCRIPTIONS}")
-    # create a dictionary of labels and descriptions
-    CUSTOM_LABELS_DICT = dict()
-    for i in range(len(CUSTOM_LABELS)):
-        CUSTOM_LABELS_DICT[CUSTOM_LABELS[i]] = {'description': CUSTOM_LABELS_DESCRIPTIONS[i]}
-    return CUSTOM_LABELS_DICT
-
-
 if __name__ == '__main__':
     asyncio.run(run_action())
@@ -1,7 +1,7 @@
 import copy
 import os
-import time
-from typing import Any, Dict
+import asyncio.locks
+from typing import Any, Dict, List, Tuple

 import uvicorn
 from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
@@ -14,8 +14,9 @@ from pr_agent.algo.utils import update_settings_from_args
 from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.utils import apply_repo_settings
+from pr_agent.git_providers.git_provider import IncrementalPR
 from pr_agent.log import LoggingFormat, get_logger, setup_logger
-from pr_agent.servers.utils import verify_signature
+from pr_agent.servers.utils import verify_signature, DefaultDictWithTimeout

 setup_logger(fmt=LoggingFormat.JSON)

@@ -47,6 +48,7 @@ async def handle_marketplace_webhooks(request: Request, response: Response):
     body = await get_body(request)
     get_logger().info(f'Request body:\n{body}')

+
 async def get_body(request):
     try:
         body = await request.json()
@@ -61,7 +63,9 @@ async def get_body(request):
     return body


-_duplicate_requests_cache = {}
+_duplicate_requests_cache = DefaultDictWithTimeout(ttl=get_settings().github_app.duplicate_requests_cache_ttl)
+_duplicate_push_triggers = DefaultDictWithTimeout(ttl=get_settings().github_app.push_trigger_pending_tasks_ttl)
+_pending_task_duplicate_push_conditions = DefaultDictWithTimeout(asyncio.locks.Condition, ttl=get_settings().github_app.push_trigger_pending_tasks_ttl)


 async def handle_request(body: Dict[str, Any], event: str):
@@ -109,40 +113,111 @@ async def handle_request(body: Dict[str, Any], event: str):
     # handle pull_request event:
     #   automatically review opened/reopened/ready_for_review PRs as long as they're not in draft,
     #   as well as direct review requests from the bot
-    elif event == 'pull_request':
-        pull_request = body.get("pull_request")
-        if not pull_request:
-            return {}
-        api_url = pull_request.get("url")
-        if not api_url:
-            return {}
-        log_context["api_url"] = api_url
-        if pull_request.get("draft", True) or pull_request.get("state") != "open" or pull_request.get("user", {}).get("login", "") == bot_user:
+    elif event == 'pull_request' and action != 'synchronize':
+        pull_request, api_url = _check_pull_request_event(action, body, log_context, bot_user)
+        if not (pull_request and api_url):
             return {}
         if action in get_settings().github_app.handle_pr_actions:
             if action == "review_requested":
                 if body.get("requested_reviewer", {}).get("login", "") != bot_user:
                     return {}
-                if pull_request.get("created_at") == pull_request.get("updated_at"):
-                    # avoid double reviews when opening a PR for the first time
-                    return {}
-            get_logger().info(f"Performing review because of event={event} and action={action}")
-            apply_repo_settings(api_url)
-            for command in get_settings().github_app.pr_commands:
-                split_command = command.split(" ")
-                command = split_command[0]
-                args = split_command[1:]
-                other_args = update_settings_from_args(args)
-                new_command = ' '.join([command] + other_args)
-                get_logger().info(body)
-                get_logger().info(f"Performing command: {new_command}")
-                with get_logger().contextualize(**log_context):
-                    await agent.handle_request(api_url, new_command)
+            get_logger().info(f"Performing review for {api_url=} because of {event=} and {action=}")
+            await _perform_commands("pr_commands", agent, body, api_url, log_context)
+
+    # handle pull_request event with synchronize action - "push trigger" for new commits
+    elif event == 'pull_request' and action == 'synchronize' and get_settings().github_app.handle_push_trigger:
+        pull_request, api_url = _check_pull_request_event(action, body, log_context, bot_user)
+        if not (pull_request and api_url):
+            return {}
+
+        # TODO: do we still want to get the list of commits to filter bot/merge commits?
+        before_sha = body.get("before")
+        after_sha = body.get("after")
+        merge_commit_sha = pull_request.get("merge_commit_sha")
+        if before_sha == after_sha:
+            return {}
+        if get_settings().github_app.push_trigger_ignore_merge_commits and after_sha == merge_commit_sha:
+            return {}
+        if get_settings().github_app.push_trigger_ignore_bot_commits and body.get("sender", {}).get("login", "") == bot_user:
+            return {}
+
+        # Prevent triggering multiple times for subsequent push triggers when one is enough:
+        # The first push will trigger the processing, and if there's a second push in the meanwhile it will wait.
+        # Any more events will be discarded, because they will all trigger the exact same processing on the PR.
+        # We let the second event wait instead of discarding it because while the first event was being processed,
+        # more commits may have been pushed that led to the subsequent events,
+        # so we keep just one waiting as a delegate to trigger the processing for the new commits when done waiting.
+        current_active_tasks = _duplicate_push_triggers.setdefault(api_url, 0)
+        max_active_tasks = 2 if get_settings().github_app.push_trigger_pending_tasks_backlog else 1
+        if current_active_tasks < max_active_tasks:
+            # first task can enter, and second tasks too if backlog is enabled
+            get_logger().info(
+                f"Continue processing push trigger for {api_url=} because there are {current_active_tasks} active tasks"
+            )
+            _duplicate_push_triggers[api_url] += 1
+        else:
+            get_logger().info(
+                f"Skipping push trigger for {api_url=} because another event already triggered the same processing"
+            )
+            return {}
+        async with _pending_task_duplicate_push_conditions[api_url]:
+            if current_active_tasks == 1:
+                # second task waits
+                get_logger().info(
+                    f"Waiting to process push trigger for {api_url=} because the first task is still in progress"
+                )
+                await _pending_task_duplicate_push_conditions[api_url].wait()
+                get_logger().info(f"Finished waiting to process push trigger for {api_url=} - continue with flow")
+
+        try:
+            if get_settings().github_app.push_trigger_wait_for_initial_review and not get_git_provider()(api_url, incremental=IncrementalPR(True)).previous_review:
+                get_logger().info(f"Skipping incremental review because there was no initial review for {api_url=} yet")
+                return {}
+            get_logger().info(f"Performing incremental review for {api_url=} because of {event=} and {action=}")
+            await _perform_commands("push_commands", agent, body, api_url, log_context)
+
+        finally:
+            # release the waiting task block
+            async with _pending_task_duplicate_push_conditions[api_url]:
+                _pending_task_duplicate_push_conditions[api_url].notify(1)
+                _duplicate_push_triggers[api_url] -= 1

     get_logger().info("event or action does not require handling")
     return {}


+def _check_pull_request_event(action: str, body: dict, log_context: dict, bot_user: str) -> Tuple[Dict[str, Any], str]:
+    invalid_result = {}, ""
+    pull_request = body.get("pull_request")
+    if not pull_request:
+        return invalid_result
+    api_url = pull_request.get("url")
+    if not api_url:
+        return invalid_result
+    log_context["api_url"] = api_url
+    if pull_request.get("draft", True) or pull_request.get("state") != "open" or pull_request.get("user", {}).get("login", "") == bot_user:
+        return invalid_result
+    if action in ("review_requested", "synchronize") and pull_request.get("created_at") == pull_request.get("updated_at"):
+        # avoid double reviews when opening a PR for the first time
+        return invalid_result
+    return pull_request, api_url
+
+
+async def _perform_commands(commands_conf: str, agent: PRAgent, body: dict, api_url: str, log_context: dict):
+    apply_repo_settings(api_url)
+    commands = get_settings().get(f"github_app.{commands_conf}")
+    for command in commands:
+        split_command = command.split(" ")
+        command = split_command[0]
+        args = split_command[1:]
+        other_args = update_settings_from_args(args)
+        new_command = ' '.join([command] + other_args)
+        get_logger().info(body)
+        get_logger().info(f"Performing command: {new_command}")
+        with get_logger().contextualize(**log_context):
+            await agent.handle_request(api_url, new_command)
+
+
 def _is_duplicate_request(body: Dict[str, Any]) -> bool:
     """
     In some deployments it's possible to get duplicate requests if the handling is long,
@@ -150,13 +225,8 @@ def _is_duplicate_request(body: Dict[str, Any]) -> bool:
     """
     request_hash = hash(str(body))
     get_logger().info(f"request_hash: {request_hash}")
-    request_time = time.monotonic()
-    ttl = get_settings().github_app.duplicate_requests_cache_ttl  # in seconds
-    to_delete = [key for key, key_time in _duplicate_requests_cache.items() if request_time - key_time > ttl]
-    for key in to_delete:
-        del _duplicate_requests_cache[key]
-    is_duplicate = request_hash in _duplicate_requests_cache
-    _duplicate_requests_cache[request_hash] = request_time
+    is_duplicate = _duplicate_requests_cache.get(request_hash, False)
+    _duplicate_requests_cache[request_hash] = True
    if is_duplicate:
        get_logger().info(f"Ignoring duplicate request {request_hash}")
    return is_duplicate
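The comment block above describes a first-runs, second-waits, rest-dropped scheme for push events. A standalone sketch of that pattern built from a counter plus asyncio.Condition; the names here are illustrative, not the pr-agent handlers:

import asyncio

_active = {}
_conditions = {}

async def deduped(key, work):
    count = _active.setdefault(key, 0)
    if count >= 2:
        return  # a runner and one waiting delegate already exist; drop this event
    _active[key] = count + 1
    cond = _conditions.setdefault(key, asyncio.Condition())
    async with cond:
        if count == 1:
            await cond.wait()  # the delegate waits for the running task to finish
    try:
        await work()  # process the (possibly newer) state of the PR
    finally:
        async with cond:
            cond.notify(1)  # wake the delegate, if any
        _active[key] -= 1

# e.g. asyncio.run(deduped("pr#1", some_async_review_fn))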
@@ -58,13 +58,13 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
     if data.get('object_kind') == 'merge_request' and data['object_attributes'].get('action') in ['open', 'reopen']:
         get_logger().info(f"A merge request has been opened: {data['object_attributes'].get('title')}")
         url = data['object_attributes'].get('url')
-        handle_request(background_tasks, url, "/review")
+        handle_request(background_tasks, url, "/review", log_context)
     elif data.get('object_kind') == 'note' and data['event_type'] == 'note':
         if 'merge_request' in data:
             mr = data['merge_request']
             url = mr.get('url')
             body = data.get('object_attributes', {}).get('note')
-            handle_request(background_tasks, url, body)
+            handle_request(background_tasks, url, body, log_context)
     return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))
@@ -1,12 +1,15 @@
 from fastapi import FastAPI
 from mangum import Mangum
+from starlette.middleware import Middleware
+from starlette_context.middleware import RawContextMiddleware

 from pr_agent.log import setup_logger
 from pr_agent.servers.github_app import router

 setup_logger()

-app = FastAPI()
+middleware = [Middleware(RawContextMiddleware)]
+app = FastAPI(middleware=middleware)
 app.include_router(router)

 handler = Mangum(app, lifespan="off")
@ -1,5 +1,8 @@
|
|||||||
import hashlib
|
import hashlib
|
||||||
import hmac
|
import hmac
|
||||||
|
import time
|
||||||
|
from collections import defaultdict
|
||||||
|
from typing import Callable, Any
|
||||||
|
|
||||||
from fastapi import HTTPException
|
from fastapi import HTTPException
|
||||||
|
|
||||||
@@ -25,3 +28,59 @@ def verify_signature(payload_body, secret_token, signature_header):
 class RateLimitExceeded(Exception):
     """Raised when the git provider API rate limit has been exceeded."""
     pass
+
+
+class DefaultDictWithTimeout(defaultdict):
+    """A defaultdict with a time-to-live (TTL)."""
+
+    def __init__(
+        self,
+        default_factory: Callable[[], Any] = None,
+        ttl: int = None,
+        refresh_interval: int = 60,
+        update_key_time_on_get: bool = True,
+        *args,
+        **kwargs,
+    ):
+        """
+        Args:
+            default_factory: The default factory to use for keys that are not in the dictionary.
+            ttl: The time-to-live (TTL) in seconds.
+            refresh_interval: How often to refresh the dict and delete items older than the TTL.
+            update_key_time_on_get: Whether to update the access time of a key also on get (or only when set).
+        """
+        super().__init__(default_factory, *args, **kwargs)
+        self.__key_times = dict()
+        self.__ttl = ttl
+        self.__refresh_interval = refresh_interval
+        self.__update_key_time_on_get = update_key_time_on_get
+        self.__last_refresh = self.__time() - self.__refresh_interval
+
+    @staticmethod
+    def __time():
+        return time.monotonic()
+
+    def __refresh(self):
+        if self.__ttl is None:
+            return
+        request_time = self.__time()
+        if request_time - self.__last_refresh > self.__refresh_interval:
+            return
+        to_delete = [key for key, key_time in self.__key_times.items() if request_time - key_time > self.__ttl]
+        for key in to_delete:
+            del self[key]
+        self.__last_refresh = request_time
+
+    def __getitem__(self, __key):
+        if self.__update_key_time_on_get:
+            self.__key_times[__key] = self.__time()
+        self.__refresh()
+        return super().__getitem__(__key)
+
+    def __setitem__(self, __key, __value):
+        self.__key_times[__key] = self.__time()
+        return super().__setitem__(__key, __value)
+
+    def __delitem__(self, __key):
+        del self.__key_times[__key]
+        return super().__delitem__(__key)
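The `DefaultDictWithTimeout` added above is what the GitHub app's duplicate-request guard (refactored at the top of this diff) is built on. A minimal usage sketch, assuming the class is importable from `pr_agent.servers.utils`; the 60-second TTL is illustrative:

```python
from pr_agent.servers.utils import DefaultDictWithTimeout

# Remember webhook payload hashes; each write records the key's timestamp,
# and keys older than the TTL become candidates for eviction on refresh.
_duplicate_requests_cache = DefaultDictWithTimeout(ttl=60)

def is_duplicate_request(request_hash: str) -> bool:
    is_duplicate = _duplicate_requests_cache.get(request_hash, False)
    _duplicate_requests_cache[request_hash] = True
    return is_duplicate
```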
@@ -34,7 +34,11 @@ key = "" # Optional, uncomment if you want to use Huggingface Inference API. Acq
 api_base = "" # the base url for your huggingface inference endpoint

 [ollama]
-api_base = "" # the base url for your huggingface inference endpoint
+api_base = "" # the base url for your local Llama 2, Code Llama, and other models inference endpoint. Acquire through https://ollama.ai/

+[vertexai]
+vertex_project = "" # the google cloud platform project name for your vertexai deployment
+vertex_location = "" # the google cloud platform location for your vertexai deployment
+
 [github]
 # ---- Set the following only for deployment type == "user"
@@ -1,5 +1,5 @@
 [config]
-model="gpt-4"
+model="gpt-4" # "gpt-4-1106-preview"
 fallback_models=["gpt-3.5-turbo-16k"]
 git_provider="github"
 publish_output=true
@@ -10,21 +10,33 @@ use_repo_settings_file=true
 ai_timeout=180
 max_description_tokens = 500
 max_commits_tokens = 500
+max_model_tokens = 32000 # Limits the maximum number of tokens that can be used by any model, regardless of the model's default capabilities.
 patch_extra_lines = 3
 secret_provider="google_cloud_storage"
 cli_mode=false

 [pr_reviewer] # /review #
+# enable/disable features
 require_focused_review=false
 require_score_review=false
 require_tests_review=true
 require_security_review=true
 require_estimate_effort_to_review=true
+# general options
 num_code_suggestions=4
 inline_code_comments = false
 ask_and_reflect=false
 automatic_review=true
+remove_previous_review_comment=false
+persistent_comment=true
 extra_instructions = ""
+# review labels
+enable_review_labels_security=true
+enable_review_labels_effort=false
+# specific configurations for incremental review (/review -i)
+require_all_thresholds_for_incremental_review=false
+minimal_commits_for_incremental_review=0
+minimal_minutes_for_incremental_review=0

 [pr_description] # /describe #
 publish_labels=true
@@ -33,6 +45,7 @@ add_original_user_description=false
 keep_original_user_title=false
 use_bullet_points=true
 extra_instructions = ""
+enable_pr_type=true

 # markers
 use_description_markers=false
@@ -86,6 +99,30 @@ pr_commands = [
     "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
     "/auto_review",
 ]
+# settings for "pull_request" event with "synchronize" action - used to detect and handle push triggers for new commits
+handle_push_trigger = false
+push_trigger_ignore_bot_commits = true
+push_trigger_ignore_merge_commits = true
+push_trigger_wait_for_initial_review = true
+push_trigger_pending_tasks_backlog = true
+push_trigger_pending_tasks_ttl = 300
+push_commands = [
+    "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
+    """/auto_review -i \
+    --pr_reviewer.require_focused_review=false \
+    --pr_reviewer.require_score_review=false \
+    --pr_reviewer.require_tests_review=false \
+    --pr_reviewer.require_security_review=false \
+    --pr_reviewer.require_estimate_effort_to_review=false \
+    --pr_reviewer.num_code_suggestions=0 \
+    --pr_reviewer.inline_code_comments=false \
+    --pr_reviewer.remove_previous_review_comment=true \
+    --pr_reviewer.require_all_thresholds_for_incremental_review=false \
+    --pr_reviewer.minimal_commits_for_incremental_review=5 \
+    --pr_reviewer.minimal_minutes_for_incremental_review=30 \
+    --pr_reviewer.extra_instructions='' \
+    """
+]

 [gitlab]
 # URL to the gitlab service
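A hedged sketch of how the new push-trigger settings above might be consulted when a `pull_request`/`synchronize` event arrives; the helper below is illustrative, not the actual handler in the GitHub app:

```python
from pr_agent.config_loader import get_settings

def should_handle_push(author_is_bot: bool, is_merge_commit: bool) -> bool:
    # Illustrative only: setting names match the [github_app] keys above.
    settings = get_settings()
    if not settings.github_app.handle_push_trigger:
        return False  # push triggers are off by default
    if settings.github_app.push_trigger_ignore_bot_commits and author_is_bot:
        return False
    if settings.github_app.push_trigger_ignore_merge_commits and is_merge_commit:
        return False
    return True
```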
@@ -16,7 +16,7 @@ You must use the following YAML schema to format your answer:
 PR Type:
   type: array
 {%- if enable_custom_labels %}
-  description: One or more labels that describe the PR type. Don't output the description in the parentheses.
+  description: Labels that are applicable to the Pull Request. Don't output the description in the parentheses. If none of the labels is relevant to the PR, output an empty array.
 {%- endif %}
   items:
     type: string
@@ -39,7 +39,6 @@ PR Type:
 {{ custom_labels_examples }}
 {%- else %}
 - Bug fix
-- Tests
 {%- endif %}
 ```

@@ -1,9 +1,10 @@
 [pr_description_prompt]
 system="""You are CodiumAI-PR-Reviewer, a language model designed to review git pull requests.
-Your task is to provide full description of the PR content.
-- Make sure not to focus the new PR code (the '+' lines).
+Your task is to provide full description of a Pull Request (PR) content.
+- Make sure to focus on the new PR code (the '+' lines).
 - Notice that the 'Previous title', 'Previous description' and 'Commit messages' sections may be partial, simplistic, non-informative or not up-to-date. Hence, compare them to the PR diff code, and use them only as a reference.
+- Emphasize first the most important changes, and then the less important ones.
 - If needed, each YAML output should be in block scalar format ('|-')
 {%- if extra_instructions %}

 Extra instructions from the user:
@@ -18,22 +19,22 @@ PR Title:
     type: string
     description: an informative title for the PR, describing its main theme
 PR Type:
-    type: array
+    type: string
+    enum:
+      - Bug fix
+      - Tests
+      - Refactoring
+      - Enhancement
+      - Documentation
+      - Other
 {%- if enable_custom_labels %}
-    description: One or more labels that describe the PR type. Don't output the description in the parentheses.
-{%- endif %}
+PR Labels:
+    type: array
+    description: Labels that are applicable to the Pull Request. Don't output the description in the parentheses. If none of the labels is relevant to the PR, output an empty array.
     items:
       type: string
       enum:
-{%- if enable_custom_labels %}
 {{ custom_labels }}
-{%- else %}
-      - Bug fix
-      - Tests
-      - Refactoring
-      - Enhancement
-      - Documentation
-      - Other
 {%- endif %}
 PR Description:
     type: string
|
|||||||
changes in file:
|
changes in file:
|
||||||
type: string
|
type: string
|
||||||
description: minimal and concise description of the changes in the relevant file
|
description: minimal and concise description of the changes in the relevant file
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
Example output:
|
Example output:
|
||||||
@@ -58,10 +60,11 @@ Example output:
 PR Title: |-
   ...
 PR Type:
+  ...
 {%- if enable_custom_labels %}
-{{ custom_labels_examples }}
-{%- else %}
-- Bug fix
+PR Labels:
+- ...
+- ...
 {%- endif %}
 PR Description: |-
   ...
@@ -51,22 +51,13 @@ PR Analysis:
     description: summary of the PR in 2-3 sentences.
   Type of PR:
     type: string
-{%- if enable_custom_labels %}
-    description: One or more labels that describe the PR type. Don't output the description in the parentheses.
-{%- endif %}
-    items:
-      type: string
     enum:
-{%- if enable_custom_labels %}
-{{ custom_labels }}
-{%- else %}
       - Bug fix
       - Tests
       - Refactoring
      - Enhancement
       - Documentation
       - Other
-{%- endif %}
 {%- if require_score %}
   Score:
     type: int
@@ -102,7 +93,7 @@ PR Analysis:
     description: >-
       Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review , 5 means long and hard review.
       Take into account the size, complexity, quality, and the needed changes of the PR code diff.
-      Explain your answer shortly (1-2 sentences).
+      Explain your answer shortly (1-2 sentences). Use the format: '1, because ...'
 {%- endif %}
 PR Feedback:
   General suggestions:
@@ -139,7 +130,8 @@ PR Feedback:
   Security concerns:
     type: string
     description: >-
-      yes\\no question: does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? If answered 'yes', explain your answer briefly.
+      does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? Answer 'No' if there are no possible issues.
+      Answer 'Yes, because ...' if there are security concerns or issues. Explain your answer shortly.
 {%- endif %}
 ```

@@ -151,7 +143,7 @@ PR Analysis:
 PR summary: |-
   xxx
 Type of PR: |-
-  Bug fix
+  ...
 {%- if require_score %}
 Score: 89
 {%- endif %}
@@ -161,7 +153,8 @@ PR Analysis:
 Focused PR: no, because ...
 {%- endif %}
 {%- if require_estimate_effort_to_review %}
-Estimated effort to review [1-5]: 3, because ...
+Estimated effort to review [1-5]: |-
+  3, because ...
 {%- endif %}
 PR Feedback:
 General PR suggestions: |-
@@ -7,7 +7,7 @@ from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handler import AiHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, set_custom_labels
+from pr_agent.algo.utils import load_yaml, set_custom_labels, get_user_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
@@ -98,9 +98,9 @@ class PRDescription:
                 self.git_provider.publish_description(pr_title, pr_body)
                 if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
                     current_labels = self.git_provider.get_labels()
-                    if current_labels is None:
-                        current_labels = []
-                    self.git_provider.publish_labels(pr_labels + current_labels)
+                    user_labels = get_user_labels(current_labels)
+                    self.git_provider.publish_labels(pr_labels + user_labels)
                 self.git_provider.remove_initial_comment()
             except Exception as e:
                 get_logger().error(f"Error generating PR description {self.pr_id}: {e}")
@@ -158,6 +158,9 @@ class PRDescription:
             user=user_prompt
         )
+
+        if get_settings().config.verbosity_level >= 2:
+            get_logger().info(f"\nAI response:\n{response}")

         return response

     def _prepare_data(self):
@@ -172,12 +175,16 @@ class PRDescription:
         pr_types = []

         # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
-        if 'PR Type' in self.data:
+        if 'PR Labels' in self.data:
+            if type(self.data['PR Labels']) == list:
+                pr_types = self.data['PR Labels']
+            elif type(self.data['PR Labels']) == str:
+                pr_types = self.data['PR Labels'].split(',')
+        elif 'PR Type' in self.data:
             if type(self.data['PR Type']) == list:
                 pr_types = self.data['PR Type']
             elif type(self.data['PR Type']) == str:
                 pr_types = self.data['PR Type'].split(',')

         return pr_types

     def _prepare_pr_answer_with_markers(self) -> Tuple[str, str]:
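Worth noting about the parsing above: when the model returns the labels as a comma-separated string, `split(',')` keeps the raw pieces, so every item after the first carries a leading space. A two-line illustration:

```python
data = {'PR Labels': 'Bug fix, Tests'}
pr_types = data['PR Labels'].split(',')  # ['Bug fix', ' Tests'] -- note the leading space
```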
@@ -223,6 +230,11 @@ class PRDescription:

         # Iterate over the dictionary items and append the key and value to 'markdown_text' in a markdown format
         markdown_text = ""
+        # Don't display 'PR Labels'
+        if 'PR Labels' in self.data and self.git_provider.is_supported("get_labels"):
+            self.data.pop('PR Labels')
+        if not get_settings().pr_description.enable_pr_type:
+            self.data.pop('PR Type')
         for key, value in self.data.items():
             markdown_text += f"## {key}\n\n"
             markdown_text += f"{value}\n\n"
@@ -248,7 +260,7 @@ class PRDescription:
                 for file in value:
                     filename = file['filename'].replace("'", "`")
                     description = file['changes in file']
-                    pr_body += f'`{filename}`: {description}\n'
+                    pr_body += f'- `{filename}`: {description}\n'
                 if self.git_provider.is_supported("gfm_markdown"):
                     pr_body +="</details>\n"
                 else:
@@ -7,7 +7,7 @@ from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handler import AiHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, set_custom_labels
+from pr_agent.algo.utils import load_yaml, set_custom_labels, get_user_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
@@ -82,11 +82,17 @@ class PRGenerateLabels:

         if get_settings().config.publish_output:
             get_logger().info(f"Pushing labels {self.pr_id}")
+
+            current_labels = self.git_provider.get_labels()
+            user_labels = get_user_labels(current_labels)
+            pr_labels = pr_labels + user_labels
+
             if self.git_provider.is_supported("get_labels"):
-                current_labels = self.git_provider.get_labels()
-                if current_labels is None:
-                    current_labels = []
-                self.git_provider.publish_labels(pr_labels + current_labels)
+                self.git_provider.publish_labels(pr_labels)
+            elif pr_labels:
+                value = ', '.join(v for v in pr_labels)
+                pr_labels_text = f"## PR Labels:\n{value}\n"
+                self.git_provider.publish_comment(pr_labels_text, is_temporary=False)
             self.git_provider.remove_initial_comment()
         except Exception as e:
             get_logger().error(f"Error generating PR labels {self.pr_id}: {e}")
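`get_user_labels` is imported by the description, labels, and review tools in this diff, but its definition is not shown here. A plausible sketch of its contract, inferred from the call sites above (the exact set of agent-generated label names is an assumption): keep only labels a human added, so republishing does not clobber them.

```python
from typing import List

# Assumed to match the default label enum used by the prompts in this diff.
AGENT_LABELS = {'bug fix', 'tests', 'refactoring', 'enhancement', 'documentation', 'other'}

def get_user_labels(current_labels: List[str] = None) -> List[str]:
    # Providers may return None for a PR without labels.
    if current_labels is None:
        current_labels = []
    return [label for label in current_labels if label.lower() not in AGENT_LABELS]
```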
@@ -1,4 +1,5 @@
 import copy
+import datetime
 from collections import OrderedDict
 from typing import List, Tuple

@@ -9,7 +10,7 @@ from yaml import SafeLoader
 from pr_agent.algo.ai_handler import AiHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml, set_custom_labels
+from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml, set_custom_labels, get_user_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
@@ -100,6 +101,8 @@ class PRReviewer:
         if self.is_auto and not get_settings().pr_reviewer.automatic_review:
             get_logger().info(f'Automatic review is disabled {self.pr_url}')
             return None
+        if self.incremental.is_incremental and not self._can_run_incremental_review():
+            return None

         get_logger().info(f'Reviewing PR: {self.pr_url} ...')

@@ -113,9 +116,19 @@ class PRReviewer:

         if get_settings().config.publish_output:
             get_logger().info('Pushing PR review...')
-            self.git_provider.publish_comment(pr_comment)
-            self.git_provider.remove_initial_comment()
+            previous_review_comment = self._get_previous_review_comment()
+
+            # publish the review
+            if get_settings().pr_reviewer.persistent_comment and not self.incremental.is_incremental:
+                self.git_provider.publish_persistent_comment(pr_comment,
+                                                             initial_header="## PR Analysis",
+                                                             update_header=True)
+            else:
+                self.git_provider.publish_comment(pr_comment)
+
+            self.git_provider.remove_initial_comment()
+            if previous_review_comment:
+                self._remove_previous_review_comment(previous_review_comment)
         if get_settings().pr_reviewer.inline_code_comments:
             get_logger().info('Pushing inline code comments...')
             self._publish_inline_code_comments()
@@ -151,7 +164,6 @@ class PRReviewer:
         variables["diff"] = self.patches_diff  # update diff

         environment = Environment(undefined=StrictUndefined)
-        set_custom_labels(variables)
         system_prompt = environment.from_string(get_settings().pr_review_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_review_prompt.user).render(variables)

@@ -166,6 +178,9 @@ class PRReviewer:
             user=user_prompt
         )
+
+        if get_settings().config.verbosity_level >= 2:
+            get_logger().info(f"\nAI response:\n{response}")

         return response

     def _prepare_pr_review(self) -> str:
@@ -212,28 +227,20 @@ class PRReviewer:
                     suggestion['relevant line'] = f"[{suggestion['relevant line']}]({link})"
                 else:
                     pass
-                    # try:
-                    #     relevant_file = suggestion['relevant file'].strip('`').strip("'")
-                    #     relevant_line_str = suggestion['relevant line']
-                    #     if not relevant_line_str:
-                    #         return ""
-                    #
-                    #     position, absolute_position = find_line_number_of_relevant_line_in_file(
-                    #         self.git_provider.diff_files, relevant_file, relevant_line_str)
-                    #     if absolute_position != -1:
-                    #         suggestion[
-                    #             'relevant line'] = f"{suggestion['relevant line']} (line {absolute_position})"
-                    # except:
-                    #     pass


         # Add incremental review section
         if self.incremental.is_incremental:
             last_commit_url = f"{self.git_provider.get_pr_url()}/commits/" \
                               f"{self.git_provider.incremental.first_new_commit_sha}"
+            last_commit_msg = self.incremental.commits_range[0].commit.message if self.incremental.commits_range else ""
+            incremental_review_markdown_text = f"Starting from commit {last_commit_url}"
+            if last_commit_msg:
+                replacement = last_commit_msg.splitlines(keepends=False)[0].replace('_', r'\_')
+                incremental_review_markdown_text += f" \n_({replacement})_"
             data = OrderedDict(data)
             data.update({'Incremental PR Review': {
-                "⏮️ Review for commits since previous PR-Agent review": f"Starting from commit {last_commit_url}"}})
+                "⏮️ Review for commits since previous PR-Agent review": incremental_review_markdown_text}})
             data.move_to_end('Incremental PR Review', last=False)

         markdown_text = convert_to_markdown(data, self.git_provider.is_supported("gfm_markdown"))
@@ -248,6 +255,9 @@ class PRReviewer:
         else:
             markdown_text += actions_help_text

+        # Add custom labels from the review prediction (effort, security)
+        self.set_review_labels(data)
+
         # Log markdown response if verbosity level is high
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"Markdown response:\n{markdown_text}")
@@ -314,3 +324,82 @@ class PRReviewer:
                 break

         return question_str, answer_str
+
+    def _get_previous_review_comment(self):
+        """
+        Get the previous review comment if it exists.
+        """
+        try:
+            if get_settings().pr_reviewer.remove_previous_review_comment and hasattr(self.git_provider, "get_previous_review"):
+                return self.git_provider.get_previous_review(
+                    full=not self.incremental.is_incremental,
+                    incremental=self.incremental.is_incremental,
+                )
+        except Exception as e:
+            get_logger().exception(f"Failed to get previous review comment, error: {e}")
+
+    def _remove_previous_review_comment(self, comment):
+        """
+        Remove the previous review comment if it exists.
+        """
+        try:
+            if get_settings().pr_reviewer.remove_previous_review_comment and comment:
+                self.git_provider.remove_comment(comment)
+        except Exception as e:
+            get_logger().exception(f"Failed to remove previous review comment, error: {e}")
+
+    def _can_run_incremental_review(self) -> bool:
+        """Checks if we can run incremental review according the various configurations and previous review"""
+        # checking if running is auto mode but there are no new commits
+        if self.is_auto and not self.incremental.first_new_commit_sha:
+            get_logger().info(f"Incremental review is enabled for {self.pr_url} but there are no new commits")
+            return False
+        # checking if there are enough commits to start the review
+        num_new_commits = len(self.incremental.commits_range)
+        num_commits_threshold = get_settings().pr_reviewer.minimal_commits_for_incremental_review
+        not_enough_commits = num_new_commits < num_commits_threshold
+        # checking if the commits are not too recent to start the review
+        recent_commits_threshold = datetime.datetime.now() - datetime.timedelta(
+            minutes=get_settings().pr_reviewer.minimal_minutes_for_incremental_review
+        )
+        last_seen_commit_date = (
+            self.incremental.last_seen_commit.commit.author.date if self.incremental.last_seen_commit else None
+        )
+        all_commits_too_recent = (
+            last_seen_commit_date > recent_commits_threshold if self.incremental.last_seen_commit else False
+        )
+        # check all the thresholds or just one to start the review
+        condition = any if get_settings().pr_reviewer.require_all_thresholds_for_incremental_review else all
+        if condition((not_enough_commits, all_commits_too_recent)):
+            get_logger().info(
+                f"Incremental review is enabled for {self.pr_url} but didn't pass the threshold check to run:"
+                f"\n* Number of new commits = {num_new_commits} (threshold is {num_commits_threshold})"
+                f"\n* Last seen commit date = {last_seen_commit_date} (threshold is {recent_commits_threshold})"
+            )
+            return False
+        return True
+
+    def set_review_labels(self, data):
+        if (get_settings().pr_reviewer.enable_review_labels_security or
+                get_settings().pr_reviewer.enable_review_labels_effort):
+            try:
+                review_labels = []
+                if get_settings().pr_reviewer.enable_review_labels_effort:
+                    estimated_effort = data['PR Analysis']['Estimated effort to review [1-5]']
+                    estimated_effort_number = int(estimated_effort.split(',')[0])
+                    if 1 <= estimated_effort_number <= 5:  # 1, because ...
+                        review_labels.append(f'Review effort [1-5]: {estimated_effort_number}')
+                if get_settings().pr_reviewer.enable_review_labels_security:
+                    security_concerns = data['PR Analysis']['Security concerns']  # yes, because ...
+                    security_concerns_bool = 'yes' in security_concerns.lower() or 'true' in security_concerns.lower()
+                    if security_concerns_bool:
+                        review_labels.append('Possible security concern')
+
+                if review_labels:
+                    current_labels = self.git_provider.get_labels()
+                    current_labels_filtered = [label for label in current_labels if
+                                               not label.lower().startswith('review effort [1-5]:') and not label.lower().startswith(
+                                                   'possible security concern')]
+                    self.git_provider.publish_labels(review_labels + current_labels_filtered)
+            except Exception as e:
+                get_logger().error(f"Failed to set review labels, error: {e}")
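The `condition = any if ... else all` line in `_can_run_incremental_review` above is easy to misread, but it is correct: when `require_all_thresholds_for_incremental_review` is true, every threshold must pass for the review to run, so any single failing check is enough to skip it. A standalone illustration:

```python
# Two threshold checks: True means the check failed (blocks the review).
not_enough_commits, all_commits_too_recent = True, False
thresholds = (not_enough_commits, all_commits_too_recent)

# require_all_thresholds_for_incremental_review=true -> condition = any:
# one failing check already skips the review.
print(any(thresholds))  # True  -> skip

# require_all_thresholds_for_incremental_review=false -> condition = all:
# the review is skipped only if every check fails.
print(all(thresholds))  # False -> run
```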
@@ -8,8 +8,8 @@ import pinecone
 from pinecone_datasets import Dataset, DatasetMetadata
 from pydantic import BaseModel, Field

-from pr_agent.algo import MAX_TOKENS
 from pr_agent.algo.token_handler import TokenHandler
+from pr_agent.algo.utils import get_max_tokens
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.log import get_logger
@@ -197,7 +197,7 @@ class PRSimilarIssue:
                 username = issue.user.login
                 created_at = str(issue.created_at)
                 if len(issue_str) < 8000 or \
-                        self.token_handler.count_tokens(issue_str) < MAX_TOKENS[MODEL]:  # fast reject first
+                        self.token_handler.count_tokens(issue_str) < get_max_tokens(MODEL):  # fast reject first
                     issue_record = Record(
                         id=issue_key + "." + "issue",
                         text=issue_str,
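`get_max_tokens` replaces the direct `MAX_TOKENS[MODEL]` lookup above, but its definition is not part of this diff. A plausible sketch, tying it to the `max_model_tokens` setting introduced in configuration.toml earlier in this compare:

```python
from pr_agent.algo import MAX_TOKENS
from pr_agent.config_loader import get_settings

def get_max_tokens(model: str) -> int:
    # Sketch: cap the model's native context size by config.max_model_tokens when set.
    max_tokens_model = MAX_TOKENS[model]
    if get_settings().config.max_model_tokens:
        max_tokens_model = min(get_settings().config.max_model_tokens, max_tokens_model)
    return max_tokens_model
```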
@@ -13,7 +13,7 @@ atlassian-python-api==3.39.0
 GitPython==3.1.32
 PyYAML==6.0
 starlette-context==0.3.6
-litellm~=0.1.574
+litellm==0.12.5
 boto3==1.28.25
 google-cloud-storage==2.10.0
 ujson==5.8.0
@@ -22,3 +22,4 @@ msrest==0.7.1
 pinecone-client
 pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
 loguru==0.7.2
+google-cloud-aiplatform==1.35.0
tests/unittest/try_fix_yaml.py (new file, 31 lines)
@@ -0,0 +1,31 @@
+
+# Generated by CodiumAI
+from pr_agent.algo.utils import try_fix_yaml
+
+
+import pytest
+
+class TestTryFixYaml:
+
+    # The function successfully parses a valid YAML string.
+    def test_valid_yaml(self):
+        review_text = "key: value\n"
+        expected_output = {"key": "value"}
+        assert try_fix_yaml(review_text) == expected_output
+
+    # The function adds '|-' to 'relevant line:' if it is not already present and successfully parses the YAML string.
+    def test_add_relevant_line(self):
+        review_text = "relevant line: value: 3\n"
+        expected_output = {"relevant line": "value: 3"}
+        assert try_fix_yaml(review_text) == expected_output
+
+    # The function removes the last line(s) of the YAML string and successfully parses the YAML string.
+    def test_remove_last_line(self):
+        review_text = "key: value\nextra invalid line\n"
+        expected_output = {"key": "value"}
+        assert try_fix_yaml(review_text) == expected_output
+
+    # The YAML string is empty.
+    def test_empty_yaml_fixed(self):
+        review_text = ""
+        assert try_fix_yaml(review_text) is None
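`try_fix_yaml` itself is not shown in this compare; below is a minimal sketch that satisfies the four tests above. The repair order and the block-scalar rewrite are assumptions about the real implementation in `pr_agent.algo.utils`:

```python
import yaml

def try_fix_yaml_sketch(review_text: str):
    # 1) Try the text as-is; an empty string parses to None.
    try:
        return yaml.safe_load(review_text)
    except yaml.YAMLError:
        pass
    # 2) Rewrite 'relevant line:' values as block scalars so inner colons survive.
    fixed = review_text.replace("relevant line: ", "relevant line: |-\n        ")
    try:
        return yaml.safe_load(fixed)
    except yaml.YAMLError:
        pass
    # 3) Drop trailing lines until the remainder parses.
    lines = review_text.rstrip().split("\n")
    for cut in range(1, len(lines)):
        try:
            return yaml.safe_load("\n".join(lines[:-cut]))
        except yaml.YAMLError:
            continue
    return None
```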
Block a user