Mirror of https://github.com/qodo-ai/pr-agent.git
Synced 2025-07-04 12:50:38 +08:00

Compare commits: ok/fix_git ... v0.10
109 Commits
SHA1
416a5495da
a2b27dcac8
d8e4e2e8fd
896a81d173
b216af8f04
388cc740b6
6214494c84
762a6981e1
b362c406bc
7a342d3312
2e95988741
9478447141
082293b48c
e1d92206f3
557ec72bfe
6b4b16dcf9
c4899a6c54
24d82e65cb
2567a6cf27
94cb6b9795
e878bbbe36
7d89b82967
c5f9bbbf92
a5e5a82952
ccbb62b50a
1df36c6a44
9e5e9afe92
5e43c202dd
37e6608e68
f64d5f1e2a
8fdf174dec
29d4f98b19
737792d83c
7e5889061c
755e04cf65
44d6c95714
14610d5375
f9c832d6cb
c2bec614e5
49725e92f2
a1e32d8331
0293412a42
10ec0a1812
69b68b78f5
c5bc4b44ff
39e5102a2e
6c82bc9a3e
54f41dd603
094f641fb5
a35a75eb34
5a7c118b56
cf9e0fbbc5
ef9af261ed
ff79776410
ec3f2fb485
94a2a5e527
ea4bc548fc
1eefd3365b
db37ee819a
e352c98ce8
e96b03da57
1d2aedf169
4c484f8e86
8a79114ed9
cd69f43c77
6d6d864417
b286c8ed20
7238c81f0c
62412f8cd4
5d2bdadb45
06d030637c
8e3fa3926a
92071fcf1c
fed1c160eb
e37daf6987
8fc663911f
bb2760ae41
3548b88463
c917e48098
e6ef123ce5
194bfe1193
e456cf36aa
fe3527de3c
b99c769b53
60bdfb78df
c0b3c76884
e1370a8385
c623c3baf4
d0f3a4139d
3ddc7e79d1
3e14edfd4e
15573e2286
ce64877063
6666a128ee
9fbf89670d
ad1c51c536
9ab7ccd20d
c907f93ab8
29a8cf8357
7b6a6c7164
cf4d007737
a751bb0ef0
26d6280a20
32a19fdab6
775ccb3f25
a1c6c57f7b
73bb70fef4
dcac6c145c
66644f0224
1
.github/workflows/pr-agent-review.yaml
vendored
@@ -26,5 +26,6 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PINECONE.API_KEY: ${{ secrets.PINECONE_API_KEY }}
          PINECONE.ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
          GITHUB_ACTION.AUTO_REVIEW: true
2
.pr_agent.toml
Normal file
@@ -0,0 +1,2 @@
[pr_reviewer]
enable_review_labels_effort = true
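
This new repository-level `.pr_agent.toml` enables the "Review effort" label introduced in v0.10. As a sketch, the same file could carry any other reviewer override documented later in this changeset (the extra values below are illustrative, not part of this commit):

```
[pr_reviewer]
enable_review_labels_effort = true
num_code_suggestions = 1      # fewer suggestions per review
require_score_review = false  # keep the score section off
```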
36
INSTALL.md
@@ -29,39 +29,43 @@ There are several ways to use PR-Agent:

### Use Docker image (no installation required)

To request a review for a PR, or ask a question about a PR, you can run directly from the Docker image. Here's how:
A list of the relevant tools can be found in the [tools guide](./docs/TOOLS_GUIDE.md).

For GitHub:
To invoke a tool (for example `review`), you can run directly from the Docker image. Here's how:

- For GitHub:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
For GitLab:

- For GitLab:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
For BitBucket:

Note: If you have a dedicated GitLab instance, you need to specify the custom url as variable:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> -e GITLAB.URL=<your gitlab instance url> codiumai/pr-agent:latest --pr_url <pr_url> review
```

- For BitBucket:
```
docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
```

For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the expected environment variable names and values.


Similarly, to ask a question about a PR, run the following command:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent --pr_url <pr_url> ask "<your question>"
```

A list of the relevant tools can be found in the [tools guide](./docs/TOOLS_GUIDE.md).
---


Note: If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
```bash
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent@sha256:71b5ee15df59c745d352d84752d01561ba64b6d51327f97d46152f0c58a5f678 --pr_url <pr_url> review
```
in addition, you can run a [specific released versions](./RELEASE_NOTES.md) of pr-agent, for example:

Or you can run a [specific released versions](./RELEASE_NOTES.md) of pr-agent, for example:
```
codiumai/pr-agent@v0.8
codiumai/pr-agent@v0.9
```

---

@@ -406,9 +410,9 @@ BITBUCKET_BEARER_TOKEN: <your token>
You can get a Bitbucket token for your repository by following Repository Settings -> Security -> Access Tokens.


### Run on a hosted Bitbucket app
### Run using CodiumAI-hosted Bitbucket app

Please contact <support@codium.ai> if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implmentation.
Please contact <support@codium.ai> or visit [CodiumAI pricing page](https://www.codium.ai/pricing/) if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implementation.
RELEASE_NOTES.md
@@ -1,3 +1,25 @@
## [Version 0.10] - 2023-11-15
- codiumai/pr-agent:0.10
- codiumai/pr-agent:0.10-github_app
- codiumai/pr-agent:0.10-bitbucket-app
- codiumai/pr-agent:0.10-gitlab_webhook
- codiumai/pr-agent:0.10-github_polling
- codiumai/pr-agent:0.10-github_action

### Added::Algo
- Review tool now works with [persistent comments](https://github.com/Codium-ai/pr-agent/pull/451) by default
- Bitbucket now publishes review suggestions with [code links](https://github.com/Codium-ai/pr-agent/pull/428)
- Enabling to limit [max number of tokens](https://github.com/Codium-ai/pr-agent/pull/437/files)
- Support ['gpt-4-1106-preview'](https://github.com/Codium-ai/pr-agent/pull/437/files) model
- Support for Google's [Vertex AI](https://github.com/Codium-ai/pr-agent/pull/436)
- Implementing [thresholds](https://github.com/Codium-ai/pr-agent/pull/423) for incremental PR reviews
- Decoupled custom labels from [PR type](https://github.com/Codium-ai/pr-agent/pull/431)

### Fixed
- Fixed bug in [parsing quotes](https://github.com/Codium-ai/pr-agent/pull/446) in CLI
- Preserve [user-added labels](https://github.com/Codium-ai/pr-agent/pull/433) in pull requests
- Bug fixes in GitLab and BitBucket

## [Version 0.9] - 2023-10-29
- codiumai/pr-agent:0.9
- codiumai/pr-agent:0.9-github_app
37
Usage.md
@@ -108,12 +108,22 @@ Any configuration value in [configuration file](pr_agent/settings/configuration.


### Working with GitHub App
When running PR-Agent from [GitHub App](INSTALL.md#method-5-run-as-a-github-app), the default configurations from a pre-built docker will be initially loaded.
When running PR-Agent from GitHub App, the default [configuration file](pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.

By uploading a local `.pr_agent.toml` file, you can edit and customize any configuration parameter.

For example, if you set in `.pr_agent.toml`:

```
[pr_reviewer]
num_code_suggestions=1
```

Then you will overwrite the default number of code suggestions to be 1.
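
The same parameter can also be overridden for a single run by appending it to the invocation comment, in the style of the `push_commands` examples later in this guide (the value here is illustrative):

```
/review --pr_reviewer.num_code_suggestions=1
```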
#### GitHub app automatic tools
The [github_app](pr_agent/settings/configuration.toml#L56) section defines GitHub app specific configurations.
The [github_app](pr_agent/settings/configuration.toml#L76) section defines GitHub app-specific configurations.
In this section you can define configurations to control the conditions for which tools will **run automatically**.
Note that a local `.pr_agent.toml` file enables you to edit and customize the default parameters of any tool, not just the ones that are run automatically.

##### GitHub app automatic tools for PR actions
The GitHub app can respond to the following actions on a PR:
@@ -151,7 +161,7 @@ handle_pr_actions = []
```

##### GitHub app automatic tools for new code (PR push)
In addition the running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.
In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.

The configuration toggle `handle_push_trigger` can be used to enable this feature.
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the PR.
@@ -163,7 +173,7 @@ push_commands = [
"/auto_review -i --pr_reviewer.remove_previous_review_comment=true",
]
```
The means that when new code is pused to the PR, the PR-Agent will run the `describe` and incremental `auto_review` tools.
This means that when new code is pushed to the PR, the PR-Agent will run the `describe` and incremental `auto_review` tools.
For the describe tool, the `add_original_user_description` and `keep_original_user_title` parameters will be set to true.
For the `auto_review` tool, it will run in incremental mode, and the `remove_previous_review_comment` parameter will be set to true.

@@ -293,6 +303,23 @@ key = ...

Also review the [AiHandler](pr_agent/algo/ai_handler.py) file for instructions on how to set keys for other models.

#### Vertex AI

To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:

```
[config] # in configuration.toml
model = "vertex_ai/codechat-bison"

[vertexai] # in .secrets.toml
vertex_project = "my-google-cloud-project"
vertex_location = ""
```

Your [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) will be used for authentication so there is no need to set explicit credentials in most environments.

If you do want to set explicit credentials then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
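
Since PR-Agent routes model calls through litellm (see the AiHandler changes in this diff), the settings above translate into roughly the following call. This is a minimal sketch, assuming litellm is installed and application default credentials are configured; the prompt text is illustrative:

```python
import litellm

# Mirrors what AiHandler does with the [vertexai] secrets
litellm.vertex_project = "my-google-cloud-project"
litellm.vertex_location = ""  # empty falls back to the default location

response = litellm.completion(
    model="vertex_ai/codechat-bison",  # as set under [config]
    messages=[{"role": "user", "content": "Summarize this diff ..."}],
)
print(response["choices"][0]["message"]["content"])
```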
### Working with large PRs

The default mode of CodiumAI is to have a single call per tool, using GPT-4, which has a token limit of 8000 tokens.
@@ -27,6 +27,8 @@ Under the section 'pr_description', the [configuration file](./../pr_agent/setti

- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
- To enable `custom labels`, apply the configuration changes described [here](./GENERATE_CUSTOM_LABELS.md#configuration-changes)
- `enable_pr_type`: if set to false, it will not show the `PR type` as a text value in the description content. Default is true.

### Markers template

Markers enable you to easily integrate the user's content with auto-generated content, using a template-like mechanism.
@@ -16,25 +16,46 @@ The `review` tool can also be triggered automatically every time a new PR is ope

Under the section 'pr_reviewer', the [configuration file](./../pr_agent/settings/configuration.toml#L16) contains options to customize the 'review' tool:

#### enable\\disable features
- `require_focused_review`: if set to true, the tool will add a section - 'is the PR a focused one'. Default is false.
- `require_score_review`: if set to true, the tool will add a section that scores the PR. Default is false.
- `require_tests_review`: if set to true, the tool will add a section that checks if the PR contains tests. Default is true.
- `require_security_review`: if set to true, the tool will add a section that checks if the PR contains security issues. Default is true.
- `require_estimate_effort_to_review`: if set to true, the tool will add a section that estimates the effort needed to review the PR. Default is true.
#### general options
- `num_code_suggestions`: number of code suggestions provided by the 'review' tool. Default is 4.
- `inline_code_comments`: if set to true, the tool will publish the code suggestions as comments on the code diff. Default is false.
- `automatic_review`: if set to false, no automatic reviews will be done. Default is true.
- `remove_previous_review_comment`: if set to true, the tool will remove the previous review comment before adding a new one. Default is false.
- `persistent_comment`: if set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
#### review labels
- `enable_review_labels_security`: if set to true, the tool will publish a 'possible security issue' label if it detects a security issue. Default is true.
- `enable_review_labels_effort`: if set to true, the tool will publish a 'Review effort [1-5]: x' label. Default is false.
- To enable `custom labels`, apply the configuration changes described [here](./GENERATE_CUSTOM_LABELS.md#configuration-changes)
#### Incremental Mode
An incremental review considers only the changes since the last PR-Agent review. It can be useful when working on the PR in an iterative manner, and you want to focus on the changes since the last review instead of reviewing the entire PR again. To invoke it, use the following command:
```
/improve -i
/review -i
```
Note that the incremental mode is only available for GitHub.

<kbd><img src=./../pics/incremental_review.png width="768"></kbd>

Under the section 'pr_reviewer', the [configuration file](./../pr_agent/settings/configuration.toml#L16) contains options to customize the 'review -i' tool.
These configurations can be used to control the rate at which the incremental review tool will create new review comments when invoked automatically, to prevent making too much noise in the PR.
- `minimal_commits_for_incremental_review`: Minimal number of commits since the last review that are required to create an incremental review.
If there are fewer than the specified number of commits since the last review, the tool will not perform any action.
Default is 0 - the tool will always run, no matter how many commits have been made since the last review.
- `minimal_minutes_for_incremental_review`: Minimal number of minutes that need to pass since the last reviewed commit to create an incremental review.
If less than the specified number of minutes have passed between the last reviewed commit and running this command, the tool will not perform any action.
Default is 0 - the tool will always run, no matter how much time has passed since the last reviewed commit.
- `require_all_thresholds_for_incremental_review`: If set to true, all the previous thresholds must be met for the incremental review to run. If false, only one is enough to run the tool.
For example, if `minimal_commits_for_incremental_review=2` and `minimal_minutes_for_incremental_review=2`, and we have 3 commits since the last review, but the last reviewed commit is from 1 minute ago:
When `require_all_thresholds_for_incremental_review=true` the incremental review __will not__ run, because only 1 out of 2 conditions were met (we have enough commits but the last review is too recent),
but when `require_all_thresholds_for_incremental_review=false` the incremental review __will__ run, because one condition is enough (we have 3 commits, which is more than the configured 2).
Default is false - the tool will run as long as at least one condition is met.
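
A repo-level configuration matching the worked example above would look like this (threshold values taken from the example; illustrative only):

```
[pr_reviewer]
minimal_commits_for_incremental_review=2
minimal_minutes_for_incremental_review=2
require_all_thresholds_for_incremental_review=true  # both thresholds must be met
```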
#### PR Reflection

By invoking:
```
@@ -46,10 +46,13 @@ class PRAgent:
        apply_repo_settings(pr_url)

        # Then, apply user specific settings if exists
        if isinstance(request, str):
            request = request.replace("'", "\\'")
            lexer = shlex.shlex(request, posix=True)
            lexer.whitespace_split = True
            action, *args = list(lexer)
        else:
            action, *args = request
        args = update_settings_from_args(args)

        action = action.lstrip("/").lower()
@@ -8,9 +8,14 @@ MAX_TOKENS = {
    'gpt-4': 8000,
    'gpt-4-0613': 8000,
    'gpt-4-32k': 32000,
    'gpt-4-1106-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
    'claude-instant-1': 100000,
    'claude-2': 100000,
    'command-nightly': 4096,
    'replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1': 4096,
    'meta-llama/Llama-2-7b-chat-hf': 4096
    'meta-llama/Llama-2-7b-chat-hf': 4096,
    'vertex_ai/codechat-bison': 6144,
    'vertex_ai/codechat-bison-32k': 32000,
    'codechat-bison': 6144,
    'codechat-bison-32k': 32000,
}
@@ -23,7 +23,9 @@ class AiHandler:
        Initializes the OpenAI API key and other settings from a configuration file.
        Raises a ValueError if the OpenAI key is missing.
        """
        try:
            self.azure = False

            if get_settings().get("OPENAI.KEY", None):
                openai.api_key = get_settings().openai.key
                litellm.openai_key = get_settings().openai.key
            if get_settings().get("litellm.use_client"):
@@ -31,7 +33,6 @@ class AiHandler:
                assert litellm_token, "LITELLM_TOKEN is required"
                os.environ["LITELLM_TOKEN"] = litellm_token
                litellm.use_client = True
            self.azure = False
            if get_settings().get("OPENAI.ORG", None):
                litellm.organization = get_settings().openai.org
            if get_settings().get("OPENAI.API_TYPE", None):
@@ -54,8 +55,11 @@ class AiHandler:
                litellm.huggingface_key = get_settings().huggingface.key
            if get_settings().get("HUGGINGFACE.API_BASE", None):
                litellm.api_base = get_settings().huggingface.api_base
        except AttributeError as e:
            raise ValueError("OpenAI key is required") from e
            if get_settings().get("VERTEXAI.VERTEX_PROJECT", None):
                litellm.vertex_project = get_settings().vertexai.vertex_project
                litellm.vertex_location = get_settings().get(
                    "VERTEXAI.VERTEX_LOCATION", None
                )

    @property
    def deployment_id(self):
@@ -23,7 +23,7 @@ def filter_ignored(files):

        # keep filenames that _don't_ match the ignore regex
        for r in compiled_patterns:
            files = [f for f in files if not r.match(f.filename)]
            files = [f for f in files if (f.filename and not r.match(f.filename))]

    except Exception as e:
        print(f"Could not filter file list: {e}")
@@ -3,6 +3,7 @@ from __future__ import annotations
import re

from pr_agent.config_loader import get_settings
from pr_agent.git_providers.git_provider import EDIT_TYPE
from pr_agent.log import get_logger


@@ -115,7 +116,7 @@ def omit_deletion_hunks(patch_lines) -> str:


def handle_patch_deletions(patch: str, original_file_content_str: str,
                           new_file_content_str: str, file_name: str) -> str:
                           new_file_content_str: str, file_name: str, edit_type: EDIT_TYPE = EDIT_TYPE.UNKNOWN) -> str:
    """
    Handle entire file or deletion patches.

@@ -132,7 +133,7 @@ def handle_patch_deletions(patch: str, original_file_content_str: str,
        str: The modified patch with deletion hunks omitted.

    """
    if not new_file_content_str:
    if not new_file_content_str and edit_type != EDIT_TYPE.ADDED:
        # logic for handling deleted files - don't show patch, just show that the file was deleted
        if get_settings().config.verbosity_level > 0:
            get_logger().info(f"Processing file: {file_name}, minimizing deletion file")
@@ -7,18 +7,20 @@ from typing import Any, Callable, List, Tuple

from github import RateLimitExceededException

from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
from pr_agent.algo.language_handler import sort_files_by_main_languages
from pr_agent.algo.file_filter import filter_ignored
from pr_agent.algo.token_handler import TokenHandler, get_token_encoder
from pr_agent.algo.utils import get_max_tokens
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider
from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider, EDIT_TYPE
from pr_agent.log import get_logger

DELETED_FILES_ = "Deleted files:\n"

MORE_MODIFIED_FILES_ = "More modified files:\n"
MORE_MODIFIED_FILES_ = "Additional modified files (insufficient token budget to process):\n"

ADDED_FILES_ = "Additional added files (insufficient token budget to process):\n"

OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD = 1000
OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD = 600
@@ -64,14 +66,17 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
        pr_languages, token_handler, add_line_numbers_to_hunks, patch_extra_lines=PATCH_EXTRA_LINES)

    # if we are under the limit, return the full diff
    if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < MAX_TOKENS[model]:
    if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < get_max_tokens(model):
        return "\n".join(patches_extended)

    # if we are over the limit, start pruning
    patches_compressed, modified_file_names, deleted_file_names = \
    patches_compressed, modified_file_names, deleted_file_names, added_file_names = \
        pr_generate_compressed_diff(pr_languages, token_handler, model, add_line_numbers_to_hunks)

    final_diff = "\n".join(patches_compressed)
    if added_file_names:
        added_list_str = ADDED_FILES_ + "\n".join(added_file_names)
        final_diff = final_diff + "\n\n" + added_list_str
    if modified_file_names:
        modified_list_str = MORE_MODIFIED_FILES_ + "\n".join(modified_file_names)
        final_diff = final_diff + "\n\n" + modified_list_str
@@ -122,7 +127,7 @@ def pr_generate_extended_diff(pr_languages: list,


def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, model: str,
                                convert_hunks_to_line_numbers: bool) -> Tuple[list, list, list]:
                                convert_hunks_to_line_numbers: bool) -> Tuple[list, list, list, list]:
    """
    Generate a compressed diff string for a pull request, using diff minimization techniques to reduce the number of
    tokens used.
@@ -148,6 +153,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
    """

    patches = []
    added_files_list = []
    modified_files_list = []
    deleted_files_list = []
    # sort each one of the languages in top_langs by the number of tokens in the diff
@@ -165,7 +171,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo

        # removing delete-only hunks
        patch = handle_patch_deletions(patch, original_file_content_str,
                                       new_file_content_str, file.filename)
                                       new_file_content_str, file.filename, file.edit_type)
        if patch is None:
            if not deleted_files_list:
                total_tokens += token_handler.count_tokens(DELETED_FILES_)
@@ -179,17 +185,22 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
        new_patch_tokens = token_handler.count_tokens(patch)

        # Hard Stop, no more tokens
        if total_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
        if total_tokens > get_max_tokens(model) - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
            get_logger().warning(f"File was fully skipped, no more tokens: {file.filename}.")
            continue

        # If the patch is too large, just show the file name
        if total_tokens + new_patch_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD:
        if total_tokens + new_patch_tokens > get_max_tokens(model) - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD:
            # Current logic is to skip the patch if it's too large
            # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
            # until we meet the requirements
            if get_settings().config.verbosity_level >= 2:
                get_logger().warning(f"Patch too large, minimizing it, {file.filename}")
            if file.edit_type == EDIT_TYPE.ADDED:
                if not added_files_list:
                    total_tokens += token_handler.count_tokens(ADDED_FILES_)
                added_files_list.append(file.filename)
            else:
                if not modified_files_list:
                    total_tokens += token_handler.count_tokens(MORE_MODIFIED_FILES_)
                modified_files_list.append(file.filename)
@@ -206,7 +217,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
        if get_settings().config.verbosity_level >= 2:
            get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")

    return patches, modified_files_list, deleted_files_list
    return patches, modified_files_list, deleted_files_list, added_files_list


async def retry_with_fallback_models(f: Callable):
@@ -271,7 +282,7 @@ def find_line_number_of_relevant_line_in_file(diff_files: List[FilePatchInfo],
        r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

    for file in diff_files:
        if file.filename.strip() == relevant_file:
        if file.filename and (file.filename.strip() == relevant_file):
            patch = file.patch
            patch_lines = patch.splitlines()

@@ -397,13 +408,13 @@ def get_pr_multi_diffs(git_provider: GitProvider,
            continue

        # Remove delete-only hunks
        patch = handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file.filename)
        patch = handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file.filename, file.edit_type)
        if patch is None:
            continue

        patch = convert_to_hunks_with_lines_numbers(patch, file)
        new_patch_tokens = token_handler.count_tokens(patch)
        if patch and (total_tokens + new_patch_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD):
        if patch and (total_tokens + new_patch_tokens > get_max_tokens(model) - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD):
            final_diff = "\n".join(patches)
            final_diff_list.append(final_diff)
            patches = []
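
For intuition on the two buffers with the stock `gpt-4` limit of 8000 tokens: the soft threshold leaves 1000 tokens of output headroom and the hard threshold 600. Illustrative arithmetic only, not code from this diff:

```python
limit = 8000                       # get_max_tokens('gpt-4') with no cap configured
SOFT, HARD = 1000, 600
# a patch is minimized to its file name once total + patch_tokens > limit - SOFT,
# and files are skipped entirely once total > limit - HARD
print(limit - SOFT, limit - HARD)  # 7000 7400
```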
@@ -9,6 +9,8 @@ from typing import Any, List

import yaml
from starlette_context import context

from pr_agent.algo import MAX_TOKENS
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.log import get_logger

@@ -295,6 +297,21 @@ def load_yaml(review_text: str) -> dict:

def try_fix_yaml(review_text: str) -> dict:
    review_text_lines = review_text.split('\n')

    # first fallback - try to convert 'relevant line: ...' to 'relevant line: |-\n ...'
    review_text_lines_copy = review_text_lines.copy()
    for i in range(0, len(review_text_lines_copy)):
        if 'relevant line:' in review_text_lines_copy[i] and not '|-' in review_text_lines_copy[i]:
            review_text_lines_copy[i] = review_text_lines_copy[i].replace('relevant line: ',
                                                                          'relevant line: |-\n        ')
    try:
        data = yaml.load('\n'.join(review_text_lines_copy), Loader=yaml.SafeLoader)
        get_logger().info(f"Successfully parsed AI prediction after adding |-\n to relevant line")
        return data
    except:
        get_logger().debug(f"Failed to parse AI prediction after adding |-\n to relevant line")

    # second fallback - try to remove last lines
    data = {}
    for i in range(1, len(review_text_lines)):
        review_text_lines_tmp = '\n'.join(review_text_lines[:-i])
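
To see why the first fallback helps, consider a value that is invalid as a YAML plain scalar but fine as a block scalar. A standalone sketch of the transformation, assuming only PyYAML (the sample string is illustrative):

```python
import yaml

broken = "relevant line: def f(x): return x"  # second ':' breaks plain-scalar parsing
try:
    yaml.safe_load(broken)
except yaml.YAMLError as e:
    print("before fix:", type(e).__name__)    # a ScannerError

fixed = broken.replace('relevant line: ', 'relevant line: |-\n        ')
print(yaml.safe_load(fixed))                  # {'relevant line': 'def f(x): return x'}
```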
@@ -324,3 +341,35 @@ def set_custom_labels(variables):
        final_labels += f" - {k} ({v['description']})\n"
    variables["custom_labels"] = final_labels
    variables["custom_labels_examples"] = f" - {list(labels.keys())[0]}"


def get_user_labels(current_labels: List[str] = None):
    """
    Only keep labels that have been added by the user
    """
    try:
        if current_labels is None:
            current_labels = []
        user_labels = []
        for label in current_labels:
            if label.lower() in ['bug fix', 'tests', 'refactoring', 'enhancement', 'documentation', 'other']:
                continue
            if get_settings().config.enable_custom_labels:
                if label in get_settings().custom_labels:
                    continue
            user_labels.append(label)
        if user_labels:
            get_logger().info(f"Keeping user labels: {user_labels}")
    except Exception as e:
        get_logger().exception(f"Failed to get user labels: {e}")
        return current_labels
    return user_labels


def get_max_tokens(model):
    settings = get_settings()
    max_tokens_model = MAX_TOKENS[model]
    if settings.config.max_model_tokens:
        max_tokens_model = min(settings.config.max_model_tokens, max_tokens_model)
    # get_logger().debug(f"limiting max tokens to {max_tokens_model}")
    return max_tokens_model
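
The effect of the new helper: with `model = "gpt-4-1106-preview"` (128K in `MAX_TOKENS`) and the `config.max_model_tokens = 32000` cap introduced in this release, the effective budget becomes min(32000, 128000) = 32000, while plain `gpt-4` stays at min(32000, 8000) = 8000. A standalone sketch of the same clamping (values illustrative):

```python
MAX_TOKENS = {'gpt-4': 8000, 'gpt-4-1106-preview': 128000}

def get_max_tokens(model: str, max_model_tokens: int = 32000) -> int:
    # same min() clamping as pr_agent.algo.utils.get_max_tokens
    max_tokens_model = MAX_TOKENS[model]
    if max_model_tokens:
        max_tokens_model = min(max_model_tokens, max_tokens_model)
    return max_tokens_model

assert get_max_tokens('gpt-4-1106-preview') == 32000
assert get_max_tokens('gpt-4') == 8000
```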
@@ -8,6 +8,8 @@ from pr_agent.log import setup_logger

setup_logger()


def run(inargs=None):
    parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage=
"""\
@@ -51,9 +53,9 @@ For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions
    command = args.command.lower()
    get_settings().set("CONFIG.CLI_MODE", True)
    if args.issue_url:
        result = asyncio.run(PRAgent().handle_request(args.issue_url, command + " " + " ".join(args.rest)))
        result = asyncio.run(PRAgent().handle_request(args.issue_url, [command] + args.rest))
    else:
        result = asyncio.run(PRAgent().handle_request(args.pr_url, command + " " + " ".join(args.rest)))
        result = asyncio.run(PRAgent().handle_request(args.pr_url, [command] + args.rest))
    if not result:
        parser.print_help()
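
The switch from a re-joined string to `[command] + args.rest` is what closes the quote-parsing bug listed in the v0.10 release notes: joining already-split CLI arguments and re-lexing them with `shlex` chokes on apostrophes. A minimal sketch of the failure mode, using only the standard library (the sample arguments are illustrative):

```python
import shlex

args = ["ask", "what's the status?"]  # already split by argparse
joined = " ".join(args)               # "ask what's the status?"

lexer = shlex.shlex(joined, posix=True)
lexer.whitespace_split = True
try:
    list(lexer)
except ValueError as e:
    print(e)                          # No closing quotation

# Passing the list through unchanged needs no re-lexing at all.
```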
@@ -9,7 +9,7 @@ from starlette_context import context
from ..algo.pr_processing import find_line_number_of_relevant_line_in_file
from ..config_loader import get_settings
from ..log import get_logger
from .git_provider import FilePatchInfo, GitProvider
from .git_provider import FilePatchInfo, GitProvider, EDIT_TYPE


class BitbucketProvider(GitProvider):
@@ -32,8 +32,10 @@ class BitbucketProvider(GitProvider):
        self.repo = None
        self.pr_num = None
        self.pr = None
        self.pr_url = pr_url
        self.temp_comments = []
        self.incremental = incremental
        self.diff_files = None
        if pr_url:
            self.set_pr(pr_url)
            self.bitbucket_comment_api_url = self.pr._BitbucketBase__data["links"]["comments"]["href"]
@@ -41,9 +43,12 @@ class BitbucketProvider(GitProvider):

    def get_repo_settings(self):
        try:
            contents = self.repo_obj.get_contents(
                ".pr_agent.toml", ref=self.pr.head.sha
            ).decoded_content
            url = (f"https://api.bitbucket.org/2.0/repositories/{self.workspace_slug}/{self.repo_slug}/src/"
                   f"{self.pr.destination_branch}/.pr_agent.toml")
            response = requests.request("GET", url, headers=self.headers)
            if response.status_code == 404:  # not found
                return ""
            contents = response.text.encode('utf-8')
            return contents
        except Exception:
            return ""
@@ -113,6 +118,9 @@ class BitbucketProvider(GitProvider):
        return [diff.new.path for diff in self.pr.diffstat()]

    def get_diff_files(self) -> list[FilePatchInfo]:
        if self.diff_files:
            return self.diff_files

        diffs = self.pr.diffstat()
        diff_split = [
            "diff --git%s" % x for x in self.pr.diff().split("diff --git") if x.strip()
@@ -124,16 +132,56 @@ class BitbucketProvider(GitProvider):
                diff.old.get_data("links")
            )
            new_file_content_str = self._get_pr_file_content(diff.new.get_data("links"))
            diff_files.append(
                FilePatchInfo(
            file_patch_canonic_structure = FilePatchInfo(
                original_file_content_str,
                new_file_content_str,
                diff_split[index],
                diff.new.path,
            )
            )

            if diff.data['status'] == 'added':
                file_patch_canonic_structure.edit_type = EDIT_TYPE.ADDED
            elif diff.data['status'] == 'removed':
                file_patch_canonic_structure.edit_type = EDIT_TYPE.DELETED
            elif diff.data['status'] == 'modified':
                file_patch_canonic_structure.edit_type = EDIT_TYPE.MODIFIED
            elif diff.data['status'] == 'renamed':
                file_patch_canonic_structure.edit_type = EDIT_TYPE.RENAMED
            diff_files.append(file_patch_canonic_structure)


        self.diff_files = diff_files
        return diff_files

    def get_latest_commit_url(self):
        return self.pr.data['source']['commit']['links']['html']['href']

    def get_comment_url(self, comment):
        return comment.data['links']['html']['href']

    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool = True):
        try:
            for comment in self.pr.comments():
                body = comment.raw
                if initial_header in body:
                    latest_commit_url = self.get_latest_commit_url()
                    comment_url = self.get_comment_url(comment)
                    if update_header:
                        updated_header = f"{initial_header}\n\n### (review updated until commit {latest_commit_url})\n"
                        pr_comment_updated = pr_comment.replace(initial_header, updated_header)
                    else:
                        pr_comment_updated = pr_comment
                    get_logger().info(f"Persistent mode- updating comment {comment_url} to latest review message")
                    d = {"content": {"raw": pr_comment_updated}}
                    response = comment._update_data(comment.put(None, data=d))
                    self.publish_comment(
                        f"**[Persistent review]({comment_url})** updated to latest commit {latest_commit_url}")
                    return
        except Exception as e:
            get_logger().exception(f"Failed to update persistent review, error: {e}")
            pass
        self.publish_comment(pr_comment)

    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        comment = self.pr.comment(pr_comment)
        if is_temporary:
@@ -180,9 +228,29 @@ class BitbucketProvider(GitProvider):
            )
        return response

    def generate_link_to_relevant_line_number(self, suggestion) -> str:
        try:
            relevant_file = suggestion['relevant file'].strip('`').strip("'")
            relevant_line_str = suggestion['relevant line']
            if not relevant_line_str:
                return ""

            diff_files = self.get_diff_files()
            position, absolute_position = find_line_number_of_relevant_line_in_file \
                (diff_files, relevant_file, relevant_line_str)

            if absolute_position != -1 and self.pr_url:
                link = f"{self.pr_url}/#L{relevant_file}T{absolute_position}"
                return link
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"Failed adding line link, error: {e}")

        return ""

    def publish_inline_comments(self, comments: list[dict]):
        for comment in comments:
            self.publish_inline_comment(comment['body'], comment['start_line'], comment['path'])
            self.publish_inline_comment(comment['body'], comment['position'], comment['path'])

    def get_title(self):
        return self.pr.title
@@ -259,6 +327,11 @@ class BitbucketProvider(GitProvider):
        })

        response = requests.request("PUT", self.bitbucket_pull_request_api_url, headers=self.headers, data=payload)
        try:
            if response.status_code != 200:
                get_logger().info(f"Failed to update description, error code: {response.status_code}")
        except:
            pass
        return response

    # bitbucket does not support labels
@@ -13,6 +13,7 @@ class EDIT_TYPE(Enum):
    DELETED = 2
    MODIFIED = 3
    RENAMED = 4
    UNKNOWN = 5


@dataclass
@@ -22,7 +23,7 @@ class FilePatchInfo:
    patch: str
    filename: str
    tokens: int = -1
    edit_type: EDIT_TYPE = EDIT_TYPE.MODIFIED
    edit_type: EDIT_TYPE = EDIT_TYPE.UNKNOWN
    old_filename: str = None


@@ -39,42 +40,10 @@ class GitProvider(ABC):
    def publish_description(self, pr_title: str, pr_body: str):
        pass

    @abstractmethod
    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        pass

    @abstractmethod
    def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
        pass

    @abstractmethod
    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
        pass

    @abstractmethod
    def publish_inline_comments(self, comments: list[dict]):
        pass

    @abstractmethod
    def publish_code_suggestions(self, code_suggestions: list) -> bool:
        pass

    @abstractmethod
    def publish_labels(self, labels):
        pass

    @abstractmethod
    def get_labels(self):
        pass

    @abstractmethod
    def remove_initial_comment(self):
        pass

    @abstractmethod
    def remove_comment(self, comment):
        pass

    @abstractmethod
    def get_languages(self):
        pass
@@ -94,16 +63,16 @@ class GitProvider(ABC):
    def get_pr_description(self, *, full: bool = True) -> str:
        from pr_agent.config_loader import get_settings
        from pr_agent.algo.pr_processing import clip_tokens
        max_tokens = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
        max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
        description = self.get_pr_description_full() if full else self.get_user_description()
        if max_tokens:
            return clip_tokens(description, max_tokens)
        if max_tokens_description:
            return clip_tokens(description, max_tokens_description)
        return description

    def get_user_description(self) -> str:
        description = (self.get_pr_description_full() or "").strip()
        # if the existing description wasn't generated by the pr-agent, just return it as-is
        if not description.startswith("## PR Type"):
        if not any(description.startswith(header) for header in ("## PR Type", "## PR Description")):
            return description
        # if the existing description was generated by the pr-agent, but it doesn't contain the user description,
        # return nothing (empty string) because it means there is no user description
@@ -113,11 +82,54 @@ class GitProvider(ABC):
        return description.split("## User Description:", 1)[1].strip()

    @abstractmethod
    def get_issue_comments(self):
    def get_repo_settings(self):
        pass

    def get_pr_id(self):
        return ""

    #### comments operations ####
    @abstractmethod
    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        pass

    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool):
        self.publish_comment(pr_comment)

    @abstractmethod
    def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
        pass

    @abstractmethod
    def get_repo_settings(self):
    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
        pass

    @abstractmethod
    def publish_inline_comments(self, comments: list[dict]):
        pass

    @abstractmethod
    def remove_initial_comment(self):
        pass

    @abstractmethod
    def remove_comment(self, comment):
        pass

    @abstractmethod
    def get_issue_comments(self):
        pass

    def get_comment_url(self, comment) -> str:
        return ""

    #### labels operations ####
    @abstractmethod
    def publish_labels(self, labels):
        pass

    @abstractmethod
    def get_labels(self):
        pass

    @abstractmethod
@@ -128,11 +140,12 @@ class GitProvider(ABC):
    def remove_reaction(self, issue_comment_id: int, reaction_id: int) -> bool:
        pass

    #### commits operations ####
    @abstractmethod
    def get_commit_messages(self):
        pass

    def get_pr_id(self):
    def get_latest_commit_url(self) -> str:
        return ""


def get_main_pr_language(languages, files) -> str:
@@ -143,6 +156,9 @@ def get_main_pr_language(languages, files) -> str:
    if not languages:
        get_logger().info("No languages detected")
        return main_language_str
    if not files:
        get_logger().info("No files in diff")
        return main_language_str

    try:
        top_language = max(languages, key=languages.get).lower()
@@ -150,6 +166,8 @@ def get_main_pr_language(languages, files) -> str:
        # validate that the specific commit uses the main language
        extension_list = []
        for file in files:
            if not file:
                continue
            if isinstance(file, str):
                file = FilePatchInfo(base_file=None, head_file=None, patch=None, filename=file)
            extension_list.append(file.filename.rsplit('.')[-1])
@@ -187,6 +205,13 @@ class IncrementalPR:
    def __init__(self, is_incremental: bool = False):
        self.is_incremental = is_incremental
        self.commits_range = None
        self.first_new_commit_sha = None
        self.last_seen_commit_sha = None
        self.first_new_commit = None
        self.last_seen_commit = None

    @property
    def first_new_commit_sha(self):
        return None if self.first_new_commit is None else self.first_new_commit.sha

    @property
    def last_seen_commit_sha(self):
        return None if self.last_seen_commit is None else self.last_seen_commit.sha
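
With the default now `EDIT_TYPE.UNKNOWN`, a provider that classifies files must set the type explicitly, which is what the Bitbucket and GitHub changes in this diff do. A small sketch of constructing the dataclass with an explicit type (field names taken from the code above; the file name is illustrative, and the package must be importable):

```python
from pr_agent.git_providers.git_provider import EDIT_TYPE, FilePatchInfo

f = FilePatchInfo(base_file="", head_file="print('hi')\n", patch="",
                  filename="new_module.py", edit_type=EDIT_TYPE.ADDED)
# handle_patch_deletions() now only minimizes a content-less file
# when its edit_type is not ADDED
assert f.edit_type == EDIT_TYPE.ADDED
```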
@@ -13,7 +13,7 @@ from ..algo.utils import load_large_diff
from ..config_loader import get_settings
from ..log import get_logger
from ..servers.utils import RateLimitExceeded
from .git_provider import FilePatchInfo, GitProvider, IncrementalPR
from .git_provider import FilePatchInfo, GitProvider, IncrementalPR, EDIT_TYPE


class GithubProvider(GitProvider):
@@ -66,10 +66,10 @@ class GithubProvider(GitProvider):
        first_new_commit_index = None
        for index in range(len(self.commits) - 1, -1, -1):
            if self.commits[index].commit.author.date > last_review_time:
                self.incremental.first_new_commit_sha = self.commits[index].sha
                self.incremental.first_new_commit = self.commits[index]
                first_new_commit_index = index
            else:
                self.incremental.last_seen_commit_sha = self.commits[index].sha
                self.incremental.last_seen_commit = self.commits[index]
                break
        return self.commits[first_new_commit_index:] if first_new_commit_index is not None else []

@@ -129,7 +129,20 @@ class GithubProvider(GitProvider):
            if not patch:
                patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str)

            diff_files.append(FilePatchInfo(original_file_content_str, new_file_content_str, patch, file.filename))
            if file.status == 'added':
                edit_type = EDIT_TYPE.ADDED
            elif file.status == 'removed':
                edit_type = EDIT_TYPE.DELETED
            elif file.status == 'renamed':
                edit_type = EDIT_TYPE.RENAMED
            elif file.status == 'modified':
                edit_type = EDIT_TYPE.MODIFIED
            else:
                get_logger().error(f"Unknown edit type: {file.status}")
                edit_type = EDIT_TYPE.UNKNOWN
            file_patch_canonical_structure = FilePatchInfo(original_file_content_str, new_file_content_str, patch,
                                                           file.filename, edit_type=edit_type)
            diff_files.append(file_patch_canonical_structure)

        self.diff_files = diff_files
        return diff_files
@@ -141,10 +154,36 @@ class GithubProvider(GitProvider):
    def publish_description(self, pr_title: str, pr_body: str):
        self.pr.edit(title=pr_title, body=pr_body)

    def get_latest_commit_url(self) -> str:
        return self.last_commit_id.html_url

    def get_comment_url(self, comment) -> str:
        return comment.html_url

    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool = True):
        prev_comments = list(self.pr.get_issue_comments())
        for comment in prev_comments:
            body = comment.body
            if body.startswith(initial_header):
                latest_commit_url = self.get_latest_commit_url()
                comment_url = self.get_comment_url(comment)
                if update_header:
                    updated_header = f"{initial_header}\n\n### (review updated until commit {latest_commit_url})\n"
                    pr_comment_updated = pr_comment.replace(initial_header, updated_header)
                else:
                    pr_comment_updated = pr_comment
                get_logger().info(f"Persistent mode- updating comment {comment_url} to latest review message")
                response = comment.edit(pr_comment_updated)
                self.publish_comment(
                    f"**[Persistent review]({comment_url})** updated to latest commit {latest_commit_url}")
                return
        self.publish_comment(pr_comment)

    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        if is_temporary and not get_settings().config.publish_output_progress:
            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
            return

        response = self.pr.create_issue_comment(pr_comment)
        if hasattr(response, "user") and hasattr(response.user, "login"):
            self.github_user_id = response.user.login

@@ -136,6 +136,33 @@ class GitLabProvider(GitProvider):
        except Exception as e:
            get_logger().exception(f"Could not update merge request {self.id_mr} description: {e}")

    def get_latest_commit_url(self):
        return self.mr.commits().next().web_url

    def get_comment_url(self, comment):
        return f"{self.mr.web_url}#note_{comment.id}"

    def publish_persistent_comment(self, pr_comment: str, initial_header: str, update_header: bool = True):
        try:
            for comment in self.mr.notes.list(get_all=True)[::-1]:
                if comment.body.startswith(initial_header):
                    latest_commit_url = self.get_latest_commit_url()
                    comment_url = self.get_comment_url(comment)
                    if update_header:
                        updated_header = f"{initial_header}\n\n### (review updated until commit {latest_commit_url})\n"
                        pr_comment_updated = pr_comment.replace(initial_header, updated_header)
                    else:
                        pr_comment_updated = pr_comment
                    get_logger().info(f"Persistent mode- updating comment {comment_url} to latest review message")
                    response = self.mr.notes.update(comment.id, {'body': pr_comment_updated})
                    self.publish_comment(
                        f"**[Persistent review]({comment_url})** updated to latest commit {latest_commit_url}")
                    return
        except Exception as e:
            get_logger().exception(f"Failed to update persistent review, error: {e}")
            pass
        self.publish_comment(pr_comment)

    def publish_comment(self, mr_comment: str, is_temporary: bool = False):
        comment = self.mr.notes.create({'body': mr_comment})
        if is_temporary:
@@ -27,7 +27,8 @@ def apply_repo_settings(pr_url):
            get_settings().unset(section)
            get_settings().set(section, section_dict, merge=False)
        get_logger().info(f"Applying repo settings for section {section}, contents: {contents}")

    except Exception as e:
        get_logger().exception("Failed to apply repo settings", e)
    finally:
        if repo_settings_file:
            try:
@@ -122,7 +122,7 @@ async def handle_request(body: Dict[str, Any], event: str):
        if body.get("requested_reviewer", {}).get("login", "") != bot_user:
            return {}
        get_logger().info(f"Performing review for {api_url=} because of {event=} and {action=}")
        await _perform_commands(get_settings().github_app.pr_commands, agent, body, api_url, log_context)
        await _perform_commands("pr_commands", agent, body, api_url, log_context)

    # handle pull_request event with synchronize action - "push trigger" for new commits
    elif event == 'pull_request' and action == 'synchronize' and get_settings().github_app.handle_push_trigger:
@@ -174,7 +174,7 @@ async def handle_request(body: Dict[str, Any], event: str):
            get_logger().info(f"Skipping incremental review because there was no initial review for {api_url=} yet")
            return {}
        get_logger().info(f"Performing incremental review for {api_url=} because of {event=} and {action=}")
        await _perform_commands(get_settings().github_app.push_commands, agent, body, api_url, log_context)
        await _perform_commands("push_commands", agent, body, api_url, log_context)

    finally:
        # release the waiting task block
@@ -203,8 +203,9 @@ def _check_pull_request_event(action: str, body: dict, log_context: dict, bot_us
    return pull_request, api_url


async def _perform_commands(commands: List[str], agent: PRAgent, body: dict, api_url: str, log_context: dict):
async def _perform_commands(commands_conf: str, agent: PRAgent, body: dict, api_url: str, log_context: dict):
    apply_repo_settings(api_url)
    commands = get_settings().get(f"github_app.{commands_conf}")
    for command in commands:
        split_command = command.split(" ")
        command = split_command[0]
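
Passing the settings key instead of a pre-resolved list is the point of this change: `apply_repo_settings(api_url)` runs before the lookup, so a repository's `.pr_agent.toml` can now override which commands auto-run. An illustrative repo-level override (the command values are examples only):

```
[github_app]
pr_commands = [
    "/describe",
    "/review --pr_reviewer.num_code_suggestions=0",
]
```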
@@ -1,12 +1,15 @@
from fastapi import FastAPI
from mangum import Mangum
from starlette.middleware import Middleware
from starlette_context.middleware import RawContextMiddleware

from pr_agent.log import setup_logger
from pr_agent.servers.github_app import router

setup_logger()

app = FastAPI()
middleware = [Middleware(RawContextMiddleware)]
app = FastAPI(middleware=middleware)
app.include_router(router)

handler = Mangum(app, lifespan="off")
@@ -34,7 +34,11 @@ key = "" # Optional, uncomment if you want to use Huggingface Inference API. Acq
api_base = "" # the base url for your huggingface inference endpoint

[ollama]
api_base = "" # the base url for your huggingface inference endpoint
api_base = "" # the base url for your local Llama 2, Code Llama, and other models inference endpoint. Acquire through https://ollama.ai/

[vertexai]
vertex_project = "" # the google cloud platform project name for your vertexai deployment
vertex_location = "" # the google cloud platform location for your vertexai deployment

[github]
# ---- Set the following only for deployment type == "user"
@@ -1,5 +1,5 @@
[config]
model="gpt-4"
model="gpt-4" # "gpt-4-1106-preview"
fallback_models=["gpt-3.5-turbo-16k"]
git_provider="github"
publish_output=true
@@ -10,22 +10,33 @@ use_repo_settings_file=true
ai_timeout=180
max_description_tokens = 500
max_commits_tokens = 500
max_model_tokens = 32000 # Limits the maximum number of tokens that can be used by any model, regardless of the model's default capabilities.
patch_extra_lines = 3
secret_provider="google_cloud_storage"
cli_mode=false

[pr_reviewer] # /review #
# enable/disable features
require_focused_review=false
require_score_review=false
require_tests_review=true
require_security_review=true
require_estimate_effort_to_review=true
# general options
num_code_suggestions=4
inline_code_comments = false
ask_and_reflect=false
automatic_review=true
remove_previous_review_comment=false
persistent_comment=true
extra_instructions = ""
# review labels
enable_review_labels_security=true
enable_review_labels_effort=false
# specific configurations for incremental review (/review -i)
require_all_thresholds_for_incremental_review=false
minimal_commits_for_incremental_review=0
minimal_minutes_for_incremental_review=0

[pr_description] # /describe #
publish_labels=true
@@ -34,6 +45,7 @@ add_original_user_description=false
keep_original_user_title=false
use_bullet_points=true
extra_instructions = ""
enable_pr_type=true

# markers
use_description_markers=false
@@ -105,6 +117,9 @@ push_commands = [
          --pr_reviewer.num_code_suggestions=0 \
          --pr_reviewer.inline_code_comments=false \
          --pr_reviewer.remove_previous_review_comment=true \
          --pr_reviewer.require_all_thresholds_for_incremental_review=false \
          --pr_reviewer.minimal_commits_for_incremental_review=5 \
          --pr_reviewer.minimal_minutes_for_incremental_review=30 \
          --pr_reviewer.extra_instructions='' \
          """
]
@@ -16,7 +16,7 @@ You must use the following YAML schema to format your answer:
PR Type:
  type: array
{%- if enable_custom_labels %}
  description: One or more labels that describe the PR type. Don't output the description in the parentheses.
  description: Labels that are applicable to the Pull Request. Don't output the description in the parentheses. If none of the labels is relevant to the PR, output an empty array.
{%- endif %}
  items:
    type: string
@@ -39,7 +39,6 @@ PR Type:
{{ custom_labels_examples }}
{%- else %}
      - Bug fix
      - Tests
{%- endif %}
```
@@ -1,8 +1,9 @@
[pr_description_prompt]
system="""You are CodiumAI-PR-Reviewer, a language model designed to review git pull requests.
Your task is to provide full description of the PR content.
- Make sure not to focus the new PR code (the '+' lines).
Your task is to provide full description of a Pull Request (PR) content.
- Make sure to focus on the new PR code (the '+' lines).
- Notice that the 'Previous title', 'Previous description' and 'Commit messages' sections may be partial, simplistic, non-informative or not up-to-date. Hence, compare them to the PR diff code, and use them only as a reference.
- Emphasize first the most important changes, and then the less important ones.
- If needed, each YAML output should be in block scalar format ('|-')
{%- if extra_instructions %}

@@ -18,22 +19,22 @@ PR Title:
  type: string
  description: an informative title for the PR, describing its main theme
PR Type:
  type: array
{%- if enable_custom_labels %}
  description: One or more labels that describe the PR type. Don't output the description in the parentheses.
{%- endif %}
  items:
    type: string
    enum:
{%- if enable_custom_labels %}
{{ custom_labels }}
{%- else %}
      - Bug fix
      - Tests
      - Refactoring
      - Enhancement
      - Documentation
      - Other
{%- if enable_custom_labels %}
PR Labels:
  type: array
  description: Labels that are applicable to the Pull Request. Don't output the description in the parentheses. If none of the labels is relevant to the PR, output an empty array.
  items:
    type: string
    enum:
{{ custom_labels }}
{%- endif %}
PR Description:
  type: string
@@ -51,6 +52,7 @@ PR Main Files Walkthrough:
  changes in file:
    type: string
    description: minimal and concise description of the changes in the relevant file
```

Example output:
@@ -58,10 +60,11 @@ Example output:
PR Title: |-
  ...
PR Type:
  ...
{%- if enable_custom_labels %}
{{ custom_labels_examples }}
{%- else %}
  - Bug fix
PR Labels:
  - ...
  - ...
{%- endif %}
PR Description: |-
  ...
pr_agent/settings/pr_reviewer_prompts.toml
@@ -51,22 +51,13 @@ PR Analysis:
 description: summary of the PR in 2-3 sentences.
 Type of PR:
 type: string
-{%- if enable_custom_labels %}
-description: One or more labels that describe the PR type. Don't output the description in the parentheses.
-{%- endif %}
-items:
-type: string
 enum:
-{%- if enable_custom_labels %}
-{{ custom_labels }}
-{%- else %}
 - Bug fix
 - Tests
 - Refactoring
 - Enhancement
 - Documentation
 - Other
-{%- endif %}
 {%- if require_score %}
 Score:
 type: int
@@ -102,7 +93,7 @@ PR Analysis:
 description: >-
 Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review , 5 means long and hard review.
 Take into account the size, complexity, quality, and the needed changes of the PR code diff.
-Explain your answer shortly (1-2 sentences).
+Explain your answer shortly (1-2 sentences). Use the format: '1, because ...'
 {%- endif %}
 PR Feedback:
 General suggestions:
@@ -139,7 +130,8 @@ PR Feedback:
 Security concerns:
 type: string
 description: >-
-yes\\no question: does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? If answered 'yes', explain your answer briefly.
+does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? Answer 'No' if there are no possible issues.
+Answer 'Yes, because ...' if there are security concerns or issues. Explain your answer shortly.
 {%- endif %}
 ```

@@ -151,7 +143,7 @@ PR Analysis:
 PR summary: |-
 xxx
 Type of PR: |-
 Bug fix
 ...
 {%- if require_score %}
 Score: 89
 {%- endif %}
@@ -161,7 +153,8 @@ PR Analysis:
 Focused PR: no, because ...
 {%- endif %}
 {%- if require_estimate_effort_to_review %}
-Estimated effort to review [1-5]: 3, because ...
+Estimated effort to review [1-5]: |-
+3, because ...
 {%- endif %}
 PR Feedback:
 General PR suggestions: |-
pr_agent/tools/pr_description.py
@@ -7,7 +7,7 @@ from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handler import AiHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, set_custom_labels
+from pr_agent.algo.utils import load_yaml, set_custom_labels, get_user_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
@@ -98,9 +98,9 @@ class PRDescription:
             self.git_provider.publish_description(pr_title, pr_body)
             if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
                 current_labels = self.git_provider.get_labels()
-                if current_labels is None:
-                    current_labels = []
-                self.git_provider.publish_labels(pr_labels + current_labels)
+                user_labels = get_user_labels(current_labels)
+                self.git_provider.publish_labels(pr_labels + user_labels)
             self.git_provider.remove_initial_comment()
         except Exception as e:
             get_logger().error(f"Error generating PR description {self.pr_id}: {e}")
@@ -158,6 +158,9 @@ class PRDescription:
             user=user_prompt
         )

+        if get_settings().config.verbosity_level >= 2:
+            get_logger().info(f"\nAI response:\n{response}")
+
         return response

     def _prepare_data(self):
@@ -172,12 +175,16 @@ class PRDescription:
         pr_types = []

         # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
-        if 'PR Type' in self.data:
+        if 'PR Labels' in self.data:
+            if type(self.data['PR Labels']) == list:
+                pr_types = self.data['PR Labels']
+            elif type(self.data['PR Labels']) == str:
+                pr_types = self.data['PR Labels'].split(',')
+        elif 'PR Type' in self.data:
             if type(self.data['PR Type']) == list:
                 pr_types = self.data['PR Type']
             elif type(self.data['PR Type']) == str:
                 pr_types = self.data['PR Type'].split(',')

         return pr_types

     def _prepare_pr_answer_with_markers(self) -> Tuple[str, str]:
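As an aside (illustration only, not part of the diff): the branch logic in `_prepare_data` above condenses to the following equivalent sketch, where 'PR Labels' is preferred over 'PR Type' when both keys are present:

```python
def extract_pr_types(data: dict) -> list:
    # Equivalent, condensed restatement of the branches above.
    for key in ("PR Labels", "PR Type"):
        if key in data:
            value = data[key]
            return value if isinstance(value, list) else value.split(',')
    return []

assert extract_pr_types({"PR Labels": ["Bug fix", "Tests"]}) == ["Bug fix", "Tests"]
assert extract_pr_types({"PR Type": "Bug fix,Tests"}) == ["Bug fix", "Tests"]
```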
@@ -223,6 +230,11 @@ class PRDescription:

         # Iterate over the dictionary items and append the key and value to 'markdown_text' in a markdown format
         markdown_text = ""
+        # Don't display 'PR Labels'
+        if 'PR Labels' in self.data and self.git_provider.is_supported("get_labels"):
+            self.data.pop('PR Labels')
+        if not get_settings().pr_description.enable_pr_type:
+            self.data.pop('PR Type')
         for key, value in self.data.items():
             markdown_text += f"## {key}\n\n"
             markdown_text += f"{value}\n\n"
@@ -248,7 +260,7 @@ class PRDescription:
         for file in value:
             filename = file['filename'].replace("'", "`")
             description = file['changes in file']
-            pr_body += f'`{filename}`: {description}\n'
+            pr_body += f'- `{filename}`: {description}\n'
         if self.git_provider.is_supported("gfm_markdown"):
             pr_body += "</details>\n"
         else:
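Both `pr_description.py` above and `pr_generate_labels.py` below now route the PR's current labels through `get_user_labels`. A plausible sketch of that helper (an assumption; the real implementation lives in `pr_agent/algo/utils.py`): keep the labels a human added and drop previously generated ones, so republishing does not duplicate or clobber them.

```python
def get_user_labels(current_labels):
    """Keep only user-added labels; generated type labels are recreated each run.
    Sketch under stated assumptions, not the actual implementation."""
    if current_labels is None:
        current_labels = []
    generated = {'bug fix', 'tests', 'refactoring', 'enhancement', 'documentation', 'other'}
    return [label for label in current_labels if label.lower() not in generated]
```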
pr_agent/tools/pr_generate_labels.py
@@ -7,7 +7,7 @@ from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handler import AiHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, set_custom_labels
+from pr_agent.algo.utils import load_yaml, set_custom_labels, get_user_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
@@ -82,11 +82,17 @@ class PRGenerateLabels:

         if get_settings().config.publish_output:
             get_logger().info(f"Pushing labels {self.pr_id}")
-            if self.git_provider.is_supported("get_labels"):

             current_labels = self.git_provider.get_labels()
             if current_labels is None:
                 current_labels = []
-            self.git_provider.publish_labels(pr_labels + current_labels)
+            user_labels = get_user_labels(current_labels)
+            pr_labels = pr_labels + user_labels
+
+            if self.git_provider.is_supported("get_labels"):
+                self.git_provider.publish_labels(pr_labels)
+            elif pr_labels:
+                value = ', '.join(v for v in pr_labels)
+                pr_labels_text = f"## PR Labels:\n{value}\n"
+                self.git_provider.publish_comment(pr_labels_text, is_temporary=False)
             self.git_provider.remove_initial_comment()
         except Exception as e:
             get_logger().error(f"Error generating PR labels {self.pr_id}: {e}")
pr_agent/tools/pr_reviewer.py
@@ -1,4 +1,5 @@
 import copy
+import datetime
 from collections import OrderedDict
 from typing import List, Tuple

@@ -9,7 +10,7 @@ from yaml import SafeLoader
 from pr_agent.algo.ai_handler import AiHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml, set_custom_labels
+from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml, set_custom_labels, get_user_labels
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
@@ -100,8 +101,7 @@ class PRReviewer:
         if self.is_auto and not get_settings().pr_reviewer.automatic_review:
             get_logger().info(f'Automatic review is disabled {self.pr_url}')
             return None
-        if self.is_auto and self.incremental.is_incremental and not self.incremental.first_new_commit_sha:
-            get_logger().info(f"Incremental review is enabled for {self.pr_url} but there are no new commits")
+        if self.incremental.is_incremental and not self._can_run_incremental_review():
             return None

         get_logger().info(f'Reviewing PR: {self.pr_url} ...')
@@ -117,7 +117,15 @@ class PRReviewer:
         if get_settings().config.publish_output:
             get_logger().info('Pushing PR review...')
             previous_review_comment = self._get_previous_review_comment()
+
+            # publish the review
+            if get_settings().pr_reviewer.persistent_comment and not self.incremental.is_incremental:
+                self.git_provider.publish_persistent_comment(pr_comment,
+                                                             initial_header="## PR Analysis",
+                                                             update_header=True)
+            else:
+                self.git_provider.publish_comment(pr_comment)
+
             self.git_provider.remove_initial_comment()
             if previous_review_comment:
                 self._remove_previous_review_comment(previous_review_comment)
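With `persistent_comment=true`, a full (non-incremental) review updates a single "## PR Analysis" comment in place instead of stacking a new comment per run. A minimal sketch of the `publish_persistent_comment` contract used above, assuming a PyGithub-style pull-request object (the actual per-provider implementations differ):

```python
def publish_persistent_comment(pr, pr_comment, initial_header, update_header=True):
    # Sketch only: find a previous comment starting with the known header and edit it;
    # update_header handling is omitted for brevity.
    for comment in pr.get_issue_comments():      # PyGithub-style iteration (assumption)
        if comment.body.startswith(initial_header):
            comment.edit(pr_comment)             # update the existing review in place
            return
    pr.create_issue_comment(pr_comment)          # first run: create the comment
```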
@@ -156,7 +164,6 @@ class PRReviewer:
         variables["diff"] = self.patches_diff  # update diff

         environment = Environment(undefined=StrictUndefined)
-        set_custom_labels(variables)
         system_prompt = environment.from_string(get_settings().pr_review_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_review_prompt.user).render(variables)

@@ -171,6 +178,9 @@ class PRReviewer:
             user=user_prompt
         )

+        if get_settings().config.verbosity_level >= 2:
+            get_logger().info(f"\nAI response:\n{response}")
+
         return response

     def _prepare_pr_review(self) -> str:
@@ -217,19 +227,6 @@ class PRReviewer:
                 suggestion['relevant line'] = f"[{suggestion['relevant line']}]({link})"
             else:
                 pass
-                # try:
-                #     relevant_file = suggestion['relevant file'].strip('`').strip("'")
-                #     relevant_line_str = suggestion['relevant line']
-                #     if not relevant_line_str:
-                #         return ""
-                #
-                #     position, absolute_position = find_line_number_of_relevant_line_in_file(
-                #         self.git_provider.diff_files, relevant_file, relevant_line_str)
-                #     if absolute_position != -1:
-                #         suggestion[
-                #             'relevant line'] = f"{suggestion['relevant line']} (line {absolute_position})"
-                # except:
-                #     pass

         # Add incremental review section
@@ -239,7 +236,8 @@ class PRReviewer:
         last_commit_msg = self.incremental.commits_range[0].commit.message if self.incremental.commits_range else ""
         incremental_review_markdown_text = f"Starting from commit {last_commit_url}"
         if last_commit_msg:
-            incremental_review_markdown_text += f" \n_({last_commit_msg.splitlines(keepends=False)[0]})_"
+            replacement = last_commit_msg.splitlines(keepends=False)[0].replace('_', r'\_')
+            incremental_review_markdown_text += f" \n_({replacement})_"
         data = OrderedDict(data)
         data.update({'Incremental PR Review': {
             "⏮️ Review for commits since previous PR-Agent review": incremental_review_markdown_text}})
@@ -257,6 +255,9 @@ class PRReviewer:
         else:
             markdown_text += actions_help_text

+        # Add custom labels from the review prediction (effort, security)
+        self.set_review_labels(data)
+
         # Log markdown response if verbosity level is high
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"Markdown response:\n{markdown_text}")
@@ -346,3 +347,59 @@ class PRReviewer:
                 self.git_provider.remove_comment(comment)
         except Exception as e:
             get_logger().exception(f"Failed to remove previous review comment, error: {e}")
+
+    def _can_run_incremental_review(self) -> bool:
+        """Checks if we can run an incremental review, according to the various configurations and the previous review."""
+        # checking if running in auto mode but there are no new commits
+        if self.is_auto and not self.incremental.first_new_commit_sha:
+            get_logger().info(f"Incremental review is enabled for {self.pr_url} but there are no new commits")
+            return False
+        # checking if there are enough commits to start the review
+        num_new_commits = len(self.incremental.commits_range)
+        num_commits_threshold = get_settings().pr_reviewer.minimal_commits_for_incremental_review
+        not_enough_commits = num_new_commits < num_commits_threshold
+        # checking if the commits are not too recent to start the review
+        recent_commits_threshold = datetime.datetime.now() - datetime.timedelta(
+            minutes=get_settings().pr_reviewer.minimal_minutes_for_incremental_review
+        )
+        last_seen_commit_date = (
+            self.incremental.last_seen_commit.commit.author.date if self.incremental.last_seen_commit else None
+        )
+        all_commits_too_recent = (
+            last_seen_commit_date > recent_commits_threshold if self.incremental.last_seen_commit else False
+        )
+        # check all the thresholds or just one to start the review
+        condition = any if get_settings().pr_reviewer.require_all_thresholds_for_incremental_review else all
+        if condition((not_enough_commits, all_commits_too_recent)):
+            get_logger().info(
+                f"Incremental review is enabled for {self.pr_url} but didn't pass the threshold check to run:"
+                f"\n* Number of new commits = {num_new_commits} (threshold is {num_commits_threshold})"
+                f"\n* Last seen commit date = {last_seen_commit_date} (threshold is {recent_commits_threshold})"
+            )
+            return False
+        return True
+
+    def set_review_labels(self, data):
+        if (get_settings().pr_reviewer.enable_review_labels_security or
+                get_settings().pr_reviewer.enable_review_labels_effort):
+            try:
+                review_labels = []
+                if get_settings().pr_reviewer.enable_review_labels_effort:
+                    estimated_effort = data['PR Analysis']['Estimated effort to review [1-5]']
+                    estimated_effort_number = int(estimated_effort.split(',')[0])
+                    if 1 <= estimated_effort_number <= 5:  # 1, because ...
+                        review_labels.append(f'Review effort [1-5]: {estimated_effort_number}')
+                if get_settings().pr_reviewer.enable_review_labels_security:
+                    security_concerns = data['PR Analysis']['Security concerns']  # yes, because ...
+                    security_concerns_bool = 'yes' in security_concerns.lower() or 'true' in security_concerns.lower()
+                    if security_concerns_bool:
+                        review_labels.append('Possible security concern')
+
+                if review_labels:
+                    current_labels = self.git_provider.get_labels()
+                    current_labels_filtered = [label for label in current_labels if
+                                               not label.lower().startswith('review effort [1-5]:') and
+                                               not label.lower().startswith('possible security concern')]
+                    self.git_provider.publish_labels(review_labels + current_labels_filtered)
+            except Exception as e:
+                get_logger().error(f"Failed to set review labels, error: {e}")
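To see what `set_review_labels` above extracts, here is its parsing applied to a hypothetical review prediction (illustration only; the prompt changes earlier enforce the "3, because ..." answer format):

```python
data = {"PR Analysis": {
    "Estimated effort to review [1-5]": "3, because the diff touches several tools",
    "Security concerns": "No",
}}

# "3, because ..." -> 3
effort = int(data["PR Analysis"]["Estimated effort to review [1-5]"].split(',')[0])
labels = []
if 1 <= effort <= 5:
    labels.append(f"Review effort [1-5]: {effort}")
if "yes" in data["PR Analysis"]["Security concerns"].lower():
    labels.append("Possible security concern")
print(labels)  # ['Review effort [1-5]: 3']
```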
pr_agent/tools/pr_similar_issue.py
@@ -8,8 +8,8 @@ import pinecone
 from pinecone_datasets import Dataset, DatasetMetadata
 from pydantic import BaseModel, Field

-from pr_agent.algo import MAX_TOKENS
 from pr_agent.algo.token_handler import TokenHandler
+from pr_agent.algo.utils import get_max_tokens
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.log import get_logger
@@ -197,7 +197,7 @@ class PRSimilarIssue:
                 username = issue.user.login
                 created_at = str(issue.created_at)
                 if len(issue_str) < 8000 or \
-                        self.token_handler.count_tokens(issue_str) < MAX_TOKENS[MODEL]:  # fast reject first
+                        self.token_handler.count_tokens(issue_str) < get_max_tokens(MODEL):  # fast reject first
                     issue_record = Record(
                         id=issue_key + "." + "issue",
                         text=issue_str,
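The switch from the raw `MAX_TOKENS[MODEL]` lookup to `get_max_tokens(MODEL)` pairs with the new `max_model_tokens` setting in configuration.toml. A plausible sketch of the helper (the real one lives in `pr_agent/algo/utils.py`; its exact behavior is an assumption here):

```python
from pr_agent.algo import MAX_TOKENS
from pr_agent.config_loader import get_settings

def get_max_tokens(model: str) -> int:
    # Start from the model's known context size...
    max_tokens_model = MAX_TOKENS[model]
    # ...and clamp it by the global config cap, if one is set.
    if get_settings().config.max_model_tokens:
        max_tokens_model = min(get_settings().config.max_model_tokens, max_tokens_model)
    return max_tokens_model
```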
|
@ -13,7 +13,7 @@ atlassian-python-api==3.39.0
|
||||
GitPython==3.1.32
|
||||
PyYAML==6.0
|
||||
starlette-context==0.3.6
|
||||
litellm~=0.1.574
|
||||
litellm==0.12.5
|
||||
boto3==1.28.25
|
||||
google-cloud-storage==2.10.0
|
||||
ujson==5.8.0
|
||||
@ -22,3 +22,4 @@ msrest==0.7.1
|
||||
pinecone-client
|
||||
pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
|
||||
loguru==0.7.2
|
||||
google-cloud-aiplatform==1.35.0
|
||||
|
tests/unittest/try_fix_yaml.py (new file, 31 lines)
@@ -0,0 +1,31 @@
+
+# Generated by CodiumAI
+from pr_agent.algo.utils import try_fix_yaml
+
+
+import pytest
+
+
+class TestTryFixYaml:
+    # The function successfully parses a valid YAML string.
+    def test_valid_yaml(self):
+        review_text = "key: value\n"
+        expected_output = {"key": "value"}
+        assert try_fix_yaml(review_text) == expected_output
+
+    # The function adds '|-' to 'relevant line:' if it is not already present and successfully parses the YAML string.
+    def test_add_relevant_line(self):
+        review_text = "relevant line: value: 3\n"
+        expected_output = {"relevant line": "value: 3"}
+        assert try_fix_yaml(review_text) == expected_output
+
+    # The function removes the last line(s) of the YAML string and successfully parses the YAML string.
+    def test_remove_last_line(self):
+        review_text = "key: value\nextra invalid line\n"
+        expected_output = {"key": "value"}
+        assert try_fix_yaml(review_text) == expected_output
+
+    # The YAML string is empty.
+    def test_empty_yaml_fixed(self):
+        review_text = ""
+        assert try_fix_yaml(review_text) is None
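A minimal implementation consistent with the four tests above (a sketch only; the real `try_fix_yaml` in `pr_agent/algo/utils.py` handles more failure modes and its exact strategy is an assumption here): try the text as-is, then re-quote 'relevant line:' values as block scalars, then drop trailing lines until the remainder parses.

```python
import yaml

def try_fix_yaml(review_text: str):
    # 1. Happy path: the model produced valid YAML.
    try:
        return yaml.safe_load(review_text)
    except yaml.YAMLError:
        pass

    # 2. Values like "relevant line: value: 3" break parsing; rewriting them as
    #    block scalars ('|-') keeps the whole value a single string.
    try:
        fixed = review_text.replace("relevant line: ", "relevant line: |-\n        ")
        return yaml.safe_load(fixed)
    except yaml.YAMLError:
        pass

    # 3. Drop trailing garbage lines one at a time until the text parses.
    lines = review_text.rstrip().splitlines()
    while lines:
        lines.pop()
        try:
            data = yaml.safe_load("\n".join(lines))
            if data:
                return data
        except yaml.YAMLError:
            continue
    return None
```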