Compare commits
416 Commits
ok/overrid ... ok/fix_git
.github/workflows/pr-agent-review.yaml (3 changes, vendored)

@@ -24,4 +24,7 @@ jobs:
          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
          OPENAI_ORG: ${{ secrets.OPENAI_ORG }} # optional
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PINECONE.API_KEY: ${{ secrets.PINECONE_API_KEY }}
          PINECONE.ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
@@ -1,57 +0,0 @@
## Configuration

The different tools and sub-tools used by CodiumAI PR-Agent are adjustable via the **[configuration file](pr_agent/settings/configuration.toml)**.

### Working from CLI
When running from source (CLI), your local configuration file will be used initially.

Example for invoking the 'review' tool via the CLI:

```
python cli.py --pr_url=<pr_url> review
```
In addition to general configurations, the 'review' tool will use parameters from the `[pr_reviewer]` section (every tool has a dedicated section in the configuration file).

Note that you can print results locally, without publishing them, by setting the following in `configuration.toml`:

```
[config]
publish_output=false
verbosity_level=2
```
This is useful for debugging or experimenting with the different tools.

### Working from pre-built repo (GitHub Action/GitHub App/Docker)
When running PR-Agent from a pre-built repo, the default configuration file will be loaded.

To edit the configuration, you have two options:
1. Place a local configuration file in the root of your local repo. The local file will be used instead of the default one.
2. For online usage, just add `--config_path=<value>` to your command to edit a specific configuration value.
For example, if you want to edit `pr_reviewer` configurations, you can run:
```
/review --pr_reviewer.extra_instructions="..." --pr_reviewer.require_score_review=false ...
```

Any configuration value in the `configuration.toml` file can be similarly edited.

### General configuration parameters

#### Changing a model
See [here](pr_agent/algo/__init__.py) for the list of available models.

To use the Llama2 model, for example, set:
```
[config]
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
[replicate]
key = ...
```
(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))

Also review the [AiHandler](pr_agent/algo/ai_handler.py) file for instructions on how to set keys for other models.

#### Extra instructions
All PR-Agent tools have a parameter called `extra_instructions`, which lets you add free-text extra instructions. Example usage:
```
/update_changelog --pr_update_changelog.extra_instructions="Make sure to update also the version ..."
```
@@ -2,7 +2,8 @@ FROM python:3.10 as base

WORKDIR /app
ADD pyproject.toml .
RUN pip install . && rm pyproject.toml
ADD requirements.txt .
RUN pip install . && rm pyproject.toml requirements.txt
ENV PYTHONPATH=/app
ADD pr_agent pr_agent
ADD github_action/entrypoint.sh /
INSTALL.md (304 changes)
@@ -4,128 +4,69 @@
To get started with PR-Agent quickly, you first need to acquire two tokens:

1. An OpenAI key from [here](https://platform.openai.com/), with access to GPT-4.
2. A GitHub personal access token (classic) with the repo scope.
2. A GitHub\GitLab\BitBucket personal access token (classic) with the repo scope.

There are several ways to use PR-Agent:

- [Method 1: Use Docker image (no installation required)](INSTALL.md#method-1-use-docker-image-no-installation-required)
- [Method 2: Run as a GitHub Action](INSTALL.md#method-2-run-as-a-github-action)
- [Method 3: Run from source](INSTALL.md#method-3-run-from-source)
- [Method 4: Run as a polling server](INSTALL.md#method-4-run-as-a-polling-server)
- [Method 5: Run as a GitHub App](INSTALL.md#method-5-run-as-a-github-app)
- [Method 6: Deploy as a Lambda Function](INSTALL.md#method-6---deploy-as-a-lambda-function)
- [Method 7: AWS CodeCommit](INSTALL.md#method-7---aws-codecommit-setup)

**Locally**
- [Using Docker image (no installation required)](INSTALL.md#use-docker-image-no-installation-required)
- [Run from source](INSTALL.md#run-from-source)

**GitHub specific methods**
- [Run as a GitHub Action](INSTALL.md#run-as-a-github-action)
- [Run as a polling server](INSTALL.md#run-as-a-polling-server)
- [Run as a GitHub App](INSTALL.md#run-as-a-github-app)
- [Deploy as a Lambda Function](INSTALL.md#deploy-as-a-lambda-function)
- [AWS CodeCommit](INSTALL.md#aws-codecommit-setup)

**GitLab specific methods**
- [Run a GitLab webhook server](INSTALL.md#run-a-gitlab-webhook-server)

**BitBucket specific methods**
- [Run as a Bitbucket Pipeline](INSTALL.md#run-as-a-bitbucket-pipeline)
- [Run on a hosted app](INSTALL.md#run-on-a-hosted-bitbucket-app)

---

### Method 1: Use Docker image (no installation required)
### Use Docker image (no installation required)

To request a review for a PR, or ask a question about a PR, you can run directly from the Docker image. Here's how:

1. To request a review for a PR, run the following command:

For GitHub:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent --pr_url <pr_url> review
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
For GitLab:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
For BitBucket:
```
docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
```

2. To ask a question about a PR, run the following command:
For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the expected names and values of the environment variables.

Similarly, to ask a question about a PR, run the following command:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent --pr_url <pr_url> ask "<your question>"
```
Note: If you want to ensure you're running a specific version of the Docker image, consider using the image's digest.
The digest is a unique identifier for a specific version of an image. You can pull and run an image using its digest by referencing it like so: repository@sha256:digest. Always ensure you're using the correct and trusted digest for your operations.

1. To request a review for a PR using a specific digest, run the following command:
A list of the relevant tools can be found in the [tools guide](./docs/TOOLS_GUIDE.md).

Note: If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
```bash
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent@sha256:71b5ee15df59c745d352d84752d01561ba64b6d51327f97d46152f0c58a5f678 --pr_url <pr_url> review
```

2. To ask a question about a PR using the same digest, run the following command:
```bash
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent@sha256:71b5ee15df59c745d352d84752d01561ba64b6d51327f97d46152f0c58a5f678 --pr_url <pr_url> ask "<your question>"
```
In addition, you can run [specific released versions](./RELEASE_NOTES.md) of pr-agent, for example:
```
codiumai/pr-agent@v0.8
```

Possible questions you can ask include:

- What is the main theme of this PR?
- Is the PR ready for merge?
- What are the main changes in this PR?
- Should this PR be split into smaller parts?
- Can you compose a rhymed song about this PR?

---

### Method 2: Run as a GitHub Action

You can use our pre-built GitHub Action Docker image to run PR-Agent as a GitHub Action.

1. Add the following file to your repository under `.github/workflows/pr_agent.yml`:

```yaml
on:
  pull_request:
  issue_comment:
jobs:
  pr_agent_job:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
      contents: write
    name: Run pr agent on every pull request, respond to user comments
    steps:
      - name: PR Agent action step
        id: pragent
        uses: Codium-ai/pr-agent@main
        env:
          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
**Note:** To pin your action to a specific commit for stability reasons:
```yaml
on:
  pull_request:
  issue_comment:

jobs:
  pr_agent_job:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
      contents: write
    name: Run pr agent on every pull request, respond to user comments
    steps:
      - name: PR Agent action step
        id: pragent
        uses: Codium-ai/pr-agent@<commit_sha>
        env:
          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
2. Add the following secret to your repository under `Settings > Secrets`:

```
OPENAI_KEY: <your key>
```

The GITHUB_TOKEN secret is automatically created by GitHub.

3. Merge this change to your main branch.
When you open your next PR, you should see a comment from the `github-actions` bot with a review of your PR, and instructions on how to use the rest of the tools.

4. You may configure PR-Agent by adding environment variables under the env section corresponding to any configurable property in the [configuration](./CONFIGURATION.md) file. Some examples:
```yaml
  env:
    # ... previous environment values
    OPENAI.ORG: "<Your organization name under your OpenAI account>"
    PR_REVIEWER.REQUIRE_TESTS_REVIEW: "false" # Disable tests review
    PR_CODE_SUGGESTIONS.NUM_CODE_SUGGESTIONS: 6 # Increase number of code suggestions
```

---

### Method 3: Run from source
### Run from source

1. Clone this repository:
@@ -151,18 +92,92 @@ chmod 600 pr_agent/settings/.secrets.toml

```
export PYTHONPATH=[$PYTHONPATH:]<PATH to pr_agent folder>
python pr_agent/cli.py --pr_url <pr_url> review
python pr_agent/cli.py --pr_url <pr_url> ask <your question>
python pr_agent/cli.py --pr_url <pr_url> describe
python pr_agent/cli.py --pr_url <pr_url> improve
python3 -m pr_agent.cli --pr_url <pr_url> review
python3 -m pr_agent.cli --pr_url <pr_url> ask <your question>
python3 -m pr_agent.cli --pr_url <pr_url> describe
python3 -m pr_agent.cli --pr_url <pr_url> improve
python3 -m pr_agent.cli --pr_url <pr_url> add_docs
python3 -m pr_agent.cli --issue_url <issue_url> similar_issue
...
```

---
### Method 4: Run as a polling server
Request reviews by tagging your GitHub user on a PR
### Run as a GitHub Action

You can use our pre-built GitHub Action Docker image to run PR-Agent as a GitHub Action.

1. Add the following file to your repository under `.github/workflows/pr_agent.yml`:

```yaml
on:
  pull_request:
  issue_comment:
jobs:
  pr_agent_job:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
      contents: write
    name: Run pr agent on every pull request, respond to user comments
    steps:
      - name: PR Agent action step
        id: pragent
        uses: Codium-ai/pr-agent@main
        env:
          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
**Note:** To pin your action to a specific release (v0.7 for example) for stability reasons, use:
```yaml
on:
  pull_request:
  issue_comment:

jobs:
  pr_agent_job:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
      contents: write
    name: Run pr agent on every pull request, respond to user comments
    steps:
      - name: PR Agent action step
        id: pragent
        uses: Codium-ai/pr-agent@v0.7
        env:
          OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
2. Add the following secret to your repository under `Settings > Secrets`:

```
OPENAI_KEY: <your key>
```

The GITHUB_TOKEN secret is automatically created by GitHub.

3. Merge this change to your main branch.
When you open your next PR, you should see a comment from the `github-actions` bot with a review of your PR, and instructions on how to use the rest of the tools.

4. You may configure PR-Agent by adding environment variables under the env section corresponding to any configurable property in the [configuration](pr_agent/settings/configuration.toml) file. Some examples:
```yaml
  env:
    # ... previous environment values
    OPENAI.ORG: "<Your organization name under your OpenAI account>"
    PR_REVIEWER.REQUIRE_TESTS_REVIEW: "false" # Disable tests review
    PR_CODE_SUGGESTIONS.NUM_CODE_SUGGESTIONS: 6 # Increase number of code suggestions
```

---

### Run as a polling server
Request reviews by tagging your GitHub user on a PR

Follow [steps 1-3](#run-as-a-github-action) of the GitHub Action setup.
Follow steps 1-3 of method 2.
Run the following command to start the server:

```
python pr_agent/servers/github_polling.py
```
@@ -171,7 +186,7 @@ python pr_agent/servers/github_polling.py

---

### Method 5: Run as a GitHub App
### Run as a GitHub App
Allowing you to automate the review process on your private or public repositories.

1. Create a GitHub App from the [Github Developer Portal](https://docs.github.com/en/developers/apps/creating-a-github-app).

@@ -213,12 +228,12 @@ git clone https://github.com/Codium-ai/pr-agent.git

- Copy your app's webhook secret to the webhook_secret field.
- Set deployment_type to 'app' in [configuration.toml](./pr_agent/settings/configuration.toml)

> The .secrets.toml file is not copied to the Docker image by default, and is only used for local development.
> If you want to use the .secrets.toml file in your Docker image, you can remove it from the .dockerignore file.
> In most production environments, you would inject the secrets file as environment variables or as mounted volumes.
> For example, in order to inject a secrets file as a volume in a Kubernetes environment you can update your pod spec to include the following,
> assuming you have a secret named `pr-agent-settings` with a key named `.secrets.toml`:
```
volumes:
  - name: settings-volume
    secret:
```
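A fuller pod spec for this pattern might look like the following sketch. This is illustrative only: the container name and mount path are assumptions for the example, not taken from the diff above.

```yaml
# Sketch: mounting the pr-agent-settings secret into a pod.
# The container name and mountPath are illustrative assumptions.
spec:
  containers:
    - name: pr-agent
      volumeMounts:
        - name: settings-volume
          mountPath: /app/pr_agent/settings/.secrets.toml
          subPath: .secrets.toml
  volumes:
    - name: settings-volume
      secret:
        secretName: pr-agent-settings
```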
@@ -251,11 +266,14 @@ docker push codiumai/pr-agent:github_app # Push to your Docker repository

9. Install the app by navigating to the "Install App" tab and selecting your desired repositories.

> **Note:** When running PR-Agent from GitHub App, the default configuration file (configuration.toml) will be loaded.<br>
> However, you can override the default tool parameters by uploading a local configuration file `.pr_agent.toml`, as sketched below.<br>
> For more information please check out the [USAGE GUIDE](./Usage.md#working-with-github-app)
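As a sketch of such an override file (the values are hypothetical, but both parameters appear elsewhere in this guide):

```
[pr_reviewer]
extra_instructions = "Focus on security issues"
require_score_review = false
```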
---

### Method 6 - Deploy as a Lambda Function
### Deploy as a Lambda Function

1. Follow steps 1-5 of [Method 5](#method-5-run-as-a-github-app).
1. Follow steps 1-5 of [Method 5](#run-as-a-github-app).
2. Build a docker image that can be used as a lambda function
```shell
docker buildx build --platform=linux/amd64 . -t codiumai/pr-agent:serverless -f docker/Dockerfile.lambda
```

@@ -267,12 +285,12 @@ docker push codiumai/pr-agent:github_app # Push to your Docker repository

4. Create a lambda function that uses the uploaded image. Set the lambda timeout to be at least 3m.
5. Configure the lambda function to have a Function URL.
6. Go back to steps 8-9 of [Method 5](#method-5-run-as-a-github-app) with the function url as your Webhook URL.
6. Go back to steps 8-9 of [Method 5](#run-as-a-github-app) with the function url as your Webhook URL.
The Webhook URL would look like `https://<LAMBDA_FUNCTION_URL>/api/v1/github_webhooks`

---

### Method 7 - AWS CodeCommit Setup
### AWS CodeCommit Setup

Not all features have been added to CodeCommit yet. As of right now, CodeCommit has been implemented to run the pr-agent CLI on the command line, using AWS credentials stored in environment variables. (More features will be added in the future.) The following is a set of instructions to have pr-agent do a review of your CodeCommit pull request from the command line:
@@ -309,7 +327,9 @@ Example IAM permissions to that user to allow access to CodeCommit:

        "codecommit:Get*",
        "codecommit:List*",
        "codecommit:PostComment*",
        "codecommit:PutCommentReaction"
        "codecommit:PutCommentReaction",
        "codecommit:UpdatePullRequestDescription",
        "codecommit:UpdatePullRequestTitle"
      ],
      "Resource": "*"
    }
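For orientation, the fragment above sits inside a standard IAM policy document. Assembled with the usual wrapper fields (which are IAM boilerplate, not part of the diff), it would look roughly like this:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "codecommit:Get*",
        "codecommit:List*",
        "codecommit:PostComment*",
        "codecommit:PutCommentReaction",
        "codecommit:UpdatePullRequestDescription",
        "codecommit:UpdatePullRequestTitle"
      ],
      "Resource": "*"
    }
  ]
}
```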
@@ -338,9 +358,57 @@ PYTHONPATH="/PATH/TO/PROJECTS/pr-agent" python pr_agent/cli.py \

review
```

### Appendix - **Debugging LLM API Calls**
If you're testing your codium/pr-agent server and need to see whether calls were made successfully, plus the exact call logs, you can use the [LiteLLM Debugger tool](https://docs.litellm.ai/docs/debugging/hosted_debugging).

You can do this by setting `litellm_debugger=true` in configuration.toml. Your logs will be viewable in real time at `admin.litellm.ai/<your_email>`. Set your email in the `.secrets.toml` under 'user_email'.

<img src="./pics/debugger.png" width="800"/>
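In `configuration.toml` that toggle would look like this (a minimal sketch, assuming the flag sits under the file's `[config]` section):

```
[config]
litellm_debugger=true
```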
---

### Run a GitLab webhook server
1. From the GitLab workspace or group, create an access token. Enable the "api" scope only.
2. Generate a random secret for your app, and save it for later. For example, you can use:

```
WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
```
3. Follow the instructions to build the Docker image, set up a secrets file and deploy on your own server from [Method 5](#run-as-a-github-app) steps 4-7.
4. In the secrets file, fill in the following:
   - Your OpenAI key.
   - In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
   - Set deployment_type to 'gitlab' in [configuration.toml](./pr_agent/settings/configuration.toml)
5. Create a webhook in GitLab. Set the URL to the URL of your app's server. Set the secret token to the generated secret from step 2.
In the "Trigger" section, check the 'comments' and 'merge request events' boxes.
6. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.


### Run as a Bitbucket Pipeline

You can use the Bitbucket Pipeline system to run PR-Agent on every pull request open or update.

1. Add the following file to your repository as `bitbucket-pipelines.yml`:

```yaml
pipelines:
  pull-requests:
    '**':
      - step:
          name: PR Agent Review
          image: python:3.10
          services:
            - docker
          script:
            - docker run -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=https://bitbucket.org/$BITBUCKET_WORKSPACE/$BITBUCKET_REPO_SLUG/pull-requests/$BITBUCKET_PR_ID review
```

2. Add the following secure variables to your repository under Repository settings > Pipelines > Repository variables.

   OPENAI_API_KEY: <your key>
   BITBUCKET_BEARER_TOKEN: <your token>

You can get a Bitbucket token for your repository by following Repository Settings -> Security -> Access Tokens.

### Run on a hosted Bitbucket app

Please contact <support@codium.ai> if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implementation.
PR_COMPRESSION.md

@@ -1,4 +1,4 @@
# Git Patch Logic
# PR Compression Strategy
There are two scenarios:
1. The PR is small enough to fit in a single prompt (including system and user prompt)
2. The PR is too large to fit in a single prompt (including system and user prompt)

@@ -16,7 +16,7 @@ We prioritize the languages of the repo based on the following criteria:

## Small PR
In this case, we can fit the entire PR in a single prompt:
1. Exclude binary files and non-code files (e.g. images, pdfs, etc)
2. We expand the surrounding context of each patch to 6 lines above and below the patch
2. We expand the surrounding context of each patch to 3 lines above and below the patch

## Large PR

### Motivation

@@ -25,7 +25,7 @@ We want to be able to pack as much information as possible in a single LLM prompt

#### PR compression strategy
#### Compression strategy
We prioritize additions over deletions:
- Combine all deleted files into a single list (`deleted files`)
- File patches are a list of hunks; remove all hunks of type deletion-only from the hunks in the file patch (see the sketch after this list)
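As a rough illustration of that deletion-only filtering step (a sketch in Python, not the project's actual implementation; the function and variable names are invented for this example):

```python
def drop_deletion_only_hunks(patch: str) -> str:
    """Keep context/addition hunks of a unified diff; drop hunks that only delete lines."""
    result, hunk = [], []

    def flush():
        # A hunk is deletion-only if, after its '@@' header,
        # it contains '-' lines but no '+' lines.
        has_add = any(l.startswith('+') for l in hunk[1:])
        has_del = any(l.startswith('-') for l in hunk[1:])
        if hunk and not (has_del and not has_add):
            result.extend(hunk)
        hunk.clear()

    for line in patch.splitlines():
        if line.startswith('@@'):
            flush()  # close the previous hunk before starting a new one
        hunk.append(line)
    flush()
    return '\n'.join(result)
```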
README.md (190 changes)
@@ -9,25 +9,38 @@ Making pull requests less painful with an AI agent

[License](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
[Discord](https://discord.com/channels/1057273017547378788/1126104260430528613)
[Twitter](https://twitter.com/codiumai)
<a href="https://github.com/Codium-ai/pr-agent/commits/main">
  <img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
</a>
</div>
<div style="text-align:left;">

CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull requests faster and more efficiently. It automatically analyzes the pull request and can provide several types of PR feedback:
CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull requests faster and more efficiently. It automatically analyzes the pull request and can provide several types of commands:

**Auto-Description**: Automatically generating [PR description](https://github.com/Codium-ai/pr-agent/pull/229#issue-1860711415) - title, type, summary, code walkthrough and labels.
‣ **Auto Description ([`/describe`](./docs/DESCRIBE.md))**: Automatically generating PR description - title, type, summary, code walkthrough and labels.
\
**Auto Review**: [Adjustable feedback](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695022908) about the PR main theme, type, relevant tests, security issues, score, and various suggestions for the PR content.
‣ **Auto Review ([`/review`](./docs/REVIEW.md))**: Adjustable feedback about the PR main theme, type, relevant tests, security issues, score, and various suggestions for the PR content.
\
**Question Answering**: Answering [free-text questions](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021332) about the PR.
‣ **Question Answering ([`/ask ...`](./docs/ASK.md))**: Answering free-text questions about the PR.
\
**Code Suggestions**: [Committable code suggestions](https://github.com/Codium-ai/pr-agent/pull/229#discussion_r1306919276) for improving the PR.
‣ **Code Suggestions ([`/improve`](./docs/IMPROVE.md))**: Committable code suggestions for improving the PR.
\
**Update Changelog**: Automatically updating the CHANGELOG.md file with the [PR changes](https://github.com/Codium-ai/pr-agent/pull/168#discussion_r1282077645).
‣ **Update Changelog ([`/update_changelog`](./docs/UPDATE_CHANGELOG.md))**: Automatically updating the CHANGELOG.md file with the PR changes.
\
‣ **Find Similar Issue ([`/similar_issue`](./docs/SIMILAR_ISSUE.md))**: Automatically retrieves and presents similar issues
\
‣ **Add Documentation ([`/add_docs`](./docs/ADD_DOCUMENTATION.md))**: Automatically adds documentation to un-documented functions/classes in the PR.
\
‣ **Generate Custom Labels ([`/generate_labels`](./docs/GENERATE_CUSTOM_LABELS.md))**: Automatically suggests custom labels based on the PR code changes.

<h3>Example results:</h2>
See the [Installation Guide](./INSTALL.md) for instructions on how to install and run the tool on different platforms.

See the [Usage Guide](./Usage.md) for instructions on how to run the different tools from _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.

See the [Tools Guide](./docs/TOOLS_GUIDE.md) for a detailed description of the different tools.

<h3>Example results:</h3>
</div>
<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1687561986">/describe:</a></h4>
<div align="center">
@@ -35,87 +48,104 @@ CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull

<p float="center">
<img src="https://www.codium.ai/images/describe-2.gif" width="800">
</p>
</div>

<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901">/review:</a></h4>
<div align="center">
<p float="center">
<img src="https://www.codium.ai/images/review-2.gif" width="800">
</p>
</div>
<h4><a href="https://github.com/Codium-ai/pr-agent/pull/78#issuecomment-1639739496">/reflect_and_review:</a></h4>
<div align="center">
<p float="center">
<img src="https://www.codium.ai/images/reflect_and_review.gif" width="800">
</p>
</div>
<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695020538">/ask:</a></h4>
<div align="center">
<p float="center">
<img src="https://www.codium.ai/images/ask-2.gif" width="800">
</p>
</div>
<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695024952">/improve:</a></h4>
<div align="center">
<p float="center">
<img src="https://www.codium.ai/images/improve-2.gif" width="800">
</p>
</div>

[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/78#issuecomment-1639739496">/reflect_and_review:</a></h4>)

[//]: # (<div align="center">)

[//]: # (<p float="center">)

[//]: # (<img src="https://www.codium.ai/images/reflect_and_review.gif" width="800">)

[//]: # (</p>)

[//]: # (</div>)

[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695020538">/ask:</a></h4>)

[//]: # (<div align="center">)

[//]: # (<p float="center">)

[//]: # (<img src="https://www.codium.ai/images/ask-2.gif" width="800">)

[//]: # (</p>)

[//]: # (</div>)

[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695024952">/improve:</a></h4>)

[//]: # (<div align="center">)

[//]: # (<p float="center">)

[//]: # (<img src="https://www.codium.ai/images/improve-2.gif" width="800">)

[//]: # (</p>)

[//]: # (</div>)
<div align="left">

## Table of Contents
- [Overview](#overview)
- [Try it now](#try-it-now)
- [Installation](#installation)
- [Configuration](./CONFIGURATION.md)
- [How it works](#how-it-works)
- [Why use PR-Agent](#why-use-pr-agent)
- [Why use PR-Agent?](#why-use-pr-agent)
- [Roadmap](#roadmap)
- [Similar projects](#similar-projects)
</div>
## Overview
`PR-Agent` offers extensive pull request functionalities across various git providers:

|       |                                              | GitHub | Gitlab | Bitbucket | CodeCommit |
|-------|----------------------------------------------|:------:|:------:|:---------:|:----------:|
| TOOLS | Review                                       | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | ⮑ Inline review                              | :white_check_mark: | :white_check_mark: |  |  |
|       | Ask                                          | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |
|       | Auto-Description                             | :white_check_mark: | :white_check_mark: |  |  |
|       | Improve Code                                 | :white_check_mark: | :white_check_mark: |  |  |
|       | ⮑ Extended                                   | :white_check_mark: | :white_check_mark: |  |  |
|       | Reflect and Review                           | :white_check_mark: |  |  |  |
|       | Update CHANGELOG.md                          | :white_check_mark: |  |  |  |
|       |                                              |  |  |  |  |
| USAGE | CLI                                          | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | App / webhook                                | :white_check_mark: | :white_check_mark: |  |  |
|       | Tagging bot                                  | :white_check_mark: |  |  |  |
|       | Actions                                      | :white_check_mark: |  |  |  |
|       |                                              |  |  |  |  |
| CORE  | PR compression                               | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |
|       | Repo language prioritization                 | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |
|       | Adaptive and token-aware<br />file patch fitting | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |
|       | Multiple models support                      | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |
|       | Incremental PR Review                        | :white_check_mark: |  |  |  |

|       |                                              | GitHub | Gitlab | Bitbucket | CodeCommit | Azure DevOps | Gerrit |
|-------|----------------------------------------------|:------:|:------:|:---------:|:----------:|:------------:|:------:|
| TOOLS | Review                                       | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | ⮑ Incremental                                | :white_check_mark: |  |  |  |  |  |
|       | Ask                                          | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | Auto-Description                             | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | Improve Code                                 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |  | :white_check_mark: |
|       | ⮑ Extended                                   | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |  | :white_check_mark: |
|       | Reflect and Review                           | :white_check_mark: | :white_check_mark: | :white_check_mark: |  | :white_check_mark: | :white_check_mark: |
|       | Update CHANGELOG.md                          | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |  |
|       | Find similar issue                           | :white_check_mark: |  |  |  |  |  |
|       | Add Documentation                            | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |  | :white_check_mark: |
|       | Generate Labels                              | :white_check_mark: | :white_check_mark: |  |  |  |  |
|       |                                              |  |  |  |  |  |  |
| USAGE | CLI                                          | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |  |
|       | App / webhook                                | :white_check_mark: | :white_check_mark: |  |  |  |  |
|       | Tagging bot                                  | :white_check_mark: |  |  |  |  |  |
|       | Actions                                      | :white_check_mark: |  |  |  |  |  |
|       | Web server                                   |  |  |  |  |  | :white_check_mark: |
|       |                                              |  |  |  |  |  |  |
| CORE  | PR compression                               | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | Repo language prioritization                 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | Adaptive and token-aware<br />file patch fitting | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | Multiple models support                      | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|       | Incremental PR Review                        | :white_check_mark: |  |  |  |  |  |
Examples for invoking the different tools via the CLI:
- **Review**: `python cli.py --pr_url=<pr_url> review`
- **Describe**: `python cli.py --pr_url=<pr_url> describe`
- **Improve**: `python cli.py --pr_url=<pr_url> improve`
- **Ask**: `python cli.py --pr_url=<pr_url> ask "Write me a poem about this PR"`
- **Reflect**: `python cli.py --pr_url=<pr_url> reflect`
- **Update Changelog**: `python cli.py --pr_url=<pr_url> update_changelog`

`<pr_url>` is the url of the relevant PR (for example: https://github.com/Codium-ai/pr-agent/pull/50).

In the [configuration](./CONFIGURATION.md) file you can select your git provider (GitHub, Gitlab, Bitbucket), and further configure the different tools.
Review the [usage guide](./Usage.md) section for detailed instructions on how to use the different tools, select the relevant git provider (GitHub, Gitlab, Bitbucket, ...), and adjust the configuration file to your needs.

## Try it now

Try GPT-4 powered PR-Agent on your public GitHub repository for free. Just mention `@CodiumAI-Agent` and add the desired command in any PR comment! The agent will generate a response based on your command.
You can try GPT-4 powered PR-Agent, on your public GitHub repository, instantly. Just mention `@CodiumAI-Agent` and add the desired command in any PR comment. The agent will generate a response based on your command.
For example, add a comment to any pull request with the following text:
```
@CodiumAI-Agent /review
```
and the agent will respond with a review of your PR.

To set up your own PR-Agent, see the [Installation](#installation) section
To set up your own PR-Agent, see the [Installation](#installation) section below.

---
@@ -129,20 +159,22 @@ To get started with PR-Agent quickly, you first need to acquire two tokens:

There are several ways to use PR-Agent:

- [Method 1: Use Docker image (no installation required)](INSTALL.md#method-1-use-docker-image-no-installation-required)
- [Method 2: Run as a GitHub Action](INSTALL.md#method-2-run-as-a-github-action)
- [Method 3: Run from source](INSTALL.md#method-3-run-from-source)
- [Method 2: Run from source](INSTALL.md#method-2-run-from-source)
- [Method 3: Run as a GitHub Action](INSTALL.md#method-3-run-as-a-github-action)
- [Method 4: Run as a polling server](INSTALL.md#method-4-run-as-a-polling-server)
  - Request reviews by tagging your GitHub user on a PR
- [Method 5: Run as a GitHub App](INSTALL.md#method-5-run-as-a-github-app)
  - Allowing you to automate the review process on your private or public repositories
- [Method 6: Deploy as a Lambda Function](INSTALL.md#method-6---deploy-as-a-lambda-function)
- [Method 7: AWS CodeCommit](INSTALL.md#method-7---aws-codecommit-setup)
- [Method 8: Run a GitLab webhook server](INSTALL.md#method-8---run-a-gitlab-webhook-server)
- [Method 9: Run as a Bitbucket Pipeline](INSTALL.md#method-9-run-as-a-bitbucket-pipeline)

## How it works

The following diagram illustrates PR-Agent tools and their flow:

*(diagram: PR-Agent tools and their flow)*

Check out the [PR Compression strategy](./PR_COMPRESSION.md) page for more details on how we convert a code diff to a manageable LLM prompt

@@ -154,7 +186,7 @@ Here are some advantages of PR-Agent:

- We emphasize **real-life practical usage**. Each tool (review, improve, ask, ...) has a single GPT-4 call, no more. We feel that this is critical for realistic team usage - obtaining an answer quickly (~30 seconds) and affordably.
- Our [PR Compression strategy](./PR_COMPRESSION.md) is a core ability that enables us to effectively tackle both short and long PRs.
- Our JSON prompting strategy enables **modular, customizable tools**. For example, the '/review' tool categories can be controlled via the [configuration](./CONFIGURATION.md) file. Adding additional categories is easy and accessible.
- Our JSON prompting strategy enables **modular, customizable tools**. For example, the '/review' tool categories can be controlled via the [configuration](pr_agent/settings/configuration.toml) file. Adding additional categories is easy and accessible.
- We support **multiple git providers** (GitHub, Gitlab, Bitbucket, CodeCommit), **multiple ways** to use the tool (CLI, GitHub Action, GitHub App, Docker, ...), and **multiple models** (GPT-4, GPT-3.5, Anthropic, Cohere, Llama2).
- We are open-source, and welcome contributions from the community.

@@ -164,7 +196,7 @@ Here are some advantages of PR-Agent:

- [x] Support additional models, as a replacement for OpenAI (see [here](https://github.com/Codium-ai/pr-agent/pull/172))
- [x] Develop additional logic for handling large PRs (see [here](https://github.com/Codium-ai/pr-agent/pull/229))
- [ ] Add additional context to the prompt. For example, repo (or relevant files) summarization, with tools such as [ctags](https://github.com/universal-ctags/ctags)
- [ ] PR-Agent for issues, and not just for pull requests
- [x] PR-Agent for issues
- [ ] Adding more tools. Possible directions:
  - [x] PR description
  - [x] Inline code suggestions

@@ -172,13 +204,31 @@ Here are some advantages of PR-Agent:

  - [x] Rank the PR (see [here](https://github.com/Codium-ai/pr-agent/pull/89))
  - [ ] Enforcing CONTRIBUTING.md guidelines
  - [ ] Performance (are there any performance issues)
  - [ ] Documentation (is the PR properly documented)
  - [x] Documentation (is the PR properly documented)
  - [ ] ...

See the [Release notes](./RELEASE_NOTES.md) for updates on the latest changes.

## Similar Projects

- [CodiumAI - Meaningful tests for busy devs](https://github.com/Codium-ai/codiumai-vscode-release)
- [CodiumAI - Meaningful tests for busy devs](https://github.com/Codium-ai/codiumai-vscode-release) (although various capabilities are much more advanced in the CodiumAI IDE plugins)
- [Aider - GPT powered coding in your terminal](https://github.com/paul-gauthier/aider)
- [openai-pr-reviewer](https://github.com/coderabbitai/openai-pr-reviewer)
- [CodeReview BOT](https://github.com/anc95/ChatGPT-CodeReview)
- [AI-Maintainer](https://github.com/merwanehamadi/AI-Maintainer)

## Data Privacy

If you self-host PR-Agent, e.g. via CLI running on your computer, with your OpenAI API key, it is between you and OpenAI. You can read their API data privacy policy here:
https://openai.com/enterprise-privacy

## Links

[Discord](https://discord.gg/kG35uSHDBc)

- Discord community: https://discord.gg/kG35uSHDBc
- CodiumAI site: https://codium.ai
- Blog: https://www.codium.ai/blog/
- Troubleshooting: https://www.codium.ai/blog/technical-faq-and-troubleshooting/
- Support: support@codium.ai
RELEASE_NOTES.md (new file, 63 lines)

@@ -0,0 +1,63 @@
## [Version 0.9] - 2023-10-29
- codiumai/pr-agent:0.9
- codiumai/pr-agent:0.9-github_app
- codiumai/pr-agent:0.9-bitbucket-app
- codiumai/pr-agent:0.9-gitlab_webhook
- codiumai/pr-agent:0.9-github_polling
- codiumai/pr-agent:0.9-github_action

### Added::Algo
- New tool - [generate_labels](https://github.com/Codium-ai/pr-agent/blob/main/docs/GENERATE_CUSTOM_LABELS.md)
- New ability to use [custom labels](https://github.com/Codium-ai/pr-agent/blob/main/docs/GENERATE_CUSTOM_LABELS.md#how-to-enable-custom-labels) on the `review` and `describe` tools.
- New tool - [add_docs](https://github.com/Codium-ai/pr-agent/blob/main/docs/ADD_DOCUMENTATION.md)
- GitHub Action: Can now use a `.pr_agent.toml` file to control configuration parameters (see [Usage Guide](./Usage.md#working-with-github-action)).
- GitHub App: Added ability to trigger tools on [push events](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#github-app-automatic-tools-for-new-code-pr-push)
- Support custom domain URLs for Azure DevOps integration (see [link](https://github.com/Codium-ai/pr-agent/pull/381)).
- PR Description default mode is now in [bullet points](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L35).

### Added::Documentation
Significant documentation updates (see [Installation Guide](https://github.com/Codium-ai/pr-agent/blob/main/INSTALL.md), [Usage Guide](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md), and [Tools Guide](https://github.com/Codium-ai/pr-agent/blob/main/docs/TOOLS_GUIDE.md))

### Fixed
- Fixed support for BitBucket pipeline (see [link](https://github.com/Codium-ai/pr-agent/pull/386))
- Fixed a bug in the `review -i` tool
- Added a blacklist for specific file extensions in the `add_docs` tool (see [link](https://github.com/Codium-ai/pr-agent/pull/385/))

## [Version 0.8] - 2023-09-27
- codiumai/pr-agent:0.8
- codiumai/pr-agent:0.8-github_app
- codiumai/pr-agent:0.8-bitbucket-app
- codiumai/pr-agent:0.8-gitlab_webhook
- codiumai/pr-agent:0.8-github_polling
- codiumai/pr-agent:0.8-github_action

### Added::Algo
- GitHub Action: Can control which tools will run automatically when a new PR is created. (see usage guide: https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#working-with-github-action)
- Code suggestion tool: Will try to avoid an 'add comments' suggestion (see https://github.com/Codium-ai/pr-agent/pull/327)

### Fixed
- Gitlab: Fixed a bug of improper usage of pr_id

## [Version 0.7] - 2023-09-20

### Docker Tags
- codiumai/pr-agent:0.7
- codiumai/pr-agent:0.7-github_app
- codiumai/pr-agent:0.7-bitbucket-app
- codiumai/pr-agent:0.7-gitlab_webhook
- codiumai/pr-agent:0.7-github_polling
- codiumai/pr-agent:0.7-github_action

### Added::Algo
- New tool /similar_issue - Currently on GitHub app and CLI: indexes the issues in the repo, finds the most similar issues to the target issue.
- Describe markers: Empower the /describe tool with a templating capability (see more details in https://github.com/Codium-ai/pr-agent/pull/273).
- New feature in the /review tool - added an effort estimation to the review (https://github.com/Codium-ai/pr-agent/pull/306).

### Added::Infrastructure
- Implementation of a GitLab webhook.
- Implementation of a BitBucket app.

### Fixed
- Protection against no code suggestions generated.
- Resilience to repositories where the languages cannot be automatically detected.
Usage.md (new file, 353 lines)

@@ -0,0 +1,353 @@
## Usage Guide

### Table of Contents
- [Introduction](#introduction)
- [Working from a local repo (CLI)](#working-from-a-local-repo-cli)
- [Online usage](#online-usage)
- [Working with GitHub App](#working-with-github-app)
- [Working with GitHub Action](#working-with-github-action)
- [Changing a model](#changing-a-model)
- [Working with large PRs](#working-with-large-prs)
- [Appendix - additional configurations walkthrough](#appendix---additional-configurations-walkthrough)

### Introduction

After [installation](/INSTALL.md), there are three basic ways to invoke CodiumAI PR-Agent:
1. Locally running a CLI command
2. Online usage - by [commenting](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR
3. Enabling PR-Agent tools to run automatically when a new PR is opened

Specifically, CLI commands can be issued by invoking a pre-built [docker image](/INSTALL.md#running-from-source), or by invoking a [locally cloned repo](INSTALL.md#method-2-run-from-source).
For online usage, you will need to set up either a [GitHub App](INSTALL.md#method-5-run-as-a-github-app) or a [GitHub Action](INSTALL.md#method-3-run-as-a-github-action).
GitHub App and GitHub Action also let you run PR-Agent tools automatically when a new PR is opened.

#### The configuration file
The different tools and sub-tools used by CodiumAI PR-Agent are adjustable via the **[configuration file](pr_agent/settings/configuration.toml)**.
In addition to general configuration options, each tool has its own configurations. For example, the `review` tool will use parameters from the [pr_reviewer](/pr_agent/settings/configuration.toml#L16) section in the configuration file.

The [Tools Guide](./docs/TOOLS_GUIDE.md) provides a detailed description of the different tools and their configurations. A sketch of the file's overall layout follows.
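As a rough sketch of that layout (the section names `[config]` and `[pr_reviewer]` appear elsewhere in this guide; the specific values shown are illustrative):

```
[config]
model = "gpt-4"
verbosity_level = 0

[pr_reviewer]
require_score_review = false
extra_instructions = ""
```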
#### Ignoring files from analysis
In some cases, you may want to exclude specific files or directories from the analysis performed by CodiumAI PR-Agent. This can be useful, for example, when you have files that are generated automatically or files that shouldn't be reviewed, like vendored code.

To ignore files or directories, edit the **[ignore.toml](/pr_agent/settings/ignore.toml)** configuration file. This setting is also exposed via the following environment variables:

- `IGNORE.GLOB`
- `IGNORE.REGEX`

See the [dynaconf envvars documentation](https://www.dynaconf.com/envvars/). A sketch of the file follows.
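For illustration, an `ignore.toml` along these lines would skip vendored code and lock files (a sketch; the `[ignore]` section name mirrors the file name, and the patterns are invented examples):

```
[ignore]
glob = ['vendor/**', '*.lock']
regex = []
```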
#### git provider
The [git_provider](pr_agent/settings/configuration.toml#L4) field in the configuration file determines the git provider that will be used by PR-Agent (set as in the snippet below). Currently, the following providers are supported:
`
"github", "gitlab", "azure", "codecommit", "local", "gerrit"
`
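For example, to select GitHub (a minimal sketch, assuming `git_provider` lives under the `[config]` section that the link above points into):

```
[config]
git_provider = "github"
```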
[//]: # (** online usage:**)

[//]: # (Options that are available in the configuration file can be specified at run time when calling actions. Two examples:)

[//]: # (```)

[//]: # (- /review --pr_reviewer.extra_instructions="focus on the file: ...")

[//]: # (- /describe --pr_description.add_original_user_description=false -pr_description.extra_instructions="make sure to mention: ...")

[//]: # (```)
### Working from a local repo (CLI)
When running from your local repo (CLI), your local configuration file will be used.
Examples for invoking the different tools via the CLI:

- **Review**: `python -m pr_agent.cli --pr_url=<pr_url> review`
- **Describe**: `python -m pr_agent.cli --pr_url=<pr_url> describe`
- **Improve**: `python -m pr_agent.cli --pr_url=<pr_url> improve`
- **Ask**: `python -m pr_agent.cli --pr_url=<pr_url> ask "Write me a poem about this PR"`
- **Reflect**: `python -m pr_agent.cli --pr_url=<pr_url> reflect`
- **Update Changelog**: `python -m pr_agent.cli --pr_url=<pr_url> update_changelog`

`<pr_url>` is the URL of the relevant PR (for example: https://github.com/Codium-ai/pr-agent/pull/50).

**Notes:**

(1) In addition to editing your local configuration file, you can also change any configuration value by adding it to the command line:
```
python -m pr_agent.cli --pr_url=<pr_url> /review --pr_reviewer.extra_instructions="focus on the file: ..."
```

(2) You can print results locally, without publishing them, by setting in `configuration.toml`:
```
[config]
publish_output=false
verbosity_level=2
```
This is useful for debugging or experimenting with the different tools.

### Online usage

Online usage means invoking PR-Agent tools by [commenting](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR.
Commands for invoking the different tools via comments:

- **Review**: `/review`
- **Describe**: `/describe`
- **Improve**: `/improve`
- **Ask**: `/ask "..."`
- **Reflect**: `/reflect`
- **Update Changelog**: `/update_changelog`

To override a specific configuration value, add `--<config_path>=<value>` to any command, where `<config_path>` is the dotted path of the setting.
For example, to change the `review` tool configurations, you can run:
```
/review --pr_reviewer.extra_instructions="..." --pr_reviewer.require_score_review=false
```
Any configuration value in the [configuration file](pr_agent/settings/configuration.toml) can be edited this way. Comment `/config` to see the list of available configurations.

### Working with GitHub App
When running PR-Agent from a [GitHub App](INSTALL.md#method-5-run-as-a-github-app), the default configurations from a pre-built docker image will be initially loaded.

#### GitHub app automatic tools
The [github_app](pr_agent/settings/configuration.toml#L56) section defines GitHub app specific configurations.
In this section you can define configurations to control the conditions under which tools will **run automatically**.
Note that a local `.pr_agent.toml` file enables you to edit and customize the default parameters of any tool, not just the ones that are run automatically.

##### GitHub app automatic tools for PR actions
The GitHub app can respond to the following actions on a PR:
1. `opened` - Opening a new PR
2. `reopened` - Reopening a closed PR
3. `ready_for_review` - Moving a PR from Draft to Open
4. `review_requested` - Specifically requesting review (in the PR reviewers list) from the `github-actions[bot]` user

The configuration parameter `handle_pr_actions` defines the list of actions for which the GitHub app will trigger PR-Agent.
The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when one of the above actions happens (e.g., a new PR is opened):
```
[github_app]
handle_pr_actions = ['opened', 'reopened', 'ready_for_review', 'review_requested']
pr_commands = [
    "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
    "/auto_review",
]
```
This means that when a new PR is opened/reopened or marked as ready for review, PR-Agent will run the `describe` and `auto_review` tools.
For the `describe` tool, the `add_original_user_description` and `keep_original_user_title` parameters will be set to true.

You can override the default tool parameters by uploading a local configuration file called `.pr_agent.toml` to the root of your repo.
For example, if your local `.pr_agent.toml` file contains:
```
[pr_description]
add_original_user_description = false
keep_original_user_title = false
```
When a new PR is opened, PR-Agent will run the `describe` tool with the above parameters, i.e. without adding the original user description and without keeping the original title.

To cancel the automatic run of all the tools, set:
```
[github_app]
handle_pr_actions = []
```

##### GitHub app automatic tools for new code (PR push)
In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.

The configuration toggle `handle_push_trigger` can be used to enable this feature.
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the PR:
```
[github_app]
handle_push_trigger = true
push_commands = [
    "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
    "/auto_review -i --pr_reviewer.remove_previous_review_comment=true",
]
```
This means that when new code is pushed to the PR, PR-Agent will run the `describe` and incremental `auto_review` tools.
For the `describe` tool, the `add_original_user_description` and `keep_original_user_title` parameters will be set to true.
For the `auto_review` tool, it will run in incremental mode (`-i`), and the `remove_previous_review_comment` parameter will be set to true.

Much like the configurations for `pr_commands`, you can override the default tool parameters by uploading a local configuration file to the root of your repo.
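For example, a minimal sketch of a `.pr_agent.toml` that keeps earlier review comments when a push-triggered review runs (this simply overrides the `remove_previous_review_comment` parameter shown above):
```
[pr_reviewer]
remove_previous_review_comment = false
```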

#### Editing the prompts
The prompts for the various PR-Agent tools are defined in the `pr_agent/settings` folder.
In practice, the prompts are loaded and stored as a standard setting object.
Hence, editing them is similar to editing any other configuration value - just place the relevant key in your `.pr_agent.toml` file, and override the default value.

For example, if you want to edit the prompts of the [describe](./pr_agent/settings/pr_description_prompts.toml) tool, you can add the following to your `.pr_agent.toml` file:
```
[pr_description_prompt]
system="""
...
"""
user="""
...
"""
```
Note that the new prompt will need to generate an output compatible with the relevant [post-process function](./pr_agent/tools/pr_description.py#L137).

### Working with GitHub Action
You can configure settings in GitHub Action by adding environment variables under the `env` section in the `.github/workflows/pr_agent.yml` file.
Specifically, start by setting the following environment variables:
```yaml
env:
  OPENAI_KEY: ${{ secrets.OPENAI_KEY }} # Make sure to add your OpenAI key to your repo secrets
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Make sure to add your GitHub token to your repo secrets
  github_action.auto_review: "true" # enable/disable auto review
  github_action.auto_describe: "true" # enable/disable auto describe
  github_action.auto_improve: "false" # enable/disable auto improve
```
`github_action.auto_review`, `github_action.auto_describe` and `github_action.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
If not set, the default option is that only the `review` tool will run automatically when a new PR is opened.

Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` file in the root of your repo, similar to the GitHub App usage.

For example, you can set an environment variable `pr_description.add_original_user_description=false`, or add a `.pr_agent.toml` file with the following content:
```
[pr_description]
add_original_user_description = false
```


### Changing a model

See [here](pr_agent/algo/__init__.py) for the list of available models.
To use a different model than the default (GPT-4), you need to edit the [configuration file](pr_agent/settings/configuration.toml#L2).
For models and environments not from OpenAI, you might need to provide additional keys and other parameters. See below for instructions.

#### Azure
To use Azure, set the following in your `.secrets.toml`:
```
api_key = "" # your azure api key
api_type = "azure"
api_version = '2023-05-15' # Check Azure documentation for the current API version
api_base = "" # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
openai.deployment_id = "" # The deployment name you chose when you deployed the engine
```

and set the following in `configuration.toml`:
```
[config]
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
```

#### Huggingface

**Local**
You can run Huggingface models locally through either [VLLM](https://docs.litellm.ai/docs/providers/vllm) or [Ollama](https://docs.litellm.ai/docs/providers/ollama).

E.g. to use a new Huggingface model locally via Ollama, set:
```
[__init__.py]
MAX_TOKENS = {
    "model-name-on-ollama": <max_tokens>
}
e.g.
MAX_TOKENS = {
    ...,
    "llama2": 4096
}

[config] # in configuration.toml
model = "ollama/llama2"

[ollama] # in .secrets.toml
api_base = ... # the base url for your local Ollama server, e.g. "http://localhost:11434"
```

**Inference Endpoints**

To use a new model with Huggingface Inference Endpoints, for example, set:
```
[__init__.py]
MAX_TOKENS = {
    "model-name-on-huggingface": <max_tokens>
}
e.g.
MAX_TOKENS = {
    ...,
    "meta-llama/Llama-2-7b-chat-hf": 4096
}

[config] # in configuration.toml
model = "huggingface/meta-llama/Llama-2-7b-chat-hf"

[huggingface] # in .secrets.toml
key = ... # your huggingface api key
api_base = ... # the base url for your huggingface inference endpoint
```
(you can obtain a Huggingface API key from your [account settings](https://huggingface.co/settings/tokens))

#### Replicate

To use the Llama2 model with Replicate, for example, set:
```
[config] # in configuration.toml
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
[replicate] # in .secrets.toml
key = ...
```
(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))

Also review the [AiHandler](pr_agent/algo/ai_handler.py) file for instructions on how to set keys for other models.

### Working with large PRs

The default mode of CodiumAI PR-Agent is to have a single call per tool, using GPT-4, which has a token limit of 8000 tokens.
This mode provides a very good speed-quality-cost tradeoff, and can handle most PRs successfully.
When the PR is above the token limit, it employs a [PR Compression strategy](./PR_COMPRESSION.md).

However, for very large PRs, or in case you want to emphasize quality over speed and cost, there are two possible solutions:
1) [Use a model](#changing-a-model) with a larger context window, like GPT-4 32K or Claude-100K. This solution is applicable to all the tools.
2) For the `/improve` tool, there is an ['extended' mode](./docs/IMPROVE.md) (`/improve --extended`),
which divides the PR into chunks and processes each chunk separately. With this mode, regardless of the model, no compression will be done (but for large PRs, multiple model calls may occur).
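For the first solution, a hedged sketch of the `configuration.toml` change (the model string must be one of those listed in `pr_agent/algo/__init__.py`; `gpt-4-32k` is shown here only as an illustration):
```
[config]
model = "gpt-4-32k"
```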

### Appendix - additional configurations walkthrough

#### Extra instructions
All PR-Agent tools have a parameter called `extra_instructions`, that enables adding free-text extra instructions. Example usage:
```
/update_changelog --pr_update_changelog.extra_instructions="Make sure to update also the version ..."
```

#### Patch Extra Lines
By default, around any change in your PR, git patch provides 3 lines of context above and below the change.
```
@@ -12,5 +12,5 @@ def func1():
 code line that already existed in the file...
 code line that already existed in the file...
 code line that already existed in the file...
-code line that was removed in the PR
+new code line added in the PR
 code line that already existed in the file...
 code line that already existed in the file...
 code line that already existed in the file...
```

For the `review`, `describe`, `ask` and `add_docs` tools, if the token budget allows, PR-Agent tries to increase the number of lines of context, via the parameter:
```
[config]
patch_extra_lines=3
```

Increasing this number provides more context to the model, but will also increase token consumption.
If the PR is too large (see the [PR Compression strategy](./PR_COMPRESSION.md)), PR-Agent automatically sets this number to 0, and uses the original git patch.


#### Azure DevOps provider
To use the Azure DevOps provider, use the following settings in `configuration.toml`:
```
[config]
git_provider="azure"
use_repo_settings_file=false
```

And use the following settings (you have to replace the values) in `.secrets.toml`:
```
[azure_devops]
org = "https://dev.azure.com/YOUR_ORGANIZATION/"
pat = "YOUR_PAT_TOKEN"
```

docker/Dockerfile:
@@ -18,6 +18,10 @@ FROM base as github_polling
ADD pr_agent pr_agent
CMD ["python", "pr_agent/servers/github_polling.py"]

FROM base as gitlab_webhook
ADD pr_agent pr_agent
CMD ["python", "pr_agent/servers/gitlab_webhook.py"]

FROM base as test
ADD requirements-dev.txt .
RUN pip install -r requirements-dev.txt && rm requirements-dev.txt

docs/ADD_DOCUMENTATION.md (new file, 15 lines)
@@ -0,0 +1,15 @@
# Add Documentation Tool
The `add_docs` tool scans the PR code changes, and automatically suggests documentation for undocumented code components (functions, classes, etc.).

It can be invoked manually by commenting on any PR:
```
/add_docs
```
For example:

<kbd><img src=./../pics/add_docs_comment.png width="768"></kbd>
<kbd><img src=./../pics/add_docs.png width="768"></kbd>

### Configuration options
- `docs_style`: The exact style of the documentation (for Python docstrings). You can choose between: `google`, `numpy`, `sphinx`, `restructuredtext`, `plain`. Default is `sphinx`.
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
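For instance, a hedged sketch of setting these options in a repo-level `.pr_agent.toml` (the section name `[pr_add_docs]` follows the tool's naming convention and should be verified against the configuration file; the values are illustrative):
```
[pr_add_docs]
docs_style = "google"
extra_instructions = "Document public functions only."
```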

docs/ASK.md (new file, 11 lines)
@@ -0,0 +1,11 @@
# ASK Tool

The `ask` tool answers questions about the PR, based on the PR code changes.
It can be invoked manually by commenting on any PR:
```
/ask "..."
```
For example:

<kbd><img src=./../pics/ask_comment.png width="768"></kbd>
<kbd><img src=./../pics/ask.png width="768"></kbd>

docs/DESCRIBE.md (new file, 62 lines)
@@ -0,0 +1,62 @@
# Describe Tool

The `describe` tool scans the PR code changes, and automatically generates a PR description - title, type, summary, code walkthrough and labels.
It can be invoked manually by commenting on any PR:
```
/describe
```
For example:

<kbd><img src=./../pics/describe_comment.png width="768"></kbd>

<kbd><img src=./../pics/describe.png width="768"></kbd>

The `describe` tool can also be triggered automatically every time a new PR is opened. See examples for automatic triggers for [GitHub App](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#github-app-automatic-tools) and [GitHub Action](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#working-with-github-action).

### Configuration options

Under the section 'pr_description', the [configuration file](./../pr_agent/settings/configuration.toml#L28) contains options to customize the 'describe' tool:

- `publish_labels`: if set to true, the tool will publish the labels to the PR. Default is true.

- `publish_description_as_comment`: if set to true, the tool will publish the description as a comment on the PR. If false, it will overwrite the original description. Default is false.

- `add_original_user_description`: if set to true, the tool will add the original user description to the generated description. Default is false.

- `keep_original_user_title`: if set to true, the tool will keep the original PR title, and won't change it. Default is false.

- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".

- To enable `custom labels`, apply the configuration changes described [here](./GENERATE_CUSTOM_LABELS.md#configuration-changes). A sketch combining several of these options follows this list.
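For example, a hedged sketch of overriding several of these options in a single comment invocation (the parameter names are the ones listed above; the values are illustrative):
```
/describe --pr_description.publish_description_as_comment=true --pr_description.keep_original_user_title=true
```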

### Markers template

Markers enable easy integration of the user's content with auto-generated content, using a template-like mechanism.

For example, if the PR's original description was:
```
User content...

## PR Description:
pr_agent:summary

## PR Walkthrough:
pr_agent:walkthrough
```
The marker `pr_agent:summary` will be replaced with the PR summary, and `pr_agent:walkthrough` will be replaced with the PR walkthrough.

##### Example:
```
env:
  pr_description.use_description_markers: 'true'
```

<kbd><img src=./../pics/describe_markers_before.png width="768"></kbd>

==>

<kbd><img src=./../pics/describe_markers_after.png width="768"></kbd>

##### Configuration params:

- `use_description_markers`: if set to true, the tool will use the markers template. It replaces every marker of the form `pr_agent:marker_name` with the relevant content. Default is false.
- `include_generated_by_header`: if set to true, the tool will add a dedicated header: 'Generated by PR Agent at ...' to any automatic content. Default is true.

docs/GENERATE_CUSTOM_LABELS.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# Generate Custom Labels
The `generate_labels` tool scans the PR code changes, and given a list of labels and their descriptions, it automatically suggests labels that match the PR code changes.

It can be invoked manually by commenting on any PR:
```
/generate_labels
```
For example:

If we wish to detect changes to SQL queries in a given PR, we can add the following custom label along with its description:

<kbd><img src=./../pics/custom_labels_list.png width="768"></kbd>

When running the `generate_labels` tool on a PR that includes changes in SQL queries, it will automatically suggest the custom label:
<kbd><img src=./../pics/custom_label_published.png width="768"></kbd>

### How to enable custom labels

Note that in addition to the dedicated tool `generate_labels`, the custom labels will also be used by the `review` and `describe` tools.

#### CLI
To enable custom labels, you need to apply the [configuration changes](#configuration-changes) to the [custom_labels file](./../pr_agent/settings/custom_labels.toml).

#### GitHub Action and GitHub App
To enable custom labels, you need to apply the [configuration changes](#configuration-changes) to the local `.pr_agent.toml` file in your repository.

#### Configuration changes
- Change `enable_custom_labels` to true: this will turn off the default labels and enable the custom labels provided in the custom_labels.toml file.
- Add the custom labels, formatted as follows:

```
[config]
enable_custom_labels=true

[custom_labels."Custom Label Name"]
description = "Description of when AI should suggest this label"

[custom_labels."Custom Label 2"]
description = "Description of when AI should suggest this label 2"
```

docs/IMPROVE.md (new file, 45 lines)
@@ -0,0 +1,45 @@
# Improve Tool

The `improve` tool scans the PR code changes, and automatically generates committable suggestions for improving the PR code.
It can be invoked manually by commenting on any PR:
```
/improve
```
For example:

<kbd><img src=./../pics/improve_comment.png width="768"></kbd>
<kbd><img src=./../pics/improve.png width="768"></kbd>

The `improve` tool can also be triggered automatically every time a new PR is opened. See examples for automatic triggers for [GitHub App](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#github-app-automatic-tools) and [GitHub Action](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#working-with-github-action).

An extended mode, which does not involve PR Compression and provides more comprehensive suggestions, can be invoked by commenting on any PR:
```
/improve --extended
```
Note that the extended mode divides the PR code changes into chunks, up to the token limits, where each chunk is handled separately (with multiple calls to GPT-4).
Hence, the total number of suggestions is proportional to the number of chunks, i.e. to the size of the PR.

### Configuration options

Under the section 'pr_code_suggestions', the [configuration file](./../pr_agent/settings/configuration.toml#L40) contains options to customize the 'improve' tool:

- `num_code_suggestions`: number of code suggestions provided by the 'improve' tool. Default is 4.
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
- `rank_suggestions`: if set to true, the tool will rank the suggestions, based on importance. Default is false.

#### Params for '/improve --extended' mode
- `num_code_suggestions_per_chunk`: number of code suggestions provided by the 'improve' tool, per chunk. Default is 8.
- `rank_extended_suggestions`: if set to true, the tool will rank the suggestions, based on importance. Default is true.
- `max_number_of_calls`: maximum number of chunks. Default is 5.
- `final_clip_factor`: factor to remove suggestions with low confidence. Default is 0.9. An example combining these parameters is sketched below.
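For instance, a hedged sketch of an extended run that trades suggestion volume for fewer model calls (the parameter names are the ones listed above; the values are illustrative):
```
/improve --extended --pr_code_suggestions.num_code_suggestions_per_chunk=4 --pr_code_suggestions.max_number_of_calls=3
```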

#### A note on code suggestions quality

- With the current level of AI for code (GPT-4), mistakes can happen. Not all the suggestions will be perfect, and a user should not accept all of them automatically.

- Suggestions are not meant to be [simplistic](./../pr_agent/settings/pr_code_suggestions_prompts.toml#L34). Instead, they aim to give deep feedback and raise questions, ideas and thoughts to the user, who can then use their judgment, experience, and understanding of the code base.

- It is recommended to use the 'extra_instructions' field to guide the model to suggestions that are more relevant to the specific needs of the project.

- Best quality will be obtained by using 'improve --extended' mode.

docs/REVIEW.md (new file, 58 lines)
@@ -0,0 +1,58 @@
# Review Tool

The `review` tool scans the PR code changes, and automatically generates a PR review.
It can be invoked manually by commenting on any PR:
```
/review
```
For example:

<kbd><img src=./../pics/review_comment.png width="768"></kbd>
<kbd><img src=./../pics/review.png width="768"></kbd>

The `review` tool can also be triggered automatically every time a new PR is opened. See examples for automatic triggers for [GitHub App](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#github-app-automatic-tools) and [GitHub Action](https://github.com/Codium-ai/pr-agent/blob/main/Usage.md#working-with-github-action).

### Configuration options

Under the section 'pr_reviewer', the [configuration file](./../pr_agent/settings/configuration.toml#L16) contains options to customize the 'review' tool:

- `require_focused_review`: if set to true, the tool will add a section - 'is the PR a focused one'. Default is false.
- `require_score_review`: if set to true, the tool will add a section that scores the PR. Default is false.
- `require_tests_review`: if set to true, the tool will add a section that checks if the PR contains tests. Default is true.
- `require_security_review`: if set to true, the tool will add a section that checks if the PR contains security issues. Default is true.
- `require_estimate_effort_to_review`: if set to true, the tool will add a section that estimates the effort needed to review the PR. Default is true.
- `num_code_suggestions`: number of code suggestions provided by the 'review' tool. Default is 4.
- `inline_code_comments`: if set to true, the tool will publish the code suggestions as comments on the code diff. Default is false.
- `automatic_review`: if set to false, no automatic reviews will be done. Default is true.
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".
- To enable `custom labels`, apply the configuration changes described [here](./GENERATE_CUSTOM_LABELS.md#configuration-changes). A sketch combining several of these options follows this list.
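For example, a hedged sketch of a repo-level `.pr_agent.toml` that turns on scoring and inline comments (the keys are the ones listed above; the values are illustrative):
```
[pr_reviewer]
require_score_review = true
inline_code_comments = true
num_code_suggestions = 6
```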

#### Incremental Mode
An incremental review only considers changes since the last PR-Agent review. This can be useful when working on the PR in an iterative manner, and you want to focus on the changes since the last review instead of reviewing the entire PR again. To invoke it, use:
```
/review -i
```
Note that the incremental mode is only available for GitHub.

<kbd><img src=./../pics/incremental_review.png width="768"></kbd>

#### PR Reflection
By invoking:
```
/reflect_and_review
```
The tool will first ask the author questions about the PR, and will guide the review based on the answers.

<kbd><img src=./../pics/reflection_questions.png width="768"></kbd>
<kbd><img src=./../pics/reflection_answers.png width="768"></kbd>
<kbd><img src=./../pics/reflection_insights.png width="768"></kbd>

#### A note on code suggestions quality

- With the current level of AI for code (GPT-4), mistakes can happen. Not all the suggestions will be perfect, and a user should not accept all of them automatically.

- Suggestions are not meant to be [simplistic](./../pr_agent/settings/pr_reviewer_prompts.toml#L29). Instead, they aim to give deep feedback and raise questions, ideas and thoughts to the user, who can then use their judgment, experience, and understanding of the code base.

- It is recommended to use the 'extra_instructions' field to guide the model to suggestions that are more relevant to the specific needs of the project.

- Unlike the 'review' feature, which does a lot of things, the ['improve --extended'](./IMPROVE.md) feature is dedicated only to suggestions, and usually gives better results.

docs/SIMILAR_ISSUE.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# Similar Issue Tool
The similar issue tool retrieves the most similar issues to the current issue.
It can be invoked manually by commenting on any PR:
```
/similar_issue
```
For example:

<kbd><img src=./../pics/similar_issue_original_issue.png width="768"></kbd>
<kbd><img src=./../pics/similar_issue_comment.png width="768"></kbd>
<kbd><img src=./../pics/similar_issue.png width="768"></kbd>

Note that to perform retrieval, the `similar_issue` tool indexes all the repo's previous issues (once).

To enable usage of the '**similar issue**' tool, you need to set the following keys in `.secrets.toml` (or in the relevant environment variables):
```
[pinecone]
api_key = "..."
environment = "..."
```
These parameters can be obtained by registering to [Pinecone](https://app.pinecone.io/?sessionType=signup/).

### How to use:
- To invoke the 'similar issue' tool from the **CLI**, run:
`python3 cli.py --issue_url=... similar_issue`

- To invoke the 'similar issue' tool via online usage, [comment](https://github.com/Codium-ai/pr-agent/issues/178#issuecomment-1716934893) on a PR:
`/similar_issue`

- You can also enable the 'similar issue' tool to run automatically when a new issue is opened, by adding it to the [pr_commands list in the github_app section](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L66)

docs/TOOLS_GUIDE.md (new file, 11 lines)
@@ -0,0 +1,11 @@
## Tools Guide
- [DESCRIBE](./DESCRIBE.md)
- [REVIEW](./REVIEW.md)
- [IMPROVE](./IMPROVE.md)
- [ASK](./ASK.md)
- [SIMILAR_ISSUE](./SIMILAR_ISSUE.md)
- [UPDATE CHANGELOG](./UPDATE_CHANGELOG.md)
- [ADD DOCUMENTATION](./ADD_DOCUMENTATION.md)
- [GENERATE CUSTOM LABELS](./GENERATE_CUSTOM_LABELS.md)

See the **[installation guide](/INSTALL.md)** for instructions on how to set up PR-Agent.

docs/UPDATE_CHANGELOG.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# Update Changelog Tool

The `update_changelog` tool automatically updates the CHANGELOG.md file with the PR changes.
It can be invoked manually by commenting on any PR:
```
/update_changelog
```
For example:

<kbd><img src=./../pics/update_changelog_comment.png width="768"></kbd>
<kbd><img src=./../pics/update_changelog.png width="768"></kbd>


### Configuration options

Under the section 'pr_update_changelog', the [configuration file](./../pr_agent/settings/configuration.toml#L50) contains options to customize the 'update changelog' tool:

- `push_changelog_changes`: whether to push the changes to CHANGELOG.md, or just print them. Default is false (print only).
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...". An example combining both options is sketched below.
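For instance, a hedged sketch of a comment invocation that also pushes the changelog update (the parameter names are the ones listed above; the instruction text is illustrative):
```
/update_changelog --pr_update_changelog.push_changelog_changes=true --pr_update_changelog.extra_instructions="Follow Keep a Changelog formatting."
```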

(Binary image files added under `pics/`: add_docs.png, add_docs_comment.png, ask.png, ask_comment.png, custom_label_published.png, custom_labels_list.png, describe_comment.png, describe_markers_after.png, describe_markers_before.png, improve.png, improve_comment.png, incremental_review.png, reflection_answers.png, reflection_insights.png, reflection_questions.png, review.png, review_comment.png, similar_issue.png, similar_issue_comment.png, similar_issue_original_issue.png, update_changelog.png, update_changelog_comment.png; pics/describe.png was updated.)
pr_agent/agent/pr_agent.py:
@@ -1,18 +1,18 @@
import logging
import os
import shlex
import tempfile

from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.tools.pr_add_docs import PRAddDocs
from pr_agent.tools.pr_code_suggestions import PRCodeSuggestions
from pr_agent.tools.pr_config import PRConfig
from pr_agent.tools.pr_description import PRDescription
from pr_agent.tools.pr_generate_labels import PRGenerateLabels
from pr_agent.tools.pr_information_from_user import PRInformationFromUser
from pr_agent.tools.pr_questions import PRQuestions
from pr_agent.tools.pr_reviewer import PRReviewer
from pr_agent.tools.pr_similar_issue import PRSimilarIssue
from pr_agent.tools.pr_update_changelog import PRUpdateChangelog
from pr_agent.tools.pr_config import PRConfig

command2class = {
    "auto_review": PRReviewer,
@@ -30,6 +30,9 @@ command2class = {
    "update_changelog": PRUpdateChangelog,
    "config": PRConfig,
    "settings": PRConfig,
    "similar_issue": PRSimilarIssue,
    "add_docs": PRAddDocs,
    "generate_labels": PRGenerateLabels,
}

commands = list(command2class.keys())
@@ -40,22 +43,7 @@ class PRAgent:

    async def handle_request(self, pr_url, request, notify=None) -> bool:
        # First, apply repo specific settings if exists
        if get_settings().config.use_repo_settings_file:
            repo_settings_file = None
            try:
                git_provider = get_git_provider()(pr_url)
                repo_settings = git_provider.get_repo_settings()
                if repo_settings:
                    repo_settings_file = None
                    fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
                    os.write(fd, repo_settings)
                    get_settings().load_file(repo_settings_file)
            finally:
                if repo_settings_file:
                    try:
                        os.remove(repo_settings_file)
                    except Exception as e:
                        logging.error(f"Failed to remove temporary settings file {repo_settings_file}", e)
        apply_repo_settings(pr_url)

        # Then, apply user specific settings if exists
        request = request.replace("'", "\\'")
@@ -65,8 +53,8 @@ class PRAgent:
        args = update_settings_from_args(args)

        action = action.lstrip("/").lower()
        if action == "reflect_and_review" and not get_settings().pr_reviewer.ask_and_reflect:
            action = "review"
        if action == "reflect_and_review":
            get_settings().pr_reviewer.ask_and_reflect = True
        if action == "answer":
            if notify:
                notify()
@@ -80,3 +68,4 @@ class PRAgent:
            else:
                return False
        return True

pr_agent/algo/__init__.py:
@@ -1,4 +1,5 @@
MAX_TOKENS = {
    'text-embedding-ada-002': 8000,
    'gpt-3.5-turbo': 4000,
    'gpt-3.5-turbo-0613': 4000,
    'gpt-3.5-turbo-0301': 4000,
@@ -11,4 +12,5 @@ MAX_TOKENS = {
    'claude-2': 100000,
    'command-nightly': 4096,
    'replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1': 4096,
    'meta-llama/Llama-2-7b-chat-hf': 4096
}
pr_agent/algo/ai_handler.py:
@@ -1,12 +1,12 @@
import logging
import os

import litellm
import openai
from litellm import acompletion
from openai.error import APIError, RateLimitError, Timeout, TryAgain
from retry import retry

from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger

OPENAI_RETRIES = 5

@@ -26,7 +26,11 @@ class AiHandler:
        try:
            openai.api_key = get_settings().openai.key
            litellm.openai_key = get_settings().openai.key
            litellm.debugger = get_settings().config.litellm_debugger
            if get_settings().get("litellm.use_client"):
                litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
                assert litellm_token, "LITELLM_TOKEN is required"
                os.environ["LITELLM_TOKEN"] = litellm_token
                litellm.use_client = True
            self.azure = False
            if get_settings().get("OPENAI.ORG", None):
                litellm.organization = get_settings().openai.org
@@ -48,6 +52,8 @@ class AiHandler:
                litellm.replicate_key = get_settings().replicate.key
            if get_settings().get("HUGGINGFACE.KEY", None):
                litellm.huggingface_key = get_settings().huggingface.key
            if get_settings().get("HUGGINGFACE.API_BASE", None):
                litellm.api_base = get_settings().huggingface.api_base
        except AttributeError as e:
            raise ValueError("OpenAI key is required") from e

@@ -83,33 +89,34 @@ class AiHandler:
        try:
            deployment_id = self.deployment_id
            if get_settings().config.verbosity_level >= 2:
                logging.debug(
                get_logger().debug(
                    f"Generating completion with {model}"
                    f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
                )
            if self.azure:
                model = 'azure/' + model
            messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
            response = await acompletion(
                model=model,
                deployment_id=deployment_id,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user}
                ],
                messages=messages,
                temperature=temperature,
                azure=self.azure,
                force_timeout=get_settings().config.ai_timeout
            )
        except (APIError, Timeout, TryAgain) as e:
            logging.error("Error during OpenAI inference: ", e)
            get_logger().error("Error during OpenAI inference: ", e)
            raise
        except (RateLimitError) as e:
            logging.error("Rate limit error during OpenAI inference: ", e)
            get_logger().error("Rate limit error during OpenAI inference: ", e)
            raise
        except (Exception) as e:
            logging.error("Unknown error during OpenAI inference: ", e)
            get_logger().error("Unknown error during OpenAI inference: ", e)
            raise TryAgain from e
        if response is None or len(response["choices"]) == 0:
            raise TryAgain
        resp = response["choices"][0]['message']['content']
        finish_reason = response["choices"][0]["finish_reason"]
        print(resp, finish_reason)
        usage = response.get("usage")
        get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
                          model=model, usage=usage)
        return resp, finish_reason

pr_agent/algo/file_filter.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import fnmatch
import re

from pr_agent.config_loader import get_settings

def filter_ignored(files):
    """
    Filter out files that match the ignore patterns.
    """

    try:
        # load regex patterns, and translate glob patterns to regex
        patterns = get_settings().ignore.regex
        patterns += [fnmatch.translate(glob) for glob in get_settings().ignore.glob]

        # compile all valid patterns
        compiled_patterns = []
        for r in patterns:
            try:
                compiled_patterns.append(re.compile(r))
            except re.error:
                pass

        # keep filenames that _don't_ match the ignore regex
        for r in compiled_patterns:
            files = [f for f in files if not r.match(f.filename)]

    except Exception as e:
        print(f"Could not filter file list: {e}")

    return files
pr_agent/algo/git_patch_processing.py:
@@ -1,8 +1,9 @@
from __future__ import annotations
import logging

import re

from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger


def extend_patch(original_file_str, patch_str, num_lines) -> str:
@@ -40,12 +41,16 @@ def extend_patch(original_file_str, patch_str, num_lines) -> str:
                    extended_patch_lines.extend(
                        original_lines[start1 + size1 - 1:start1 + size1 - 1 + num_lines])

                res = list(match.groups())
                for i in range(len(res)):
                    if res[i] is None:
                        res[i] = 0
                try:
                    start1, size1, start2, size2 = map(int, match.groups()[:4])
                    start1, size1, start2, size2 = map(int, res[:4])
                except: # '@@ -0,0 +1 @@' case
                    start1, size1, size2 = map(int, match.groups()[:3])
                    start1, size1, size2 = map(int, res[:3])
                    start2 = 0
                section_header = match.groups()[4]
                section_header = res[4]
                extended_start1 = max(1, start1 - num_lines)
                extended_size1 = size1 + (start1 - extended_start1) + num_lines
                extended_start2 = max(1, start2 - num_lines)
@@ -59,7 +64,7 @@ def extend_patch(original_file_str, patch_str, num_lines) -> str:
            extended_patch_lines.append(line)
    except Exception as e:
        if get_settings().config.verbosity_level >= 2:
            logging.error(f"Failed to extend patch: {e}")
            get_logger().error(f"Failed to extend patch: {e}")
        return patch_str

    # finish previous hunk
@@ -130,14 +135,14 @@ def handle_patch_deletions(patch: str, original_file_content_str: str,
    if not new_file_content_str:
        # logic for handling deleted files - don't show patch, just show that the file was deleted
        if get_settings().config.verbosity_level > 0:
            logging.info(f"Processing file: {file_name}, minimizing deletion file")
            get_logger().info(f"Processing file: {file_name}, minimizing deletion file")
        patch = None # file was deleted
    else:
        patch_lines = patch.splitlines()
        patch_new = omit_deletion_hunks(patch_lines)
        if patch != patch_new:
            if get_settings().config.verbosity_level > 0:
                logging.info(f"Processing file: {file_name}, hunks were deleted")
                get_logger().info(f"Processing file: {file_name}, hunks were deleted")
            patch = patch_new
    return patch

@@ -207,10 +212,15 @@ __old hunk__
            old_content_lines = []
            if match:
                prev_header_line = header_line

            res = list(match.groups())
            for i in range(len(res)):
                if res[i] is None:
                    res[i] = 0
            try:
                start1, size1, start2, size2 = map(int, match.groups()[:4])
                start1, size1, start2, size2 = map(int, res[:4])
            except: # '@@ -0,0 +1 @@' case
                start1, size1, size2 = map(int, match.groups()[:3])
                start1, size1, size2 = map(int, res[:3])
                start2 = 0

        elif line.startswith('+'):

pr_agent/algo/language_handler.py:
@@ -42,6 +42,11 @@ def sort_files_by_main_languages(languages: Dict, files: list):
    files_sorted = []
    rest_files = {}

    # if no languages detected, put all files in the "Other" category
    if not languages:
        files_sorted = [({"language": "Other", "files": list(files_filtered)})]
        return files_sorted

    main_extensions_flat = []
    for ext in main_extensions:
        main_extensions_flat.extend(ext)

pr_agent/algo/pr_processing.py:
@@ -1,7 +1,6 @@
from __future__ import annotations

import difflib
import logging
import re
import traceback
from typing import Any, Callable, List, Tuple
@@ -11,9 +10,11 @@ from github import RateLimitExceededException
from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
from pr_agent.algo.language_handler import sort_files_by_main_languages
from pr_agent.algo.file_filter import filter_ignored
from pr_agent.algo.token_handler import TokenHandler, get_token_encoder
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider
from pr_agent.log import get_logger

DELETED_FILES_ = "Deleted files:\n"

@@ -21,7 +22,6 @@ MORE_MODIFIED_FILES_ = "More modified files:\n"

OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD = 1000
OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD = 600
PATCH_EXTRA_LINES = 3

def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: str,
                add_line_numbers_to_hunks: bool = False, disable_extra_lines: bool = False) -> str:
@@ -44,21 +44,24 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
    """

    if disable_extra_lines:
        global PATCH_EXTRA_LINES
        PATCH_EXTRA_LINES = 0
    else:
        PATCH_EXTRA_LINES = get_settings().config.patch_extra_lines

    try:
        diff_files = git_provider.get_diff_files()
    except RateLimitExceededException as e:
        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
        get_logger().error(f"Rate limit exceeded for git provider API. original message {e}")
        raise

    diff_files = filter_ignored(diff_files)

    # get pr languages
    pr_languages = sort_files_by_main_languages(git_provider.get_languages(), diff_files)

    # generate a standard diff string, with patch extension
    patches_extended, total_tokens, patches_extended_tokens = pr_generate_extended_diff(pr_languages, token_handler,
                                                                                        add_line_numbers_to_hunks)
    patches_extended, total_tokens, patches_extended_tokens = pr_generate_extended_diff(
        pr_languages, token_handler, add_line_numbers_to_hunks, patch_extra_lines=PATCH_EXTRA_LINES)

    # if we are under the limit, return the full diff
    if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < MAX_TOKENS[model]:
@@ -80,7 +83,8 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s

def pr_generate_extended_diff(pr_languages: list,
                              token_handler: TokenHandler,
                              add_line_numbers_to_hunks: bool) -> Tuple[list, int, list]:
                              add_line_numbers_to_hunks: bool,
                              patch_extra_lines: int = 0) -> Tuple[list, int, list]:
    """
    Generate a standard diff string with patch extension, while counting the number of tokens used and applying diff
    minimization techniques if needed.
@@ -102,7 +106,7 @@ def pr_generate_extended_diff(pr_languages: list,
                continue

            # extend each patch with extra lines of context
            extended_patch = extend_patch(original_file_content_str, patch, num_lines=PATCH_EXTRA_LINES)
            extended_patch = extend_patch(original_file_content_str, patch, num_lines=patch_extra_lines)
            full_extended_patch = f"\n\n## {file.filename}\n\n{extended_patch}\n"

            if add_line_numbers_to_hunks:
@@ -176,7 +180,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo

        # Hard Stop, no more tokens
        if total_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
            logging.warning(f"File was fully skipped, no more tokens: {file.filename}.")
            get_logger().warning(f"File was fully skipped, no more tokens: {file.filename}.")
            continue

        # If the patch is too large, just show the file name
@@ -185,7 +189,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
            # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
            # until we meet the requirements
            if get_settings().config.verbosity_level >= 2:
                logging.warning(f"Patch too large, minimizing it, {file.filename}")
                get_logger().warning(f"Patch too large, minimizing it, {file.filename}")
            if not modified_files_list:
                total_tokens += token_handler.count_tokens(MORE_MODIFIED_FILES_)
            modified_files_list.append(file.filename)
@@ -200,7 +204,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
        patches.append(patch_final)
        total_tokens += token_handler.count_tokens(patch_final)
        if get_settings().config.verbosity_level >= 2:
            logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")
            get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")

    return patches, modified_files_list, deleted_files_list

@@ -214,7 +218,7 @@ async def retry_with_fallback_models(f: Callable):
            get_settings().set("openai.deployment_id", deployment_id)
            return await f(model)
        except Exception as e:
            logging.warning(
            get_logger().warning(
                f"Failed to generate prediction with {model}"
                f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                f"{traceback.format_exc()}"
@@ -336,7 +340,7 @@ def clip_tokens(text: str, max_tokens: int) -> str:
        clipped_text = text[:num_output_chars]
        return clipped_text
    except Exception as e:
        logging.warning(f"Failed to clip tokens: {e}")
        get_logger().warning(f"Failed to clip tokens: {e}")
        return text


@@ -347,25 +351,27 @@ def get_pr_multi_diffs(git_provider: GitProvider,
    """
    Retrieves the diff files from a Git provider, sorts them by main language, and generates patches for each file.
    The patches are split into multiple groups based on the maximum number of tokens allowed for the given model.

    Args:
        git_provider (GitProvider): An object that provides access to Git provider APIs.
        token_handler (TokenHandler): An object that handles tokens in the context of a pull request.
        model (str): The name of the model.
        max_calls (int, optional): The maximum number of calls to retrieve diff files. Defaults to 5.

    Returns:
        List[str]: A list of final diff strings, split into multiple groups based on the maximum number of tokens allowed for the given model.

    Raises:
        RateLimitExceededException: If the rate limit for the Git provider API is exceeded.
    """
    try:
        diff_files = git_provider.get_diff_files()
    except RateLimitExceededException as e:
        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
        get_logger().error(f"Rate limit exceeded for git provider API. original message {e}")
        raise

    diff_files = filter_ignored(diff_files)

    # Sort files by main language
    pr_languages = sort_files_by_main_languages(git_provider.get_languages(), diff_files)

@@ -381,7 +387,7 @@ def get_pr_multi_diffs(git_provider: GitProvider,
    for file in sorted_files:
        if call_number > max_calls:
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Reached max calls ({max_calls})")
                get_logger().info(f"Reached max calls ({max_calls})")
            break

        original_file_content_str = file.base_file
@@ -404,13 +410,13 @@ def get_pr_multi_diffs(git_provider: GitProvider,
            total_tokens = token_handler.prompt_tokens
            call_number += 1
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Call number: {call_number}")
                get_logger().info(f"Call number: {call_number}")

        if patch:
            patches.append(patch)
            total_tokens += new_patch_tokens
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")
                get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")

    # Add the last chunk
    if patches:

pr_agent/algo/token_handler.py:
@@ -21,7 +21,7 @@ class TokenHandler:
    method.
    """

    def __init__(self, pr, vars: dict, system, user):
    def __init__(self, pr=None, vars: dict = {}, system="", user=""):
        """
        Initializes the TokenHandler object.

@@ -32,7 +32,8 @@ class TokenHandler:
        - user: The user string.
        """
        self.encoder = get_token_encoder()
        self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)
        if pr is not None:
            self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)

    def _get_system_user_tokens(self, pr, encoder, vars: dict, system, user):
        """

@@ -2,7 +2,6 @@ from __future__ import annotations

 import difflib
 import json
-import logging
 import re
 import textwrap
 from datetime import datetime

@@ -11,6 +10,7 @@ from typing import Any, List

 import yaml
 from starlette_context import context

 from pr_agent.config_loader import get_settings, global_settings
+from pr_agent.log import get_logger


 def get_setting(key: str) -> Any:

@@ -20,7 +20,7 @@ def get_setting(key: str) -> Any:
     except Exception:
         return global_settings.get(key, None)

-def convert_to_markdown(output_data: dict) -> str:
+def convert_to_markdown(output_data: dict, gfm_supported: bool=True) -> str:
     """
     Convert a dictionary of data into markdown format.
     Args:

@@ -42,6 +42,7 @@ def convert_to_markdown(output_data: dict) -> str:
         "General suggestions": "💡",
         "Insights from user's answers": "📝",
         "Code feedback": "🤖",
+        "Estimated effort to review [1-5]": "⏱️",
     }

     for key, value in output_data.items():

@@ -49,27 +50,33 @@ def convert_to_markdown(output_data: dict) -> str:
             continue
         if isinstance(value, dict):
             markdown_text += f"## {key}\n\n"
-            markdown_text += convert_to_markdown(value)
+            markdown_text += convert_to_markdown(value, gfm_supported)
         elif isinstance(value, list):
             emoji = emojis.get(key, "")
             if key.lower() == 'code feedback':
-                markdown_text += f"\n\n- **<details><summary> { emoji } Code feedback:**</summary>\n\n"
+                if gfm_supported:
+                    markdown_text += f"\n\n- **<details><summary> { emoji } Code feedback:**</summary>\n\n"
+                else:
+                    markdown_text += f"\n\n- **{emoji} Code feedback:**\n\n"
             else:
                 markdown_text += f"- {emoji} **{key}:**\n\n"
             for item in value:
                 if isinstance(item, dict) and key.lower() == 'code feedback':
-                    markdown_text += parse_code_suggestion(item)
+                    markdown_text += parse_code_suggestion(item, gfm_supported)
                 elif item:
                     markdown_text += f"  - {item}\n"
             if key.lower() == 'code feedback':
-                markdown_text += "</details>\n\n"
+                if gfm_supported:
+                    markdown_text += "</details>\n\n"
+                else:
+                    markdown_text += "\n\n"
         elif value != 'n/a':
             emoji = emojis.get(key, "")
             markdown_text += f"- {emoji} **{key}:** {value}\n"
     return markdown_text


-def parse_code_suggestion(code_suggestions: dict) -> str:
+def parse_code_suggestion(code_suggestions: dict, gfm_supported: bool=True) -> str:
     """
     Convert a dictionary of data into markdown format.

@@ -92,6 +99,10 @@ def parse_code_suggestion(code_suggestions: dict) -> str:
                 markdown_text += f"\n  - **{sub_key}:** {sub_value}\n"
             else:
                 markdown_text += f"   **{sub_key}:** {sub_value}\n"
+            if not gfm_supported:
+                if "relevant line" not in sub_key.lower():  # nicer presentation
+                    # markdown_text = markdown_text.rstrip('\n') + "\\\n"  # works for gitlab
+                    markdown_text = markdown_text.rstrip('\n') + "  \n"  # works for gitlab and bitbucket

     markdown_text += "\n"
     return markdown_text

@@ -149,7 +160,7 @@ def try_fix_json(review, max_iter=10, code_suggestions=False):
         iter_count += 1

     if not valid_json:
-        logging.error("Unable to decode JSON response from AI")
+        get_logger().error("Unable to decode JSON response from AI")
         data = {}

     return data

@@ -168,7 +179,7 @@ def fix_json_escape_char(json_message=None):
     Raises:
         None

-    """
+    """
     try:
         result = json.loads(json_message)
     except Exception as e:

@@ -195,7 +206,7 @@ def convert_str_to_datetime(date_str):
     Example:
         >>> convert_str_to_datetime('Mon, 01 Jan 2022 12:00:00 UTC')
         datetime.datetime(2022, 1, 1, 12, 0, 0)
-    """
+    """
     datetime_format = '%a, %d %b %Y %H:%M:%S %Z'
     return datetime.strptime(date_str, datetime_format)

@@ -220,7 +231,7 @@ def load_large_diff(filename, new_file_content_str: str, original_file_content_s
         diff = difflib.unified_diff(original_file_content_str.splitlines(keepends=True),
                                     new_file_content_str.splitlines(keepends=True))
         if get_settings().config.verbosity_level >= 2:
-            logging.warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
+            get_logger().warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
         patch = ''.join(diff)
     except Exception:
         pass

@@ -252,12 +263,12 @@ def update_settings_from_args(args: List[str]) -> List[str]:
             vals = arg.split('=', 1)
-            if len(vals) != 2:
+            if len(vals) > 2:  # --extended is a valid argument
-                logging.error(f'Invalid argument format: {arg}')
+                get_logger().error(f'Invalid argument format: {arg}')
                 other_args.append(arg)
                 continue
             key, value = _fix_key_value(*vals)
             get_settings().set(key, value)
-            logging.info(f'Updated setting {key} to: "{value}"')
+            get_logger().info(f'Updated setting {key} to: "{value}"')
         else:
             other_args.append(arg)
     return other_args

@@ -269,7 +280,7 @@ def _fix_key_value(key: str, value: str):
     try:
         value = yaml.safe_load(value)
     except Exception as e:
-        logging.error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
+        get_logger().error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
     return key, value


@@ -278,7 +289,7 @@ def load_yaml(review_text: str) -> dict:
     try:
         data = yaml.safe_load(review_text)
     except Exception as e:
-        logging.error(f"Failed to parse AI prediction: {e}")
+        get_logger().error(f"Failed to parse AI prediction: {e}")
         data = try_fix_yaml(review_text)
     return data


@@ -289,8 +300,27 @@ def try_fix_yaml(review_text: str) -> dict:
         review_text_lines_tmp = '\n'.join(review_text_lines[:-i])
         try:
             data = yaml.load(review_text_lines_tmp, Loader=yaml.SafeLoader)
-            logging.info(f"Successfully parsed AI prediction after removing {i} lines")
+            get_logger().info(f"Successfully parsed AI prediction after removing {i} lines")
             break
         except:
             pass
     return data
+
+
+def set_custom_labels(variables):
+    if not get_settings().config.enable_custom_labels:
+        return
+
+    labels = get_settings().custom_labels
+    if not labels:
+        # set default labels
+        labels = ['Bug fix', 'Tests', 'Bug fix with tests', 'Refactoring', 'Enhancement', 'Documentation', 'Other']
+        labels_list = "\n      - ".join(labels) if labels else ""
+        labels_list = f"      - {labels_list}" if labels_list else ""
+        variables["custom_labels"] = labels_list
+        return
+    final_labels = ""
+    for k, v in labels.items():
+        final_labels += f"      - {k} ({v['description']})\n"
+    variables["custom_labels"] = final_labels
+    variables["custom_labels_examples"] = f"      - {list(labels.keys())[0]}"
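Taken together, the utils.py changes above let the same review data render on platforms without GitHub-flavored markdown. A minimal sketch of how a caller might use the new flag; the sample output_data dict is illustrative, not taken from the source:

from pr_agent.algo.utils import convert_to_markdown

# Hypothetical review payload, just to exercise both rendering paths
output_data = {"General suggestions": ["Add unit tests"], "Estimated effort to review [1-5]": 2}

github_md = convert_to_markdown(output_data)                          # default: <details> blocks allowed
bitbucket_md = convert_to_markdown(output_data, gfm_supported=False)  # plain-markdown fallback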
@@ -1,11 +1,12 @@
 import argparse
 import asyncio
-import logging
-import os

 from pr_agent.agent.pr_agent import PRAgent, commands
 from pr_agent.config_loader import get_settings
+from pr_agent.log import setup_logger
+
+setup_logger()

 def run(inargs=None):
     parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage=

@@ -17,6 +18,7 @@ For example:
 - cli.py --pr_url=... improve
 - cli.py --pr_url=... ask "write me a poem about this PR"
 - cli.py --pr_url=... reflect
+- cli.py --issue_url=... similar_issue

 Supported commands:
 -review / review_pr - Add a review that includes a summary of the PR and specific suggestions for improvement.

@@ -37,14 +39,21 @@ Configuration:
 To edit any configuration parameter from 'configuration.toml', just add -config_path=<value>.
 For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions="focus on the file: ..."'
 """)
-    parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', required=True)
+    parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', default=None)
+    parser.add_argument('--issue_url', type=str, help='The URL of the Issue to review', default=None)
     parser.add_argument('command', type=str, help='The', choices=commands, default='review')
     parser.add_argument('rest', nargs=argparse.REMAINDER, default=[])
     args = parser.parse_args(inargs)
-    logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
+    if not args.pr_url and not args.issue_url:
+        parser.print_help()
+        return
+
     command = args.command.lower()
     get_settings().set("CONFIG.CLI_MODE", True)
-    result = asyncio.run(PRAgent().handle_request(args.pr_url, command + " " + " ".join(args.rest)))
+    if args.issue_url:
+        result = asyncio.run(PRAgent().handle_request(args.issue_url, command + " " + " ".join(args.rest)))
+    else:
+        result = asyncio.run(PRAgent().handle_request(args.pr_url, command + " " + " ".join(args.rest)))
     if not result:
         parser.print_help()
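Because run() now accepts either URL flag, it can also be driven programmatically; a small sketch, with a placeholder issue URL:

from pr_agent.cli import run

# Equivalent to: python cli.py --issue_url=<url> similar_issue
run(inargs=["--issue_url", "https://github.com/org/repo/issues/1", "similar_issue"])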
@@ -14,6 +14,7 @@ global_settings = Dynaconf(
     settings_files=[join(current_dir, f) for f in [
         "settings/.secrets.toml",
         "settings/configuration.toml",
+        "settings/ignore.toml",
         "settings/language_extensions.toml",
         "settings/pr_reviewer_prompts.toml",
         "settings/pr_questions_prompts.toml",

@@ -22,7 +23,10 @@ global_settings = Dynaconf(
         "settings/pr_sort_code_suggestions_prompts.toml",
         "settings/pr_information_from_user_prompts.toml",
         "settings/pr_update_changelog_prompts.toml",
-        "settings_prod/.secrets.toml"
+        "settings/pr_custom_labels.toml",
+        "settings/pr_add_docs.toml",
+        "settings_prod/.secrets.toml",
+        "settings/custom_labels.toml"
     ]]
 )
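Entries in the newly registered TOML files resolve through the usual Dynaconf accessors; a sketch, with attribute names following set_custom_labels() in utils.py above:

from pr_agent.config_loader import get_settings

if get_settings().config.enable_custom_labels:  # gated by configuration
    labels = get_settings().custom_labels       # populated from the custom-labels TOML files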
@@ -5,6 +5,8 @@ from pr_agent.git_providers.github_provider import GithubProvider
 from pr_agent.git_providers.gitlab_provider import GitLabProvider
 from pr_agent.git_providers.local_git_provider import LocalGitProvider
 from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
+from pr_agent.git_providers.gerrit_provider import GerritProvider


 _GIT_PROVIDERS = {
     'github': GithubProvider,

@@ -12,7 +14,8 @@ _GIT_PROVIDERS = {
     'bitbucket': BitbucketProvider,
     'azure': AzureDevopsProvider,
     'codecommit': CodeCommitProvider,
-    'local' : LocalGitProvider
+    'local' : LocalGitProvider,
+    'gerrit': GerritProvider,
 }

 def get_git_provider():
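A sketch of how the extended registry might be exercised; the provider key is read from settings, so selecting Gerrit is one override away (Gerrit credentials are assumed to be configured separately):

from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider

get_settings().set("config.git_provider", "gerrit")
provider_class = get_git_provider()  # resolves 'gerrit' to GerritProvider via _GIT_PROVIDERS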
@@ -1,10 +1,11 @@
 import json
-import logging
 from typing import Optional, Tuple
 from urllib.parse import urlparse

 import os

+from ..log import get_logger

 AZURE_DEVOPS_AVAILABLE = True
 try:
     from msrest.authentication import BasicAuthentication

@@ -38,7 +39,8 @@ class AzureDevopsProvider:
             self.set_pr(pr_url)

     def is_supported(self, capability: str) -> bool:
-        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels', 'remove_initial_comment']:
+        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels',
+                          'remove_initial_comment', 'gfm_markdown']:
             return False
         return True

@@ -54,7 +56,7 @@ class AzureDevopsProvider:
                                                        path=".pr_agent.toml")
             return contents
         except Exception as e:
-            logging.exception("get repo settings error")
+            get_logger().exception("get repo settings error")
             return ""

     def get_files(self):

@@ -87,6 +89,8 @@ class AzureDevopsProvider:
             changes_obj = self.azure_devops_client.get_changes(project=self.workspace_slug,
                                                                repository_id=self.repo_slug, commit_id=c.commit_id)
             for i in changes_obj.changes:
+                if(i['item']['gitObjectType'] == 'tree'):
+                    continue
                 diffs.append(i['item']['path'])
                 diff_types[i['item']['path']] = i['changeType']

@@ -97,14 +101,18 @@ class AzureDevopsProvider:
                 continue

             version = GitVersionDescriptor(version=head_sha.commit_id, version_type='commit')
-            new_file_content_str = self.azure_devops_client.get_item(repository_id=self.repo_slug,
-                                                                     path=file,
-                                                                     project=self.workspace_slug,
-                                                                     version_descriptor=version,
-                                                                     download=False,
-                                                                     include_content=True)
+            try:
+                new_file_content_str = self.azure_devops_client.get_item(repository_id=self.repo_slug,
+                                                                         path=file,
+                                                                         project=self.workspace_slug,
+                                                                         version_descriptor=version,
+                                                                         download=False,
+                                                                         include_content=True)

-            new_file_content_str = new_file_content_str.content
+                new_file_content_str = new_file_content_str.content
+            except Exception as error:
+                get_logger().error("Failed to retrieve new file content of %s at version %s. Error: %s", file, version, str(error))
+                new_file_content_str = ""

             edit_type = EDIT_TYPE.MODIFIED
             if diff_types[file] == 'add':

@@ -115,13 +123,17 @@ class AzureDevopsProvider:
                 edit_type = EDIT_TYPE.RENAMED

             version = GitVersionDescriptor(version=base_sha.commit_id, version_type='commit')
-            original_file_content_str = self.azure_devops_client.get_item(repository_id=self.repo_slug,
+            try:
+                original_file_content_str = self.azure_devops_client.get_item(repository_id=self.repo_slug,
                                                                               path=file,
                                                                               project=self.workspace_slug,
                                                                               version_descriptor=version,
                                                                               download=False,
                                                                               include_content=True)
-            original_file_content_str = original_file_content_str.content
+                original_file_content_str = original_file_content_str.content
+            except Exception as error:
+                get_logger().error("Failed to retrieve original file content of %s at version %s. Error: %s", file, version, str(error))
+                original_file_content_str = ""

             patch = load_large_diff(file, new_file_content_str, original_file_content_str)

@@ -155,7 +167,7 @@ class AzureDevopsProvider:
                                                                    pull_request_id=self.pr_num,
                                                                    git_pull_request_to_update=updated_pr)
         except Exception as e:
-            logging.exception(f"Could not update pull request {self.pr_num} description: {e}")
+            get_logger().exception(f"Could not update pull request {self.pr_num} description: {e}")

     def remove_initial_comment(self):
         return ""  # not implemented yet

@@ -224,9 +236,6 @@ class AzureDevopsProvider:
     def _parse_pr_url(pr_url: str) -> Tuple[str, int]:
         parsed_url = urlparse(pr_url)

-        if 'azure.com' not in parsed_url.netloc:
-            raise ValueError("The provided URL is not a valid Azure DevOps URL")
-
         path_parts = parsed_url.path.strip('/').split('/')

         if len(path_parts) < 6 or path_parts[4] != 'pullrequest':
@@ -1,5 +1,4 @@
 import json
-import logging
 from typing import Optional, Tuple
 from urllib.parse import urlparse

@@ -7,7 +6,9 @@ import requests
 from atlassian.bitbucket import Cloud
 from starlette_context import context

+from ..algo.pr_processing import find_line_number_of_relevant_line_in_file
 from ..config_loader import get_settings
+from ..log import get_logger
 from .git_provider import FilePatchInfo, GitProvider


@@ -35,9 +36,8 @@ class BitbucketProvider(GitProvider):
         self.incremental = incremental
         if pr_url:
             self.set_pr(pr_url)
-            self.bitbucket_comment_api_url = self.pr._BitbucketBase__data["links"][
-                "comments"
-            ]["href"]
+            self.bitbucket_comment_api_url = self.pr._BitbucketBase__data["links"]["comments"]["href"]
+            self.bitbucket_pull_request_api_url = self.pr._BitbucketBase__data["links"]['self']['href']

     def get_repo_settings(self):
         try:

@@ -61,14 +61,14 @@ class BitbucketProvider(GitProvider):

             if not relevant_lines_start or relevant_lines_start == -1:
                 if get_settings().config.verbosity_level >= 2:
-                    logging.exception(
+                    get_logger().exception(
                         f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
                     )
                 continue

             if relevant_lines_end < relevant_lines_start:
                 if get_settings().config.verbosity_level >= 2:
-                    logging.exception(
+                    get_logger().exception(
                         f"Failed to publish code suggestion, "
                         f"relevant_lines_end is {relevant_lines_end} and "
                         f"relevant_lines_start is {relevant_lines_start}"

@@ -97,16 +97,11 @@ class BitbucketProvider(GitProvider):
             return True
         except Exception as e:
             if get_settings().config.verbosity_level >= 2:
-                logging.error(f"Failed to publish code suggestion, error: {e}")
+                get_logger().error(f"Failed to publish code suggestion, error: {e}")
             return False

     def is_supported(self, capability: str) -> bool:
-        if capability in [
-            "get_issue_comments",
-            "create_inline_comment",
-            "publish_inline_comments",
-            "get_labels",
-        ]:
+        if capability in ['get_issue_comments', 'publish_inline_comments', 'get_labels', 'gfm_markdown']:
             return False
         return True

@@ -147,21 +142,39 @@ class BitbucketProvider(GitProvider):
     def remove_initial_comment(self):
         try:
             for comment in self.temp_comments:
-                self.pr.delete(f"comments/{comment}")
+                self.remove_comment(comment)
         except Exception as e:
-            logging.exception(f"Failed to remove temp comments, error: {e}")
+            get_logger().exception(f"Failed to remove temp comments, error: {e}")

-    def publish_inline_comment(
-        self, comment: str, from_line: int, to_line: int, file: str
-    ):
-        payload = json.dumps(
-            {
-                "content": {
-                    "raw": comment,
-                },
-                "inline": {"to": from_line, "path": file},
-            }
-        )
+    def remove_comment(self, comment):
+        try:
+            self.pr.delete(f"comments/{comment}")
+        except Exception as e:
+            get_logger().exception(f"Failed to remove comment, error: {e}")
+
+    # function to create_inline_comment
+    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
+        position, absolute_position = find_line_number_of_relevant_line_in_file(self.get_diff_files(), relevant_file.strip('`'), relevant_line_in_file)
+        if position == -1:
+            if get_settings().config.verbosity_level >= 2:
+                get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
+            subject_type = "FILE"
+        else:
+            subject_type = "LINE"
+        path = relevant_file.strip()
+        return dict(body=body, path=path, position=absolute_position) if subject_type == "LINE" else {}
+
+    def publish_inline_comment(self, comment: str, from_line: int, file: str):
+        payload = json.dumps({
+            "content": {
+                "raw": comment,
+            },
+            "inline": {
+                "to": from_line,
+                "path": file
+            },
+        })
         response = requests.request(
             "POST", self.bitbucket_comment_api_url, data=payload, headers=self.headers
         )

@@ -169,9 +182,7 @@ class BitbucketProvider(GitProvider):

     def publish_inline_comments(self, comments: list[dict]):
         for comment in comments:
-            self.publish_inline_comment(
-                comment["body"], comment["start_line"], comment["line"], comment["path"]
-            )
+            self.publish_inline_comment(comment['body'], comment['start_line'], comment['path'])

     def get_title(self):
         return self.pr.title

@@ -238,16 +249,22 @@ class BitbucketProvider(GitProvider):

     def get_commit_messages(self):
         return ""  # not implemented yet

-    # bitbucket does not support labels
-    def publish_description(self, pr_title: str, pr_body: str):
-        pass
-
-    def create_inline_comment(
-        self, body: str, relevant_file: str, relevant_line_in_file: str
-    ):
-        pass
+    def publish_description(self, pr_title: str, description: str):
+        payload = json.dumps({
+            "description": description,
+            "title": pr_title
+        })
+
+        response = requests.request("PUT", self.bitbucket_pull_request_api_url, headers=self.headers, data=payload)
+        return response

     # bitbucket does not support labels
-    def publish_labels(self, pr_types: list):
+    def publish_labels(self, labels):
         pass

     # bitbucket does not support labels
     def get_labels(self):
         pass
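A sketch of the reworked Bitbucket inline-comment flow; the PR URL and file details are placeholders, and the dict keys follow create_inline_comment's return value above:

from pr_agent.git_providers.bitbucket_provider import BitbucketProvider

provider = BitbucketProvider("https://bitbucket.org/workspace/repo/pull-requests/1")  # placeholder URL
comment = provider.create_inline_comment("Consider a guard clause here", "src/app.py", "if user is None:")
if comment:  # an empty dict means the relevant line could not be located
    provider.publish_inline_comment(comment["body"], comment["position"], comment["path"])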
@@ -54,17 +54,22 @@ class CodeCommitClient:
     def __init__(self):
         self.boto_client = None

+    def is_supported(self, capability: str) -> bool:
+        if capability in ["gfm_markdown"]:
+            return False
+        return True
+
     def _connect_boto_client(self):
         try:
             self.boto_client = boto3.client("codecommit")
         except Exception as e:
-            raise ValueError(f"Failed to connect to AWS CodeCommit: {e}")
+            raise ValueError(f"Failed to connect to AWS CodeCommit: {e}") from e

     def get_differences(self, repo_name: int, destination_commit: str, source_commit: str):
         """
         Get the differences between two commits in CodeCommit.

-        Parameters:
+        Args:
         - repo_name: Name of the repository
         - destination_commit: Commit hash you want to merge into (the "before" hash) (usually on the main or master branch)
         - source_commit: Commit hash of the code you are adding (the "after" branch)

@@ -73,8 +78,8 @@ class CodeCommitClient:
         - List of CodeCommitDifferencesResponse objects

         Boto3 Documentation:
-        aws codecommit get-differences
-        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/get_differences.html
+        - aws codecommit get-differences
+        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/get_differences.html
         """
         if self.boto_client is None:
             self._connect_boto_client()

@@ -90,7 +95,11 @@ class CodeCommitClient:
             ):
                 differences.extend(page.get("differences", []))
         except botocore.exceptions.ClientError as e:
-            raise ValueError(f"Failed to retrieve differences from CodeCommit PR #{self.pr_num}") from e
+            if e.response["Error"]["Code"] == 'RepositoryDoesNotExistException':
+                raise ValueError(f"CodeCommit cannot retrieve differences: Repository does not exist: {repo_name}") from e
+            raise ValueError(f"CodeCommit cannot retrieve differences for {source_commit}..{destination_commit}") from e
+        except Exception as e:
+            raise ValueError(f"CodeCommit cannot retrieve differences for {source_commit}..{destination_commit}") from e

         output = []
         for json in differences:

@@ -101,7 +110,7 @@ class CodeCommitClient:
         """
         Retrieve a file from CodeCommit.

-        Parameters:
+        Args:
         - repo_name: Name of the repository
         - file_path: Path to the file you are retrieving
         - sha_hash: Commit hash of the file you are retrieving

@@ -110,8 +119,8 @@ class CodeCommitClient:
         - File contents

         Boto3 Documentation:
-        aws codecommit get_file
-        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/get_file.html
+        - aws codecommit get_file
+        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/get_file.html
         """
         if not file_path:
             return ""

@@ -122,6 +131,8 @@ class CodeCommitClient:
         try:
             response = self.boto_client.get_file(repositoryName=repo_name, commitSpecifier=sha_hash, filePath=file_path)
         except botocore.exceptions.ClientError as e:
+            if e.response["Error"]["Code"] == 'RepositoryDoesNotExistException':
+                raise ValueError(f"CodeCommit cannot retrieve PR: Repository does not exist: {repo_name}") from e
             # if the file does not exist, but is flagged as optional, then return an empty string
             if optional and e.response["Error"]["Code"] == 'FileDoesNotExistException':
                 return ""

@@ -133,19 +144,20 @@ class CodeCommitClient:

         return response.get("fileContent", "")

-    def get_pr(self, pr_number: int):
+    def get_pr(self, repo_name: str, pr_number: int):
         """
         Get information about a CodeCommit PR.

-        Parameters:
+        Args:
+        - repo_name: Name of the repository
         - pr_number: The PR number you are requesting

         Returns:
         - CodeCommitPullRequestResponse object

         Boto3 Documentation:
-        aws codecommit get_pull_request
-        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/get_pull_request.html
+        - aws codecommit get_pull_request
+        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/get_pull_request.html
         """
         if self.boto_client is None:
             self._connect_boto_client()

@@ -155,6 +167,8 @@ class CodeCommitClient:
         except botocore.exceptions.ClientError as e:
             if e.response["Error"]["Code"] == 'PullRequestDoesNotExistException':
                 raise ValueError(f"CodeCommit cannot retrieve PR: PR number does not exist: {pr_number}") from e
+            if e.response["Error"]["Code"] == 'RepositoryDoesNotExistException':
+                raise ValueError(f"CodeCommit cannot retrieve PR: Repository does not exist: {repo_name}") from e
             raise ValueError(f"CodeCommit cannot retrieve PR: {pr_number}: boto client error") from e
         except Exception as e:
             raise ValueError(f"CodeCommit cannot retrieve PR: {pr_number}") from e

@@ -164,35 +178,95 @@ class CodeCommitClient:

         return CodeCommitPullRequestResponse(response.get("pullRequest", {}))

-    def publish_comment(self, repo_name: str, pr_number: int, destination_commit: str, source_commit: str, comment: str):
+    def publish_description(self, pr_number: int, pr_title: str, pr_body: str):
         """
-        Publish a comment to a pull request
+        Set the title and description on a pull request

-        Parameters:
-        - repo_name: name of the repository
-        - pr_number: number of the pull request
-        - destination_commit: The commit hash you want to merge into (the "before" hash) (usually on the main or master branch)
-        - source_commit: The commit hash of the code you are adding (the "after" branch)
-        - pr_comment: comment
+        Args:
+        - pr_number: the AWS CodeCommit pull request number
+        - pr_title: title of the pull request
+        - pr_body: body of the pull request

         Returns:
         - None

         Boto3 Documentation:
-        aws codecommit post_comment_for_pull_request
-        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/post_comment_for_pull_request.html
+        - aws codecommit update_pull_request_title
+        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/update_pull_request_title.html
+        - aws codecommit update_pull_request_description
+        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/update_pull_request_description.html
         """
         if self.boto_client is None:
             self._connect_boto_client()

         try:
-            self.boto_client.post_comment_for_pull_request(
-                pullRequestId=str(pr_number),
-                repositoryName=repo_name,
-                beforeCommitId=destination_commit,
-                afterCommitId=source_commit,
-                content=comment,
-            )
+            self.boto_client.update_pull_request_title(pullRequestId=str(pr_number), title=pr_title)
+            self.boto_client.update_pull_request_description(pullRequestId=str(pr_number), description=pr_body)
         except botocore.exceptions.ClientError as e:
+            if e.response["Error"]["Code"] == 'PullRequestDoesNotExistException':
+                raise ValueError(f"PR number does not exist: {pr_number}") from e
+            if e.response["Error"]["Code"] == 'InvalidTitleException':
+                raise ValueError(f"Invalid title for PR number: {pr_number}") from e
+            if e.response["Error"]["Code"] == 'InvalidDescriptionException':
+                raise ValueError(f"Invalid description for PR number: {pr_number}") from e
+            if e.response["Error"]["Code"] == 'PullRequestAlreadyClosedException':
+                raise ValueError(f"PR is already closed: PR number: {pr_number}") from e
+            raise ValueError(f"Boto3 client error calling publish_description") from e
+        except Exception as e:
+            raise ValueError(f"Error calling publish_description") from e
+
+    def publish_comment(self, repo_name: str, pr_number: int, destination_commit: str, source_commit: str, comment: str, annotation_file: str = None, annotation_line: int = None):
+        """
+        Publish a comment to a pull request
+
+        Args:
+        - repo_name: name of the repository
+        - pr_number: number of the pull request
+        - destination_commit: The commit hash you want to merge into (the "before" hash) (usually on the main or master branch)
+        - source_commit: The commit hash of the code you are adding (the "after" branch)
+        - comment: The comment you want to publish
+        - annotation_file: The file you want to annotate (optional)
+        - annotation_line: The line number you want to annotate (optional)
+
+        Comment annotations for CodeCommit are different than GitHub.
+        CodeCommit only designates the starting line number for the comment.
+        It does not support the ending line number to highlight a range of lines.
+
+        Returns:
+        - None
+
+        Boto3 Documentation:
+        - aws codecommit post_comment_for_pull_request
+        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/post_comment_for_pull_request.html
+        """
+        if self.boto_client is None:
+            self._connect_boto_client()
+
+        try:
+            # If the comment has code annotations,
+            # then set the file path and line number in the location dictionary
+            if annotation_file and annotation_line:
+                self.boto_client.post_comment_for_pull_request(
+                    pullRequestId=str(pr_number),
+                    repositoryName=repo_name,
+                    beforeCommitId=destination_commit,
+                    afterCommitId=source_commit,
+                    content=comment,
+                    location={
+                        "filePath": annotation_file,
+                        "filePosition": annotation_line,
+                        "relativeFileVersion": "AFTER",
+                    },
+                )
+            else:
+                # The comment does not have code annotations
+                self.boto_client.post_comment_for_pull_request(
+                    pullRequestId=str(pr_number),
+                    repositoryName=repo_name,
+                    beforeCommitId=destination_commit,
+                    afterCommitId=source_commit,
+                    content=comment,
+                )
+        except botocore.exceptions.ClientError as e:
+            if e.response["Error"]["Code"] == 'RepositoryDoesNotExistException':
+                raise ValueError(f"Repository does not exist: {repo_name}") from e
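A sketch of calling the extended client; repository and commit identifiers are placeholders, and AWS credentials are assumed to be configured for boto3:

from pr_agent.git_providers.codecommit_client import CodeCommitClient

client = CodeCommitClient()
client.publish_comment(
    repo_name="my-repo",
    pr_number=42,
    destination_commit="abc123",
    source_commit="def456",
    comment="Consider extracting this into a helper.",
    annotation_file="src/app.py",  # optional: anchors the comment to a file
    annotation_line=10,            # optional: starting line only; CodeCommit has no end line
)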
@@ -1,16 +1,16 @@
-import logging
 import os
 import re
 from collections import Counter
 from typing import List, Optional, Tuple
 from urllib.parse import urlparse

-from ..algo.language_handler import is_valid_file, language_extension_map
-from ..algo.pr_processing import clip_tokens
-from ..algo.utils import load_large_diff
-from ..config_loader import get_settings
-from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider, IncrementalPR
 from pr_agent.git_providers.codecommit_client import CodeCommitClient

+from ..algo.language_handler import is_valid_file, language_extension_map
+from ..algo.utils import load_large_diff
+from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
+from ..log import get_logger


 class PullRequestCCMimic:
     """

@@ -73,6 +73,7 @@ class CodeCommitProvider(GitProvider):
             "create_inline_comment",
             "publish_inline_comments",
             "get_labels",
+            "gfm_markdown"
         ]:
             return False
         return True

@@ -153,26 +154,63 @@ class CodeCommitProvider(GitProvider):
         return self.diff_files

     def publish_description(self, pr_title: str, pr_body: str):
-        return ""  # not implemented yet
+        try:
+            self.codecommit_client.publish_description(
+                pr_number=self.pr_num,
+                pr_title=pr_title,
+                pr_body=CodeCommitProvider._add_additional_newlines(pr_body),
+            )
+        except Exception as e:
+            raise ValueError(f"CodeCommit Cannot publish description for PR: {self.pr_num}") from e

     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
         if is_temporary:
-            logging.info(pr_comment)
+            get_logger().info(pr_comment)
             return

+        pr_comment = CodeCommitProvider._remove_markdown_html(pr_comment)
+        pr_comment = CodeCommitProvider._add_additional_newlines(pr_comment)
+
         try:
             self.codecommit_client.publish_comment(
                 repo_name=self.repo_name,
-                pr_number=str(self.pr_num),
+                pr_number=self.pr_num,
                 destination_commit=self.pr.destination_commit,
                 source_commit=self.pr.source_commit,
                 comment=pr_comment,
             )
         except Exception as e:
-            raise ValueError(f"CodeCommit Cannot post comment for PR: {self.pr_num}") from e
+            raise ValueError(f"CodeCommit Cannot publish comment for PR: {self.pr_num}") from e

     def publish_code_suggestions(self, code_suggestions: list) -> bool:
-        return [""]  # not implemented yet
+        counter = 1
+        for suggestion in code_suggestions:
+            # Verify that each suggestion has the required keys
+            if not all(key in suggestion for key in ["body", "relevant_file", "relevant_lines_start"]):
+                get_logger().warning(f"Skipping code suggestion #{counter}: Each suggestion must have 'body', 'relevant_file', 'relevant_lines_start' keys")
+                continue
+
+            # Publish the code suggestion to CodeCommit
+            try:
+                get_logger().debug(f"Code Suggestion #{counter} in file: {suggestion['relevant_file']}: {suggestion['relevant_lines_start']}")
+                self.codecommit_client.publish_comment(
+                    repo_name=self.repo_name,
+                    pr_number=self.pr_num,
+                    destination_commit=self.pr.destination_commit,
+                    source_commit=self.pr.source_commit,
+                    comment=suggestion["body"],
+                    annotation_file=suggestion["relevant_file"],
+                    annotation_line=suggestion["relevant_lines_start"],
+                )
+            except Exception as e:
+                raise ValueError(f"CodeCommit Cannot publish code suggestions for PR: {self.pr_num}") from e
+
+            counter += 1
+
+        # The calling function passes in a list of code suggestions, and this function publishes each suggestion one at a time.
+        # If we were to return False here, the calling function will attempt to publish the same list of code suggestions again, one at a time.
+        # Since this function publishes the suggestions one at a time anyway, we always return True here to avoid the retry.
+        return True

     def publish_labels(self, labels):
         return [""]  # not implemented yet

@@ -183,7 +221,11 @@ class CodeCommitProvider(GitProvider):
     def remove_initial_comment(self):
         return ""  # not implemented yet

+    def remove_comment(self, comment):
+        return ""  # not implemented yet
+
     def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
         # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codecommit/client/post_comment_for_compared_commit.html
         raise NotImplementedError("CodeCommit provider does not support publishing inline comments yet")

     def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):

@@ -193,14 +235,26 @@ class CodeCommitProvider(GitProvider):
         raise NotImplementedError("CodeCommit provider does not support publishing inline comments yet")

     def get_title(self):
-        return self.pr.get("title", "")
+        return self.pr.title

+    def get_pr_id(self):
+        """
+        Returns the PR ID in the format: "repo_name/pr_number".
+        Note: This is an internal identifier for PR-Agent,
+        and is not the same as the CodeCommit PR identifier.
+        """
+        try:
+            pr_id = f"{self.repo_name}/{self.pr_num}"
+            return pr_id
+        except:
+            return ""
+
     def get_languages(self):
         """
         Returns a dictionary of languages, containing the percentage of each language used in the PR.

         Returns:
-        dict: A dictionary where each key is a language name and the corresponding value is the percentage of that language in the PR.
+        - dict: A dictionary where each key is a language name and the corresponding value is the percentage of that language in the PR.
         """
         commit_files = self.get_files()
         filenames = [ item.filename for item in commit_files ]

@@ -244,18 +298,29 @@ class CodeCommitProvider(GitProvider):
         return self.codecommit_client.get_file(self.repo_name, settings_filename, self.pr.source_commit, optional=True)

     def add_eyes_reaction(self, issue_comment_id: int) -> Optional[int]:
+        get_logger().info("CodeCommit provider does not support eyes reaction yet")
         return True

     def remove_reaction(self, issue_comment_id: int, reaction_id: int) -> bool:
+        get_logger().info("CodeCommit provider does not support removing reactions yet")
         return True

     @staticmethod
     def _parse_pr_url(pr_url: str) -> Tuple[str, int]:
         """
         Parse the CodeCommit PR URL and return the repository name and PR number.

+        Args:
+        - pr_url: the full AWS CodeCommit pull request URL
+
+        Returns:
+        - Tuple[str, int]: A tuple containing the repository name and PR number.
         """
+        # Example PR URL:
+        # https://us-east-1.console.aws.amazon.com/codesuite/codecommit/repositories/__MY_REPO__/pull-requests/123456"
         parsed_url = urlparse(pr_url)

-        if "us-east-1.console.aws.amazon.com" not in parsed_url.netloc:
+        if not CodeCommitProvider._is_valid_codecommit_hostname(parsed_url.netloc):
             raise ValueError(f"The provided URL is not a valid CodeCommit URL: {pr_url}")

         path_parts = parsed_url.path.strip("/").split("/")

@@ -278,17 +343,33 @@ class CodeCommitProvider(GitProvider):

         return repo_name, pr_number

+    @staticmethod
+    def _is_valid_codecommit_hostname(hostname: str) -> bool:
+        """
+        Check if the provided hostname is a valid AWS CodeCommit hostname.
+
+        This is not an exhaustive check of AWS region names,
+        but instead uses a regex to check for matching AWS region patterns.
+
+        Args:
+        - hostname: the hostname to check
+
+        Returns:
+        - bool: True if the hostname is valid, False otherwise.
+        """
+        return re.match(r"^[a-z]{2}-(gov-)?[a-z]+-\d\.console\.aws\.amazon\.com$", hostname) is not None
+
     def _get_pr(self):
-        response = self.codecommit_client.get_pr(self.pr_num)
+        response = self.codecommit_client.get_pr(self.repo_name, self.pr_num)

         if len(response.targets) == 0:
             raise ValueError(f"No files found in CodeCommit PR: {self.pr_num}")

-        # TODO: implement support for multiple commits in one CodeCommit PR
-        #       for now, we are only using the first commit in the PR
+        # TODO: implement support for multiple targets in one CodeCommit PR
+        #       for now, we are only using the first target in the PR
         if len(response.targets) > 1:
-            logging.warning(
-                "Multiple commits in one PR is not supported for CodeCommit yet. Continuing, using the first commit only..."
+            get_logger().warning(
+                "Multiple targets in one PR is not supported for CodeCommit yet. Continuing, using the first target only..."
             )

         # Return our object that mimics PullRequest class from the PyGithub library

@@ -306,13 +387,52 @@ class CodeCommitProvider(GitProvider):
         return ""  # not implemented yet

     @staticmethod
-    def _get_edit_type(codecommit_change_type):
+    def _add_additional_newlines(body: str) -> str:
+        """
+        Replace single newlines in a PR body with double newlines.
+
+        CodeCommit Markdown does not seem to render as well as GitHub Markdown,
+        so we add additional newlines to the PR body to make it more readable in CodeCommit.
+
+        Args:
+        - body: the PR body
+
+        Returns:
+        - str: the PR body with the double newlines added
+        """
+        return re.sub(r'(?<!\n)\n(?!\n)', '\n\n', body)
+
+    @staticmethod
+    def _remove_markdown_html(comment: str) -> str:
+        """
+        Remove the HTML tags from a PR comment.
+
+        CodeCommit Markdown does not seem to render as well as GitHub Markdown,
+        so we remove the HTML tags from the PR comment to make it more readable in CodeCommit.
+
+        Args:
+        - comment: the PR comment
+
+        Returns:
+        - str: the PR comment with the HTML tags removed
+        """
+        comment = comment.replace("<details>", "")
+        comment = comment.replace("</details>", "")
+        comment = comment.replace("<summary>", "")
+        comment = comment.replace("</summary>", "")
+        return comment
+
+    @staticmethod
+    def _get_edit_type(codecommit_change_type: str):
         """
         Convert the CodeCommit change type string to the EDIT_TYPE enum.
+        The CodeCommit change type string is returned from the get_differences SDK method.
+
+        Args:
+        - codecommit_change_type: the CodeCommit change type string

         Returns:
-        An EDIT_TYPE enum representing the modified, added, deleted, or renamed file in the PR diff.
+        - An EDIT_TYPE enum representing the modified, added, deleted, or renamed file in the PR diff.
         """
         t = codecommit_change_type.upper()
         edit_type = None

@@ -333,6 +453,12 @@ class CodeCommitProvider(GitProvider):
         The returned extensions will include the dot "." prefix,
         to accommodate for the dots in the existing language_extension_map settings.
         Filenames with no extension will return an empty string for the extension.
+
+        Args:
+        - filenames: a list of filenames
+
+        Returns:
+        - list: A list of file extensions, including the dot "." prefix.
         """
         extensions = []
         for filename in filenames:

@@ -349,6 +475,12 @@ class CodeCommitProvider(GitProvider):
         Return a dictionary containing the programming language name (as the key),
         and the percentage that language is used (as the value),
         given a list of file extensions.
+
+        Args:
+        - extensions: a list of file extensions
+
+        Returns:
+        - dict: A dictionary where each key is a language name and the corresponding value is the percentage of that language in the PR.
         """
         total_files = len(extensions)
         if total_files == 0:
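The hostname regex above accepts standard and GovCloud region consoles; a quick sketch of its behavior, with example region names:

import re

CODECOMMIT_HOST = r"^[a-z]{2}-(gov-)?[a-z]+-\d\.console\.aws\.amazon\.com$"

assert re.match(CODECOMMIT_HOST, "eu-west-1.console.aws.amazon.com")      # standard region
assert re.match(CODECOMMIT_HOST, "us-gov-west-1.console.aws.amazon.com")  # GovCloud region
assert not re.match(CODECOMMIT_HOST, "evil.example.com")                  # rejected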
pr_agent/git_providers/gerrit_provider.py (new file, 403 lines)
@@ -0,0 +1,403 @@
import json
import os
import pathlib
import shutil
import subprocess
import uuid
from collections import Counter, namedtuple
from pathlib import Path
from tempfile import NamedTemporaryFile, mkdtemp

import requests
import urllib3.util
from git import Repo

from pr_agent.config_loader import get_settings
from pr_agent.git_providers.git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
from pr_agent.git_providers.local_git_provider import PullRequestMimic
from pr_agent.log import get_logger


def _call(*command, **kwargs) -> (int, str, str):
    res = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
        **kwargs,
    )
    return res.stdout.decode()


def clone(url, directory):
    get_logger().info("Cloning %s to %s", url, directory)
    stdout = _call('git', 'clone', "--depth", "1", url, directory)
    get_logger().info(stdout)


def fetch(url, refspec, cwd):
    get_logger().info("Fetching %s %s", url, refspec)
    stdout = _call(
        'git', 'fetch', '--depth', '2', url, refspec,
        cwd=cwd
    )
    get_logger().info(stdout)


def checkout(cwd):
    get_logger().info("Checking out")
    stdout = _call('git', 'checkout', "FETCH_HEAD", cwd=cwd)
    get_logger().info(stdout)


def show(*args, cwd=None):
    get_logger().info("Show")
    return _call('git', 'show', *args, cwd=cwd)


def diff(*args, cwd=None):
    get_logger().info("Diff")
    patch = _call('git', 'diff', *args, cwd=cwd)
    if not patch:
        get_logger().warning("No changes found")
        return
    return patch


def reset_local_changes(cwd):
    get_logger().info("Reset local changes")
    _call('git', 'checkout', "--force", cwd=cwd)


def add_comment(url: urllib3.util.Url, refspec, message):
    *_, patchset, changenum = refspec.rsplit("/")
    message = "'" + message.replace("'", "'\"'\"'") + "'"
    return _call(
        "ssh",
        "-p", str(url.port),
        f"{url.auth}@{url.host}",
        "gerrit", "review",
        "--message", message,
        # "--code-review", score,
        f"{patchset},{changenum}",
    )


def list_comments(url: urllib3.util.Url, refspec):
    *_, patchset, _ = refspec.rsplit("/")
    stdout = _call(
        "ssh",
        "-p", str(url.port),
        f"{url.auth}@{url.host}",
        "gerrit", "query",
        "--comments",
        "--current-patch-set", patchset,
        "--format", "JSON",
    )
    change_set, *_ = stdout.splitlines()
    return json.loads(change_set)["currentPatchSet"]["comments"]


def prepare_repo(url: urllib3.util.Url, project, refspec):
    repo_url = (f"{url.scheme}://{url.auth}@{url.host}:{url.port}/{project}")

    directory = pathlib.Path(mkdtemp())
    clone(repo_url, directory),
    fetch(repo_url, refspec, cwd=directory)
    checkout(cwd=directory)
    return directory


def adopt_to_gerrit_message(message):
    lines = message.splitlines()
    buf = []
    for line in lines:
        # remove markdown formatting
        line = (line.replace("*", "")
                .replace("``", "`")
                .replace("<details>", "")
                .replace("</details>", "")
                .replace("<summary>", "")
                .replace("</summary>", ""))

        line = line.strip()
        if line.startswith('#'):
            buf.append("\n" +
                       line.replace('#', '').removesuffix(":").strip() +
                       ":")
            continue
        elif line.startswith('-'):
            buf.append(line.removeprefix('-').strip())
            continue
        else:
            buf.append(line)
    return "\n".join(buf).strip()


def add_suggestion(src_filename, context: str, start, end: int):
    with (
        NamedTemporaryFile("w", delete=False) as tmp,
        open(src_filename, "r") as src
    ):
        lines = src.readlines()
        tmp.writelines(lines[:start - 1])
        if context:
            tmp.write(context)
        tmp.writelines(lines[end:])

    shutil.copy(tmp.name, src_filename)
    os.remove(tmp.name)


def upload_patch(patch, path):
    patch_server_endpoint = get_settings().get(
        'gerrit.patch_server_endpoint')
    patch_server_token = get_settings().get(
        'gerrit.patch_server_token')

    response = requests.post(
        patch_server_endpoint,
        json={
            "content": patch,
            "path": path,
        },
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {patch_server_token}",
        }
    )
    response.raise_for_status()
    patch_server_endpoint = patch_server_endpoint.rstrip("/")
    return patch_server_endpoint + "/" + path


class GerritProvider(GitProvider):

    def __init__(self, key: str, incremental=False):
        self.project, self.refspec = key.split(':')
        assert self.project, "Project name is required"
        assert self.refspec, "Refspec is required"
        base_url = get_settings().get('gerrit.url')
        assert base_url, "Gerrit URL is required"
        user = get_settings().get('gerrit.user')
        assert user, "Gerrit user is required"

        parsed = urllib3.util.parse_url(base_url)
        self.parsed_url = urllib3.util.parse_url(
            f"{parsed.scheme}://{user}@{parsed.host}:{parsed.port}"
        )

        self.repo_path = prepare_repo(
            self.parsed_url, self.project, self.refspec
        )
        self.repo = Repo(self.repo_path)
        assert self.repo

        self.pr = PullRequestMimic(self.get_pr_title(), self.get_diff_files())

    def get_pr_title(self):
        """
        Substitutes the branch-name as the PR-mimic title.
        """
        return self.repo.branches[0].name

    def get_issue_comments(self):
        comments = list_comments(self.parsed_url, self.refspec)
        Comments = namedtuple('Comments', ['reversed'])
        Comment = namedtuple('Comment', ['body'])
        return Comments([Comment(c['message']) for c in reversed(comments)])

    def get_labels(self):
        raise NotImplementedError(
            'Getting labels is not implemented for the gerrit provider')

    def add_eyes_reaction(self, issue_comment_id: int):
        raise NotImplementedError(
            'Adding reactions is not implemented for the gerrit provider')

    def remove_reaction(self, issue_comment_id: int, reaction_id: int):
        raise NotImplementedError(
            'Removing reactions is not implemented for the gerrit provider')

    def get_commit_messages(self):
        return [self.repo.head.commit.message]

    def get_repo_settings(self):
        try:
            with open(self.repo_path / ".pr_agent.toml", 'rb') as f:
                contents = f.read()
            return contents
        except OSError:
            return b""

    def get_diff_files(self) -> list[FilePatchInfo]:
        diffs = self.repo.head.commit.diff(
            self.repo.head.commit.parents[0],  # previous commit
            create_patch=True,
            R=True
        )

        diff_files = []
        for diff_item in diffs:
            if diff_item.a_blob is not None:
                original_file_content_str = (
                    diff_item.a_blob.data_stream.read().decode('utf-8')
                )
            else:
                original_file_content_str = ""  # empty file
            if diff_item.b_blob is not None:
                new_file_content_str = diff_item.b_blob.data_stream.read(). \
                    decode('utf-8')
            else:
                new_file_content_str = ""  # empty file
            edit_type = EDIT_TYPE.MODIFIED
            if diff_item.new_file:
                edit_type = EDIT_TYPE.ADDED
            elif diff_item.deleted_file:
                edit_type = EDIT_TYPE.DELETED
            elif diff_item.renamed_file:
                edit_type = EDIT_TYPE.RENAMED
            diff_files.append(
                FilePatchInfo(
                    original_file_content_str,
                    new_file_content_str,
                    diff_item.diff.decode('utf-8'),
                    diff_item.b_path,
                    edit_type=edit_type,
                    old_filename=None
                    if diff_item.a_path == diff_item.b_path
                    else diff_item.a_path
                )
            )
        self.diff_files = diff_files
        return diff_files

    def get_files(self):
        diff_index = self.repo.head.commit.diff(
            self.repo.head.commit.parents[0],  # previous commit
            R=True
        )
        # Get the list of changed files
        diff_files = [item.a_path for item in diff_index]
        return diff_files

    def get_languages(self):
        """
        Calculate percentage of languages in repository. Used for hunk
        prioritisation.
        """
        # Get all files in repository
        filepaths = [Path(item.path) for item in
                     self.repo.tree().traverse() if item.type == 'blob']
        # Identify language by file extension and count
        lang_count = Counter(
            ext.lstrip('.') for filepath in filepaths for ext in
            [filepath.suffix.lower()])
        # Convert counts to percentages
        total_files = len(filepaths)
        lang_percentage = {lang: count / total_files * 100 for lang, count
                           in lang_count.items()}
        return lang_percentage

    def get_pr_description_full(self):
        return self.repo.head.commit.message

    def get_user_id(self):
        return self.repo.head.commit.author.email

    def is_supported(self, capability: str) -> bool:
        if capability in [
            # 'get_issue_comments',
            'create_inline_comment',
            'publish_inline_comments',
            'get_labels',
            'gfm_markdown'
        ]:
            return False
        return True

    def split_suggestion(self, msg) -> tuple[str, str]:
        is_code_context = False
        description = []
        context = []
        for line in msg.splitlines():
            if line.startswith('```suggestion'):
                is_code_context = True
                continue
            if line.startswith('```'):
                is_code_context = False
                continue
            if is_code_context:
                context.append(line)
            else:
                description.append(
                    line.replace('*', '')
                )

        return (
            '\n'.join(description),
            '\n'.join(context) + '\n' if context else ''
        )

    def publish_code_suggestions(self, code_suggestions: list):
        msg = []
        for suggestion in code_suggestions:
            description, code = self.split_suggestion(suggestion['body'])
            add_suggestion(
                pathlib.Path(self.repo_path) / suggestion["relevant_file"],
                code,
                suggestion["relevant_lines_start"],
                suggestion["relevant_lines_end"],
            )
            patch = diff(cwd=self.repo_path)
            patch_id = uuid.uuid4().hex[0:4]
            path = "/".join(["codium-ai", self.refspec, patch_id])
            full_path = upload_patch(patch, path)
            reset_local_changes(self.repo_path)
            msg.append(f'* {description}\n{full_path}')

        if msg:
            add_comment(self.parsed_url, self.refspec, "\n".join(msg))
            return True

    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        if not is_temporary:
            msg = adopt_to_gerrit_message(pr_comment)
            add_comment(self.parsed_url, self.refspec, msg)

    def publish_description(self, pr_title: str, pr_body: str):
        msg = adopt_to_gerrit_message(pr_body)
        add_comment(self.parsed_url, self.refspec, pr_title + '\n' + msg)

    def publish_inline_comments(self, comments: list[dict]):
        raise NotImplementedError(
            'Publishing inline comments is not implemented for the gerrit '
            'provider')

    def publish_inline_comment(self, body: str, relevant_file: str,
                               relevant_line_in_file: str):
        raise NotImplementedError(
            'Publishing inline comments is not implemented for the gerrit '
            'provider')

    def create_inline_comment(self, body: str, relevant_file: str,
                              relevant_line_in_file: str):
        raise NotImplementedError(
            'Creating inline comments is not implemented for the gerrit '
            'provider')

    def publish_labels(self, labels):
        # Not applicable to the local git provider,
        # but required by the interface
        pass

    def remove_initial_comment(self):
        # remove repo, cloned in previous steps
        # shutil.rmtree(self.repo_path)
        pass

    def remove_comment(self, comment):
        pass

    def get_pr_branch(self):
        return self.repo.head
@@ -1,4 +1,3 @@
-import logging
 from abc import ABC, abstractmethod
 from dataclasses import dataclass

@@ -6,6 +5,8 @@ from dataclasses import dataclass
 from enum import Enum
 from typing import Optional

+from pr_agent.log import get_logger


 class EDIT_TYPE(Enum):
     ADDED = 1

@@ -70,6 +71,10 @@ class GitProvider(ABC):
     def remove_initial_comment(self):
         pass

+    @abstractmethod
+    def remove_comment(self, comment):
+        pass
+
     @abstractmethod
     def get_languages(self):
         pass

@@ -86,11 +91,11 @@ class GitProvider(ABC):
     def get_pr_description_full(self) -> str:
         pass

-    def get_pr_description(self) -> str:
+    def get_pr_description(self, *, full: bool = True) -> str:
         from pr_agent.config_loader import get_settings
         from pr_agent.algo.pr_processing import clip_tokens
         max_tokens = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
-        description = self.get_pr_description_full()
+        description = self.get_pr_description_full() if full else self.get_user_description()
         if max_tokens:
             return clip_tokens(description, max_tokens)
         return description

@@ -127,11 +132,18 @@ class GitProvider(ABC):
     def get_commit_messages(self):
         pass

+    def get_pr_id(self):
+        return ""
+

 def get_main_pr_language(languages, files) -> str:
     """
     Get the main language of the commit. Return an empty string if cannot determine.
     """
     main_language_str = ""
     if not languages:
+        get_logger().info("No languages detected")
         return main_language_str

     try:
         top_language = max(languages, key=languages.get).lower()

@@ -161,12 +173,11 @@ def get_main_pr_language(languages, files) -> str:
                 most_common_extension == 'scala' and top_language == 'scala' or \
                 most_common_extension == 'kt' and top_language == 'kotlin' or \
                 most_common_extension == 'pl' and top_language == 'perl' or \
                 most_common_extension == 'swift' and top_language == 'swift' or \
                 most_common_extension == top_language:
             main_language_str = top_language

     except Exception as e:
-        logging.exception(e)
+        get_logger().exception(e)
         pass

     return main_language_str
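The new keyword-only flag lets callers fetch only the user-authored part of a description; a sketch, with a placeholder PR URL:

from pr_agent.git_providers import get_git_provider

provider = get_git_provider()("https://github.com/org/repo/pull/1")    # placeholder URL
full_description = provider.get_pr_description()                       # complete description, token-clipped
user_description = provider.get_pr_description(full=False)             # falls back to get_user_description()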
@@ -1,20 +1,19 @@
import logging
import hashlib

from datetime import datetime
from typing import Optional, Tuple, Any
from typing import Optional, Tuple
from urllib.parse import urlparse

from github import AppAuthentication, Auth, Github, GithubException, Reaction
from github import AppAuthentication, Auth, Github, GithubException
from retry import retry
from starlette_context import context

from .git_provider import FilePatchInfo, GitProvider, IncrementalPR
from ..algo.language_handler import is_valid_file
from ..algo.pr_processing import clip_tokens, find_line_number_of_relevant_line_in_file
from ..algo.utils import load_large_diff
from ..algo.pr_processing import find_line_number_of_relevant_line_in_file, clip_tokens
from ..config_loader import get_settings
from ..log import get_logger
from ..servers.utils import RateLimitExceeded
from .git_provider import FilePatchInfo, GitProvider, IncrementalPR


class GithubProvider(GitProvider):
@@ -32,7 +31,7 @@ class GithubProvider(GitProvider):
        self.diff_files = None
        self.git_files = None
        self.incremental = incremental
        if pr_url:
        if pr_url and 'pull' in pr_url:
            self.set_pr(pr_url)
            self.last_commit_id = list(self.pr.get_commits())[-1]

@@ -51,20 +50,20 @@ class GithubProvider(GitProvider):
    def get_incremental_commits(self):
        self.commits = list(self.pr.get_commits())

        self.get_previous_review()
        self.previous_review = self.get_previous_review(full=True, incremental=True)
        if self.previous_review:
            self.incremental.commits_range = self.get_commit_range()
            # Get all files changed during the commit range
            self.file_set = dict()
            for commit in self.incremental.commits_range:
                if commit.commit.message.startswith(f"Merge branch '{self._get_repo().default_branch}'"):
                    logging.info(f"Skipping merge commit {commit.commit.message}")
                    get_logger().info(f"Skipping merge commit {commit.commit.message}")
                    continue
                self.file_set.update({file.filename: file for file in commit.files})

    def get_commit_range(self):
        last_review_time = self.previous_review.created_at
        first_new_commit_index = 0
        first_new_commit_index = None
        for index in range(len(self.commits) - 1, -1, -1):
            if self.commits[index].commit.author.date > last_review_time:
                self.incremental.first_new_commit_sha = self.commits[index].sha
@@ -72,15 +71,21 @@ class GithubProvider(GitProvider):
            else:
                self.incremental.last_seen_commit_sha = self.commits[index].sha
                break
        return self.commits[first_new_commit_index:]
        return self.commits[first_new_commit_index:] if first_new_commit_index is not None else []

    def get_previous_review(self):
        self.previous_review = None
        self.comments = list(self.pr.get_issue_comments())
    def get_previous_review(self, *, full: bool, incremental: bool):
        if not (full or incremental):
            raise ValueError("At least one of full or incremental must be True")
        if not getattr(self, "comments", None):
            self.comments = list(self.pr.get_issue_comments())
        prefixes = []
        if full:
            prefixes.append("## PR Analysis")
        if incremental:
            prefixes.append("## Incremental PR Review")
        for index in range(len(self.comments) - 1, -1, -1):
            if self.comments[index].body.startswith("## PR Analysis"):
                self.previous_review = self.comments[index]
                break
            if any(self.comments[index].body.startswith(prefix) for prefix in prefixes):
                return self.comments[index]

    def get_files(self):
        if self.incremental.is_incremental and self.file_set:
@@ -130,7 +135,7 @@ class GithubProvider(GitProvider):
            return diff_files

        except GithubException.RateLimitExceededException as e:
            logging.error(f"Rate limit exceeded for GitHub API. Original message: {e}")
            get_logger().error(f"Rate limit exceeded for GitHub API. Original message: {e}")
            raise RateLimitExceeded("Rate limit exceeded for GitHub API.") from e

    def publish_description(self, pr_title: str, pr_body: str):
@@ -138,7 +143,7 @@ class GithubProvider(GitProvider):

    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        if is_temporary and not get_settings().config.publish_output_progress:
            logging.debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
            return
        response = self.pr.create_issue_comment(pr_comment)
        if hasattr(response, "user") and hasattr(response.user, "login"):
@@ -156,7 +161,7 @@ class GithubProvider(GitProvider):
        position, absolute_position = find_line_number_of_relevant_line_in_file(self.diff_files, relevant_file.strip('`'), relevant_line_in_file)
        if position == -1:
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
                get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
            subject_type = "FILE"
        else:
            subject_type = "LINE"
@@ -179,13 +184,13 @@ class GithubProvider(GitProvider):

            if not relevant_lines_start or relevant_lines_start == -1:
                if get_settings().config.verbosity_level >= 2:
                    logging.exception(
                    get_logger().exception(
                        f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
                continue

            if relevant_lines_end < relevant_lines_start:
                if get_settings().config.verbosity_level >= 2:
                    logging.exception(f"Failed to publish code suggestion, "
                    get_logger().exception(f"Failed to publish code suggestion, "
                                           f"relevant_lines_end is {relevant_lines_end} and "
                                           f"relevant_lines_start is {relevant_lines_start}")
                continue
@@ -212,16 +217,22 @@ class GithubProvider(GitProvider):
            return True
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
                logging.error(f"Failed to publish code suggestion, error: {e}")
                get_logger().error(f"Failed to publish code suggestion, error: {e}")
            return False

    def remove_initial_comment(self):
        try:
            for comment in getattr(self.pr, 'comments_list', []):
                if comment.is_temporary:
                    comment.delete()
                    self.remove_comment(comment)
        except Exception as e:
            logging.exception(f"Failed to remove initial comment, error: {e}")
            get_logger().exception(f"Failed to remove initial comment, error: {e}")

    def remove_comment(self, comment):
        try:
            comment.delete()
        except Exception as e:
            get_logger().exception(f"Failed to remove comment, error: {e}")

    def get_title(self):
        return self.pr.title
@@ -239,9 +250,10 @@ class GithubProvider(GitProvider):
    def get_user_id(self):
        if not self.github_user_id:
            try:
                self.github_user_id = self.github_client.get_user().login
                self.github_user_id = self.github_client.get_user().raw_data['login']
            except Exception as e:
                logging.exception(f"Failed to get user id, error: {e}")
                self.github_user_id = ""
                # logging.exception(f"Failed to get user id, error: {e}")
        return self.github_user_id

    def get_notifications(self, since: datetime):
@@ -258,7 +270,10 @@ class GithubProvider(GitProvider):

    def get_repo_settings(self):
        try:
            contents = self.repo_obj.get_contents(".pr_agent.toml", ref=self.pr.head.sha).decoded_content
            # contents = self.repo_obj.get_contents(".pr_agent.toml", ref=self.pr.head.sha).decoded_content

            # more logical to take 'pr_agent.toml' from the default branch
            contents = self.repo_obj.get_contents(".pr_agent.toml").decoded_content
            return contents
        except Exception:
            return ""
@@ -268,7 +283,7 @@ class GithubProvider(GitProvider):
            reaction = self.pr.get_issue_comment(issue_comment_id).create_reaction("eyes")
            return reaction.id
        except Exception as e:
            logging.exception(f"Failed to add eyes reaction, error: {e}")
            get_logger().exception(f"Failed to add eyes reaction, error: {e}")
            return None

    def remove_reaction(self, issue_comment_id: int, reaction_id: int) -> bool:
@@ -276,7 +291,7 @@ class GithubProvider(GitProvider):
            self.pr.get_issue_comment(issue_comment_id).delete_reaction(reaction_id)
            return True
        except Exception as e:
            logging.exception(f"Failed to remove eyes reaction, error: {e}")
            get_logger().exception(f"Failed to remove eyes reaction, error: {e}")
            return False


@@ -309,6 +324,35 @@ class GithubProvider(GitProvider):

        return repo_name, pr_number

    @staticmethod
    def _parse_issue_url(issue_url: str) -> Tuple[str, int]:
        parsed_url = urlparse(issue_url)

        if 'github.com' not in parsed_url.netloc:
            raise ValueError("The provided URL is not a valid GitHub URL")

        path_parts = parsed_url.path.strip('/').split('/')
        if 'api.github.com' in parsed_url.netloc:
            if len(path_parts) < 5 or path_parts[3] != 'issues':
                raise ValueError("The provided URL does not appear to be a GitHub ISSUE URL")
            repo_name = '/'.join(path_parts[1:3])
            try:
                issue_number = int(path_parts[4])
            except ValueError as e:
                raise ValueError("Unable to convert issue number to integer") from e
            return repo_name, issue_number

        if len(path_parts) < 4 or path_parts[2] != 'issues':
            raise ValueError("The provided URL does not appear to be a GitHub PR issue")

        repo_name = '/'.join(path_parts[:2])
        try:
            issue_number = int(path_parts[3])
        except ValueError as e:
            raise ValueError("Unable to convert issue number to integer") from e

        return repo_name, issue_number

    def _get_github_client(self):
        deployment_type = get_settings().get("GITHUB.DEPLOYMENT_TYPE", "user")

@@ -366,13 +410,13 @@ class GithubProvider(GitProvider):
                "PUT", f"{self.pr.issue_url}/labels", input=post_parameters
            )
        except Exception as e:
            logging.exception(f"Failed to publish labels, error: {e}")
            get_logger().exception(f"Failed to publish labels, error: {e}")

    def get_labels(self):
        try:
            return [label.name for label in self.pr.labels]
        except Exception as e:
            logging.exception(f"Failed to get labels, error: {e}")
            get_logger().exception(f"Failed to get labels, error: {e}")
            return []

    def get_commit_messages(self):
@@ -414,6 +458,13 @@ class GithubProvider(GitProvider):
            return link
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Failed adding line link, error: {e}")
                get_logger().info(f"Failed adding line link, error: {e}")

        return ""

    def get_pr_id(self):
        try:
            pr_id = f"{self.repo}/{self.pr_num}"
            return pr_id
        except:
            return ""
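Note: the reworked get_previous_review above scans issue comments from newest to oldest and returns the first one whose body starts with a qualifying header, instead of mutating self.previous_review as before. A small illustrative sketch of the same scan (the comments list here is hypothetical plain strings, not the PyGithub comment objects):

comments = ["## PR Analysis ...", "random chatter", "## Incremental PR Review ..."]
prefixes = ["## PR Analysis", "## Incremental PR Review"]
previous = next(
    (body for body in reversed(comments)               # newest first
     if any(body.startswith(p) for p in prefixes)),
    None,
)
print(previous)  # "## Incremental PR Review ..."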
@@ -1,4 +1,4 @@
import logging
import hashlib
import re
from typing import Optional, Tuple
from urllib.parse import urlparse
@@ -7,12 +7,12 @@ import gitlab
from gitlab import GitlabGetError

from ..algo.language_handler import is_valid_file
from ..algo.pr_processing import clip_tokens
from ..algo.pr_processing import clip_tokens, find_line_number_of_relevant_line_in_file
from ..algo.utils import load_large_diff
from ..config_loader import get_settings
from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
from ..log import get_logger

logger = logging.getLogger()

class DiffNotFoundError(Exception):
    """Raised when the diff for a merge request cannot be found."""
@@ -43,7 +43,7 @@ class GitLabProvider(GitProvider):
        self.incremental = incremental

    def is_supported(self, capability: str) -> bool:
        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments']:
        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'gfm_markdown']:
            return False
        return True

@@ -58,7 +58,7 @@ class GitLabProvider(GitProvider):
        try:
            self.last_diff = self.mr.diffs.list(get_all=True)[-1]
        except IndexError as e:
            logger.error(f"Could not get diff for merge request {self.id_mr}")
            get_logger().error(f"Could not get diff for merge request {self.id_mr}")
            raise DiffNotFoundError(f"Could not get diff for merge request {self.id_mr}") from e


@@ -98,7 +98,7 @@ class GitLabProvider(GitProvider):
                if isinstance(new_file_content_str, bytes):
                    new_file_content_str = bytes.decode(new_file_content_str, 'utf-8')
            except UnicodeDecodeError:
                logging.warning(
                get_logger().warning(
                    f"Cannot decode file {diff['old_path']} or {diff['new_path']} in merge request {self.id_mr}")

            edit_type = EDIT_TYPE.MODIFIED
@@ -134,7 +134,7 @@ class GitLabProvider(GitProvider):
            self.mr.description = pr_body
            self.mr.save()
        except Exception as e:
            logging.exception(f"Could not update merge request {self.id_mr} description: {e}")
            get_logger().exception(f"Could not update merge request {self.id_mr} description: {e}")

    def publish_comment(self, mr_comment: str, is_temporary: bool = False):
        comment = self.mr.notes.create({'body': mr_comment})
@@ -156,12 +156,12 @@ class GitLabProvider(GitProvider):
    def send_inline_comment(self,body: str,edit_type: str,found: bool,relevant_file: str,relevant_line_in_file: int,
                            source_line_no: int, target_file: str,target_line_no: int) -> None:
        if not found:
            logging.info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
            get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
        else:
            # in order to have exact sha's we have to find correct diff for this change
            diff = self.get_relevant_diff(relevant_file, relevant_line_in_file)
            if diff is None:
                logger.error(f"Could not get diff for merge request {self.id_mr}")
                get_logger().error(f"Could not get diff for merge request {self.id_mr}")
                raise DiffNotFoundError(f"Could not get diff for merge request {self.id_mr}")
            pos_obj = {'position_type': 'text',
                       'new_path': target_file.filename,
@@ -174,24 +174,23 @@ class GitLabProvider(GitProvider):
            else:
                pos_obj['new_line'] = target_line_no - 1
                pos_obj['old_line'] = source_line_no - 1
            logging.debug(f"Creating comment in {self.id_mr} with body {body} and position {pos_obj}")
            self.mr.discussions.create({'body': body,
                                        'position': pos_obj})
            get_logger().debug(f"Creating comment in {self.id_mr} with body {body} and position {pos_obj}")
            self.mr.discussions.create({'body': body, 'position': pos_obj})

    def get_relevant_diff(self, relevant_file: str, relevant_line_in_file: int) -> Optional[dict]:
        changes = self.mr.changes()  # Retrieve the changes for the merge request once
        if not changes:
            logging.error('No changes found for the merge request.')
            get_logger().error('No changes found for the merge request.')
            return None
        all_diffs = self.mr.diffs.list(get_all=True)
        if not all_diffs:
            logging.error('No diffs found for the merge request.')
            get_logger().error('No diffs found for the merge request.')
            return None
        for diff in all_diffs:
            for change in changes['changes']:
                if change['new_path'] == relevant_file and relevant_line_in_file in change['diff']:
                    return diff
        logging.debug(
        get_logger().debug(
            f'No relevant diff found for {relevant_file} {relevant_line_in_file}. Falling back to last diff.')
        return self.last_diff  # fallback to last_diff if no relevant diff is found

@@ -226,7 +225,10 @@ class GitLabProvider(GitProvider):
                self.send_inline_comment(body, edit_type, found, relevant_file, relevant_line_in_file, source_line_no,
                                         target_file, target_line_no)
            except Exception as e:
                logging.exception(f"Could not publish code suggestion:\nsuggestion: {suggestion}\nerror: {e}")
                get_logger().exception(f"Could not publish code suggestion:\nsuggestion: {suggestion}\nerror: {e}")

        # note that we publish suggestions one-by-one. so, if one fails, the rest will still be published
        return True

    def search_line(self, relevant_file, relevant_line_in_file):
        target_file = None
@@ -285,9 +287,15 @@ class GitLabProvider(GitProvider):
    def remove_initial_comment(self):
        try:
            for comment in self.temp_comments:
                comment.delete()
                self.remove_comment(comment)
        except Exception as e:
            logging.exception(f"Failed to remove temp comments, error: {e}")
            get_logger().exception(f"Failed to remove temp comments, error: {e}")

    def remove_comment(self, comment):
        try:
            comment.delete()
        except Exception as e:
            get_logger().exception(f"Failed to remove comment, error: {e}")

    def get_title(self):
        return self.mr.title
@@ -307,7 +315,7 @@ class GitLabProvider(GitProvider):

    def get_repo_settings(self):
        try:
            contents = self.gl.projects.get(self.id_project).files.get(file_path='.pr_agent.toml', ref=self.mr.source_branch)
            contents = self.gl.projects.get(self.id_project).files.get(file_path='.pr_agent.toml', ref=self.mr.target_branch).decode()
            return contents
        except Exception:
            return ""
@@ -355,7 +363,7 @@ class GitLabProvider(GitProvider):
            self.mr.labels = list(set(pr_types))
            self.mr.save()
        except Exception as e:
            logging.exception(f"Failed to publish labels, error: {e}")
            get_logger().exception(f"Failed to publish labels, error: {e}")

    def publish_inline_comments(self, comments: list[dict]):
        pass
@@ -378,4 +386,35 @@ class GitLabProvider(GitProvider):
            commit_messages_str = ""
        if max_tokens:
            commit_messages_str = clip_tokens(commit_messages_str, max_tokens)
        return commit_messages_str
        return commit_messages_str

    def get_pr_id(self):
        try:
            pr_id = self.mr.web_url
            return pr_id
        except:
            return ""

    def generate_link_to_relevant_line_number(self, suggestion) -> str:
        try:
            relevant_file = suggestion['relevant file'].strip('`').strip("'")
            relevant_line_str = suggestion['relevant line']
            if not relevant_line_str:
                return ""

            position, absolute_position = find_line_number_of_relevant_line_in_file \
                (self.diff_files, relevant_file, relevant_line_str)

            if absolute_position != -1:
                # link to right file only
                link = f"https://gitlab.com/codiumai/pr-agent/-/blob/{self.mr.source_branch}/{relevant_file}?ref_type=heads#L{absolute_position}"

                # # link to diff
                # sha_file = hashlib.sha1(relevant_file.encode('utf-8')).hexdigest()
                # link = f"{self.pr.web_url}/diffs#{sha_file}_{absolute_position}_{absolute_position}"
                return link
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"Failed adding line link, error: {e}")

            return ""
@@ -1,4 +1,3 @@
import logging
from collections import Counter
from pathlib import Path
from typing import List
@@ -7,6 +6,7 @@ from git import Repo

from pr_agent.config_loader import _find_repository_root, get_settings
from pr_agent.git_providers.git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
from pr_agent.log import get_logger


class PullRequestMimic:
@@ -49,14 +49,15 @@ class LocalGitProvider(GitProvider):
        """
        Prepare the repository for PR-mimic generation.
        """
        logging.debug('Preparing repository for PR-mimic generation...')
        get_logger().debug('Preparing repository for PR-mimic generation...')
        if self.repo.is_dirty():
            raise ValueError('The repository is not in a clean state. Please commit or stash pending changes.')
        if self.target_branch_name not in self.repo.heads:
            raise KeyError(f'Branch: {self.target_branch_name} does not exist')

    def is_supported(self, capability: str) -> bool:
        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels']:
        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels',
                          'gfm_markdown']:
            return False
        return True

@@ -139,6 +140,9 @@ class LocalGitProvider(GitProvider):
    def remove_initial_comment(self):
        pass  # Not applicable to the local git provider, but required by the interface

    def remove_comment(self, comment):
        pass  # Not applicable to the local git provider, but required by the interface

    def get_languages(self):
        """
        Calculate percentage of languages in repository. Used for hunk prioritisation.
pr_agent/git_providers/utils.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import copy
import os
import tempfile

from dynaconf import Dynaconf

from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.log import get_logger


def apply_repo_settings(pr_url):
    if get_settings().config.use_repo_settings_file:
        repo_settings_file = None
        try:
            git_provider = get_git_provider()(pr_url)
            repo_settings = git_provider.get_repo_settings()
            if repo_settings:
                repo_settings_file = None
                fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
                os.write(fd, repo_settings)
                new_settings = Dynaconf(settings_files=[repo_settings_file])
                for section, contents in new_settings.as_dict().items():
                    section_dict = copy.deepcopy(get_settings().as_dict().get(section, {}))
                    for key, value in contents.items():
                        section_dict[key] = value
                    get_settings().unset(section)
                    get_settings().set(section, section_dict, merge=False)
                    get_logger().info(f"Applying repo settings for section {section}, contents: {contents}")

        finally:
            if repo_settings_file:
                try:
                    os.remove(repo_settings_file)
                except Exception as e:
                    get_logger().error(f"Failed to remove temporary settings file {repo_settings_file}", e)
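Note: apply_repo_settings merges each section of the repo-level .pr_agent.toml over the corresponding global section, key by key, so repo keys win and untouched global keys survive. A pure-dict sketch of that merge semantics (the values are hypothetical):

global_config = {"model": "gpt-4", "verbosity_level": 0}  # assumed global [config] section
repo_config = {"verbosity_level": 2}                      # from the repo's .pr_agent.toml
merged = {**global_config, **repo_config}                 # repo keys override
assert merged == {"model": "gpt-4", "verbosity_level": 2}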
pr_agent/log/__init__.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import json
import logging
import sys
from enum import Enum

from loguru import logger


class LoggingFormat(str, Enum):
    CONSOLE = "CONSOLE"
    JSON = "JSON"


def json_format(record: dict) -> str:
    return record["message"]


def setup_logger(level: str = "INFO", fmt: LoggingFormat = LoggingFormat.CONSOLE):
    level: int = logging.getLevelName(level.upper())
    if type(level) is not int:
        level = logging.INFO

    if fmt == LoggingFormat.JSON:
        logger.remove(None)
        logger.add(
            sys.stdout,
            level=level,
            format="{message}",
            colorize=False,
            serialize=True,
        )
    elif fmt == LoggingFormat.CONSOLE:
        logger.remove(None)
        logger.add(sys.stdout, level=level, colorize=True)

    return logger


def get_logger(*args, **kwargs):
    return logger
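Note: this new module wraps loguru behind setup_logger/get_logger, which is what the logging.* to get_logger().* migration throughout this diff targets. A minimal usage sketch of how the servers in this diff use it:

from pr_agent.log import LoggingFormat, get_logger, setup_logger

setup_logger(fmt=LoggingFormat.JSON)               # JSON lines on stdout
with get_logger().contextualize(event="comment"):  # loguru bound context
    get_logger().info("Handling request")          # record carries event="comment"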
@@ -1,9 +1,8 @@
import ujson

from google.cloud import storage

from pr_agent.config_loader import get_settings
from pr_agent.git_providers.gitlab_provider import logger
from pr_agent.log import get_logger
from pr_agent.secret_providers.secret_provider import SecretProvider


@@ -15,7 +14,7 @@ class GoogleCloudStorageSecretProvider(SecretProvider):
            self.bucket_name = get_settings().google_cloud_storage.bucket_name
            self.bucket = self.client.bucket(self.bucket_name)
        except Exception as e:
            logger.error(f"Failed to initialize Google Cloud Storage Secret Provider: {e}")
            get_logger().error(f"Failed to initialize Google Cloud Storage Secret Provider: {e}")
            raise e

    def get_secret(self, secret_name: str) -> str:
@@ -23,7 +22,7 @@ class GoogleCloudStorageSecretProvider(SecretProvider):
            blob = self.bucket.blob(secret_name)
            return blob.download_as_string()
        except Exception as e:
            logger.error(f"Failed to get secret {secret_name} from Google Cloud Storage: {e}")
            get_logger().error(f"Failed to get secret {secret_name} from Google Cloud Storage: {e}")
            return ""

    def store_secret(self, secret_name: str, secret_value: str):
@@ -31,5 +30,5 @@ class GoogleCloudStorageSecretProvider(SecretProvider):
            blob = self.bucket.blob(secret_name)
            blob.upload_from_string(secret_value)
        except Exception as e:
            logger.error(f"Failed to store secret {secret_name} in Google Cloud Storage: {e}")
            get_logger().error(f"Failed to store secret {secret_name} in Google Cloud Storage: {e}")
            raise e
@@ -1,9 +1,7 @@
import copy
import hashlib
import json
import logging
import os
import sys
import time

import jwt
@@ -18,9 +16,10 @@ from starlette_context.middleware import RawContextMiddleware

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.secret_providers import get_secret_provider

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
setup_logger(fmt=LoggingFormat.JSON)
router = APIRouter()
secret_provider = get_secret_provider()

@@ -49,7 +48,7 @@ async def get_bearer_token(shared_secret: str, client_key: str):
            bearer_token = response.json()["access_token"]
            return bearer_token
    except Exception as e:
        logging.error(f"Failed to get bearer token: {e}")
        get_logger().error(f"Failed to get bearer token: {e}")
        raise e

@router.get("/")
@@ -60,21 +59,23 @@ async def handle_manifest(request: Request, response: Response):
        manifest = manifest.replace("app_key", get_settings().bitbucket.app_key)
        manifest = manifest.replace("base_url", get_settings().bitbucket.base_url)
    except:
        logging.error("Failed to replace api_key in Bitbucket manifest, trying to continue")
        get_logger().error("Failed to replace api_key in Bitbucket manifest, trying to continue")
    manifest_obj = json.loads(manifest)
    return JSONResponse(manifest_obj)

@router.post("/webhook")
async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Request):
    print(request.headers)
    log_context = {"server_type": "bitbucket_app"}
    get_logger().debug(request.headers)
    jwt_header = request.headers.get("authorization", None)
    if jwt_header:
        input_jwt = jwt_header.split(" ")[1]
    data = await request.json()
    print(data)
    get_logger().debug(data)
    async def inner():
        try:
            owner = data["data"]["repository"]["owner"]["username"]
            log_context["sender"] = owner
            secrets = json.loads(secret_provider.get_secret(owner))
            shared_secret = secrets["shared_secret"]
            client_key = secrets["client_key"]
@@ -86,13 +87,19 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
            agent = PRAgent()
            if event == "pullrequest:created":
                pr_url = data["data"]["pullrequest"]["links"]["html"]["href"]
                await agent.handle_request(pr_url, "review")
                log_context["api_url"] = pr_url
                log_context["event"] = "pull_request"
                with get_logger().contextualize(**log_context):
                    await agent.handle_request(pr_url, "review")
            elif event == "pullrequest:comment_created":
                pr_url = data["data"]["pullrequest"]["links"]["html"]["href"]
                log_context["api_url"] = pr_url
                log_context["event"] = "comment"
                comment_body = data["data"]["comment"]["content"]["raw"]
                await agent.handle_request(pr_url, comment_body)
                with get_logger().contextualize(**log_context):
                    await agent.handle_request(pr_url, comment_body)
        except Exception as e:
            logging.error(f"Failed to handle webhook: {e}")
            get_logger().error(f"Failed to handle webhook: {e}")
    background_tasks.add_task(inner)
    return "OK"

@@ -103,9 +110,10 @@ async def handle_github_webhooks(request: Request, response: Response):
@router.post("/installed")
async def handle_installed_webhooks(request: Request, response: Response):
    try:
        print(request.headers)
        get_logger().info("handle_installed_webhooks")
        get_logger().info(request.headers)
        data = await request.json()
        print(data)
        get_logger().info(data)
        shared_secret = data["sharedSecret"]
        client_key = data["clientKey"]
        username = data["principal"]["username"]
@@ -115,13 +123,15 @@ async def handle_installed_webhooks(request: Request, response: Response):
        }
        secret_provider.store_secret(username, json.dumps(secrets))
    except Exception as e:
        logging.error(f"Failed to register user: {e}")
        get_logger().error(f"Failed to register user: {e}")
        return JSONResponse({"error": "Unable to register user"}, status_code=500)

@router.post("/uninstalled")
async def handle_uninstalled_webhooks(request: Request, response: Response):
    get_logger().info("handle_uninstalled_webhooks")

    data = await request.json()
    print(data)
    get_logger().info(data)


def start():
pr_agent/servers/gerrit_server.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import copy
from enum import Enum
from json import JSONDecodeError

import uvicorn
from fastapi import APIRouter, FastAPI, HTTPException
from pydantic import BaseModel
from starlette.middleware import Middleware
from starlette_context import context
from starlette_context.middleware import RawContextMiddleware

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.log import get_logger, setup_logger

setup_logger()
router = APIRouter()


class Action(str, Enum):
    review = "review"
    describe = "describe"
    ask = "ask"
    improve = "improve"
    reflect = "reflect"
    answer = "answer"


class Item(BaseModel):
    refspec: str
    project: str
    msg: str


@router.post("/api/v1/gerrit/{action}")
async def handle_gerrit_request(action: Action, item: Item):
    get_logger().debug("Received a Gerrit request")
    context["settings"] = copy.deepcopy(global_settings)

    if action == Action.ask:
        if not item.msg:
            return HTTPException(
                status_code=400,
                detail="msg is required for ask command"
            )
    await PRAgent().handle_request(
        f"{item.project}:{item.refspec}",
        f"/{item.msg.strip()}"
    )


async def get_body(request):
    try:
        body = await request.json()
    except JSONDecodeError as e:
        get_logger().error("Error parsing request body", e)
        return {}
    return body


@router.get("/")
async def root():
    return {"status": "ok"}


def start():
    # to prevent adding help messages with the output
    get_settings().set("CONFIG.CLI_MODE", True)
    middleware = [Middleware(RawContextMiddleware)]
    app = FastAPI(middleware=middleware)
    app.include_router(router)

    uvicorn.run(app, host="0.0.0.0", port=3000)


if __name__ == '__main__':
    start()
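Note: the endpoint above takes the action as a path segment and a JSON body matching the Item model. A hypothetical client call (not in the diff), assuming the server is running locally on the port configured in start():

import requests

resp = requests.post(
    "http://localhost:3000/api/v1/gerrit/review",  # action is part of the path
    json={"refspec": "refs/changes/01/1/1", "project": "my/project", "msg": ""},
)
print(resp.status_code)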
@@ -5,6 +5,10 @@ import os
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import get_logger
from pr_agent.tools.pr_code_suggestions import PRCodeSuggestions
from pr_agent.tools.pr_description import PRDescription
from pr_agent.tools.pr_reviewer import PRReviewer


@@ -12,12 +16,11 @@ async def run_action():
    # Get environment variables
    GITHUB_EVENT_NAME = os.environ.get('GITHUB_EVENT_NAME')
    GITHUB_EVENT_PATH = os.environ.get('GITHUB_EVENT_PATH')
    OPENAI_KEY = os.environ.get('OPENAI_KEY')
    OPENAI_ORG = os.environ.get('OPENAI_ORG')
    OPENAI_KEY = os.environ.get('OPENAI_KEY') or os.environ.get('OPENAI.KEY')
    OPENAI_ORG = os.environ.get('OPENAI_ORG') or os.environ.get('OPENAI.ORG')
    GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
    get_settings().set("CONFIG.PUBLISH_OUTPUT_PROGRESS", False)


    # Check if required environment variables are set
    if not GITHUB_EVENT_NAME:
        print("GITHUB_EVENT_NAME not set")
@@ -47,13 +50,30 @@ async def run_action():
        print(f"Failed to parse JSON: {e}")
        return

    try:
        get_logger().info("Applying repo settings")
        pr_url = event_payload.get("pull_request", {}).get("html_url")
        if pr_url:
            apply_repo_settings(pr_url)
            get_logger().info(f"enable_custom_labels: {get_settings().config.enable_custom_labels}")
    except Exception as e:
        get_logger().info(f"github action: failed to apply repo settings: {e}")

    # Handle pull request event
    if GITHUB_EVENT_NAME == "pull_request":
        action = event_payload.get("action")
        if action in ["opened", "reopened"]:
            pr_url = event_payload.get("pull_request", {}).get("url")
            if pr_url:
                await PRReviewer(pr_url).run()
                auto_review = os.environ.get('github_action.auto_review', None)
                if auto_review is None or (isinstance(auto_review, str) and auto_review.lower() == 'true'):
                    await PRReviewer(pr_url).run()
                auto_describe = os.environ.get('github_action.auto_describe', None)
                if isinstance(auto_describe, str) and auto_describe.lower() == 'true':
                    await PRDescription(pr_url).run()
                auto_improve = os.environ.get('github_action.auto_improve', None)
                if isinstance(auto_improve, str) and auto_improve.lower() == 'true':
                    await PRCodeSuggestions(pr_url).run()

    # Handle issue comment event
    elif GITHUB_EVENT_NAME == "issue_comment":
@@ -61,12 +81,21 @@ async def run_action():
        if action in ["created", "edited"]:
            comment_body = event_payload.get("comment", {}).get("body")
            if comment_body:
                pr_url = event_payload.get("issue", {}).get("pull_request", {}).get("url")
                if pr_url:
                is_pr = False
                # check if issue is pull request
                if event_payload.get("issue", {}).get("pull_request"):
                    url = event_payload.get("issue", {}).get("pull_request", {}).get("url")
                    is_pr = True
                else:
                    url = event_payload.get("issue", {}).get("url")
                if url:
                    body = comment_body.strip().lower()
                    comment_id = event_payload.get("comment", {}).get("id")
                    provider = get_git_provider()(pr_url=pr_url)
                    await PRAgent().handle_request(pr_url, body, notify=lambda: provider.add_eyes_reaction(comment_id))
                    provider = get_git_provider()(pr_url=url)
                    if is_pr:
                        await PRAgent().handle_request(url, body, notify=lambda: provider.add_eyes_reaction(comment_id))
                    else:
                        await PRAgent().handle_request(url, body)


if __name__ == '__main__':
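Note: the action runner now gates each tool on its own toggle: review defaults to on, describe and improve default to off unless their variable is the string "true". A small illustrative helper (hypothetical, not in the diff) capturing that semantics:

import os

def _enabled(name: str, default: bool) -> bool:
    raw = os.environ.get(name)
    if raw is None:
        return default                                     # unset -> fall back to default
    return isinstance(raw, str) and raw.lower() == 'true'  # anything but "true" disables

# _enabled('github_action.auto_review', True)    -> True when unset or "true"
# _enabled('github_action.auto_describe', False) -> True only when set to "true"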
@@ -1,9 +1,7 @@
import copy
import logging
import sys
import os
import time
from typing import Any, Dict
import asyncio.locks
from typing import Any, Dict, List, Tuple

import uvicorn
from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
@@ -15,9 +13,13 @@ from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.servers.utils import verify_signature
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.git_providers.git_provider import IncrementalPR
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.servers.utils import verify_signature, DefaultDictWithTimeout

setup_logger(fmt=LoggingFormat.JSON)

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
router = APIRouter()


@@ -28,11 +30,11 @@ async def handle_github_webhooks(request: Request, response: Response):
    Verifies the request signature, parses the request body, and passes it to the handle_request function for further
    processing.
    """
    logging.debug("Received a GitHub webhook")
    get_logger().debug("Received a GitHub webhook")

    body = await get_body(request)

    logging.debug(f'Request body:\n{body}')
    get_logger().debug(f'Request body:\n{body}')
    installation_id = body.get("installation", {}).get("id")
    context["installation_id"] = installation_id
    context["settings"] = copy.deepcopy(global_settings)
@@ -44,13 +46,14 @@ async def handle_github_webhooks(request: Request, response: Response):
@router.post("/api/v1/marketplace_webhooks")
async def handle_marketplace_webhooks(request: Request, response: Response):
    body = await get_body(request)
    logging.info(f'Request body:\n{body}')
    get_logger().info(f'Request body:\n{body}')


async def get_body(request):
    try:
        body = await request.json()
    except Exception as e:
        logging.error("Error parsing request body", e)
        get_logger().error("Error parsing request body", e)
        raise HTTPException(status_code=400, detail="Error parsing request body") from e
    webhook_secret = getattr(get_settings().github, 'webhook_secret', None)
    if webhook_secret:
@@ -60,7 +63,9 @@ async def get_body(request):
    return body


_duplicate_requests_cache = {}
_duplicate_requests_cache = DefaultDictWithTimeout(ttl=get_settings().github_app.duplicate_requests_cache_ttl)
_duplicate_push_triggers = DefaultDictWithTimeout(ttl=get_settings().github_app.push_trigger_pending_tasks_ttl)
_pending_task_duplicate_push_conditions = DefaultDictWithTimeout(asyncio.locks.Condition, ttl=get_settings().github_app.push_trigger_pending_tasks_ttl)


async def handle_request(body: Dict[str, Any], event: str):
@@ -76,8 +81,8 @@ async def handle_request(body: Dict[str, Any], event: str):
        return {}
    agent = PRAgent()
    bot_user = get_settings().github_app.bot_user
    logging.info(f"action: '{action}'")
    logging.info(f"event: '{event}'")
    sender = body.get("sender", {}).get("login")
    log_context = {"action": action, "event": event, "sender": sender, "server_type": "github_app"}

    if get_settings().github_app.duplicate_requests_cache and _is_duplicate_request(body):
        return {}
@@ -87,71 +92,142 @@ async def handle_request(body: Dict[str, Any], event: str):
        if "comment" not in body:
            return {}
        comment_body = body.get("comment", {}).get("body")
        sender = body.get("sender", {}).get("login")
        if sender and bot_user in sender:
            logging.info(f"Ignoring comment from {bot_user} user")
            get_logger().info(f"Ignoring comment from {bot_user} user")
            return {}
        logging.info(f"Processing comment from {sender} user")
        get_logger().info(f"Processing comment from {sender} user")
        if "issue" in body and "pull_request" in body["issue"] and "url" in body["issue"]["pull_request"]:
            api_url = body["issue"]["pull_request"]["url"]
        elif "comment" in body and "pull_request_url" in body["comment"]:
            api_url = body["comment"]["pull_request_url"]
        else:
            return {}
        logging.info(f"Handling comment because of event={event} and action={action}")
        log_context["api_url"] = api_url
        get_logger().info(body)
        get_logger().info(f"Handling comment because of event={event} and action={action}")
        comment_id = body.get("comment", {}).get("id")
        provider = get_git_provider()(pr_url=api_url)
        await agent.handle_request(api_url, comment_body, notify=lambda: provider.add_eyes_reaction(comment_id))
        with get_logger().contextualize(**log_context):
            await agent.handle_request(api_url, comment_body, notify=lambda: provider.add_eyes_reaction(comment_id))

    # handle pull_request event:
    # automatically review opened/reopened/ready_for_review PRs as long as they're not in draft,
    # as well as direct review requests from the bot
    elif event == 'pull_request':
        pull_request = body.get("pull_request")
        if not pull_request:
            return {}
        api_url = pull_request.get("url")
        if not api_url:
            return {}
        if pull_request.get("draft", True) or pull_request.get("state") != "open" or pull_request.get("user", {}).get("login", "") == bot_user:
    elif event == 'pull_request' and action != 'synchronize':
        pull_request, api_url = _check_pull_request_event(action, body, log_context, bot_user)
        if not (pull_request and api_url):
            return {}
        if action in get_settings().github_app.handle_pr_actions:
            if action == "review_requested":
                if body.get("requested_reviewer", {}).get("login", "") != bot_user:
                    return {}
                if pull_request.get("created_at") == pull_request.get("updated_at"):
                    # avoid double reviews when opening a PR for the first time
                    return {}
            logging.info(f"Performing review because of event={event} and action={action}")
            for command in get_settings().github_app.pr_commands:
                split_command = command.split(" ")
                command = split_command[0]
                args = split_command[1:]
                other_args = update_settings_from_args(args)
                new_command = ' '.join([command] + other_args)
                logging.info(f"Performing command: {new_command}")
                await agent.handle_request(api_url, new_command)
            get_logger().info(f"Performing review for {api_url=} because of {event=} and {action=}")
            await _perform_commands(get_settings().github_app.pr_commands, agent, body, api_url, log_context)

    logging.info("event or action does not require handling")
    # handle pull_request event with synchronize action - "push trigger" for new commits
    elif event == 'pull_request' and action == 'synchronize' and get_settings().github_app.handle_push_trigger:
        pull_request, api_url = _check_pull_request_event(action, body, log_context, bot_user)
        if not (pull_request and api_url):
            return {}

        # TODO: do we still want to get the list of commits to filter bot/merge commits?
        before_sha = body.get("before")
        after_sha = body.get("after")
        merge_commit_sha = pull_request.get("merge_commit_sha")
        if before_sha == after_sha:
            return {}
        if get_settings().github_app.push_trigger_ignore_merge_commits and after_sha == merge_commit_sha:
            return {}
        if get_settings().github_app.push_trigger_ignore_bot_commits and body.get("sender", {}).get("login", "") == bot_user:
            return {}

        # Prevent triggering multiple times for subsequent push triggers when one is enough:
        # The first push will trigger the processing, and if there's a second push in the meanwhile it will wait.
        # Any more events will be discarded, because they will all trigger the exact same processing on the PR.
        # We let the second event wait instead of discarding it because while the first event was being processed,
        # more commits may have been pushed that led to the subsequent events,
        # so we keep just one waiting as a delegate to trigger the processing for the new commits when done waiting.
        current_active_tasks = _duplicate_push_triggers.setdefault(api_url, 0)
        max_active_tasks = 2 if get_settings().github_app.push_trigger_pending_tasks_backlog else 1
        if current_active_tasks < max_active_tasks:
            # first task can enter, and second tasks too if backlog is enabled
            get_logger().info(
                f"Continue processing push trigger for {api_url=} because there are {current_active_tasks} active tasks"
            )
            _duplicate_push_triggers[api_url] += 1
        else:
            get_logger().info(
                f"Skipping push trigger for {api_url=} because another event already triggered the same processing"
            )
            return {}
        async with _pending_task_duplicate_push_conditions[api_url]:
            if current_active_tasks == 1:
                # second task waits
                get_logger().info(
                    f"Waiting to process push trigger for {api_url=} because the first task is still in progress"
                )
                await _pending_task_duplicate_push_conditions[api_url].wait()
                get_logger().info(f"Finished waiting to process push trigger for {api_url=} - continue with flow")

        try:
            if get_settings().github_app.push_trigger_wait_for_initial_review and not get_git_provider()(api_url, incremental=IncrementalPR(True)).previous_review:
                get_logger().info(f"Skipping incremental review because there was no initial review for {api_url=} yet")
                return {}
            get_logger().info(f"Performing incremental review for {api_url=} because of {event=} and {action=}")
            await _perform_commands(get_settings().github_app.push_commands, agent, body, api_url, log_context)

        finally:
            # release the waiting task block
            async with _pending_task_duplicate_push_conditions[api_url]:
                _pending_task_duplicate_push_conditions[api_url].notify(1)
                _duplicate_push_triggers[api_url] -= 1

    get_logger().info("event or action does not require handling")
    return {}


def _check_pull_request_event(action: str, body: dict, log_context: dict, bot_user: str) -> Tuple[Dict[str, Any], str]:
    invalid_result = {}, ""
    pull_request = body.get("pull_request")
    if not pull_request:
        return invalid_result
    api_url = pull_request.get("url")
    if not api_url:
        return invalid_result
    log_context["api_url"] = api_url
    if pull_request.get("draft", True) or pull_request.get("state") != "open" or pull_request.get("user", {}).get("login", "") == bot_user:
        return invalid_result
    if action in ("review_requested", "synchronize") and pull_request.get("created_at") == pull_request.get("updated_at"):
        # avoid double reviews when opening a PR for the first time
        return invalid_result
    return pull_request, api_url


async def _perform_commands(commands: List[str], agent: PRAgent, body: dict, api_url: str, log_context: dict):
    apply_repo_settings(api_url)
    for command in commands:
        split_command = command.split(" ")
        command = split_command[0]
        args = split_command[1:]
        other_args = update_settings_from_args(args)
        new_command = ' '.join([command] + other_args)
        get_logger().info(body)
        get_logger().info(f"Performing command: {new_command}")
        with get_logger().contextualize(**log_context):
            await agent.handle_request(api_url, new_command)


def _is_duplicate_request(body: Dict[str, Any]) -> bool:
    """
    In some deployments its possible to get duplicate requests if the handling is long,
    This function checks if the request is duplicate and if so - ignores it.
    """
    request_hash = hash(str(body))
    logging.info(f"request_hash: {request_hash}")
    request_time = time.monotonic()
    ttl = get_settings().github_app.duplicate_requests_cache_ttl  # in seconds
    to_delete = [key for key, key_time in _duplicate_requests_cache.items() if request_time - key_time > ttl]
    for key in to_delete:
        del _duplicate_requests_cache[key]
    is_duplicate = request_hash in _duplicate_requests_cache
    _duplicate_requests_cache[request_hash] = request_time
    get_logger().info(f"request_hash: {request_hash}")
    is_duplicate = _duplicate_requests_cache.get(request_hash, False)
    _duplicate_requests_cache[request_hash] = True
    if is_duplicate:
        logging.info(f"Ignoring duplicate request {request_hash}")
        get_logger().info(f"Ignoring duplicate request {request_hash}")
    return is_duplicate
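Note: the push-trigger block above implements a run-one, queue-one pattern: the first synchronize event processes, a second waits as a delegate for any commits that arrive meanwhile, and further events are dropped. A standalone sketch of the same pattern (hypothetical names, simplified bookkeeping; the real code also applies TTLs via DefaultDictWithTimeout):

import asyncio

active = {}       # api_url -> number of active tasks
conditions = {}   # api_url -> asyncio.Condition

async def on_push(api_url, process):
    cond = conditions.setdefault(api_url, asyncio.Condition())
    count = active.setdefault(api_url, 0)
    if count >= 2:                 # a runner and a delegate already exist
        return
    active[api_url] = count + 1
    async with cond:
        if count == 1:             # second arrival waits for the runner
            await cond.wait()
    try:
        await process(api_url)     # runs against the newest commits
    finally:
        async with cond:
            cond.notify(1)         # wake the delegate, if one is waiting
        active[api_url] -= 1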
@@ -1,6 +1,4 @@
import asyncio
import logging
import sys
from datetime import datetime, timezone

import aiohttp
@@ -8,9 +6,10 @@ import aiohttp
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.servers.help import bot_help_text

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
setup_logger(fmt=LoggingFormat.JSON)
NOTIFICATION_URL = "https://api.github.com/notifications"


@@ -94,7 +93,7 @@ async def polling_loop():
                            comment_body = comment['body'] if 'body' in comment else ''
                            commenter_github_user = comment['user']['login'] \
                                if 'user' in comment else ''
                            logging.info(f"Commenter: {commenter_github_user}\nComment: {comment_body}")
                            get_logger().info(f"Commenter: {commenter_github_user}\nComment: {comment_body}")
                            user_tag = "@" + user_id
                            if user_tag not in comment_body:
                                continue
@@ -112,7 +111,7 @@ async def polling_loop():
                        print(f"Failed to fetch notifications. Status code: {response.status}")

            except Exception as e:
                logging.error(f"Exception during processing of a notification: {e}")
                get_logger().error(f"Exception during processing of a notification: {e}")


if __name__ == '__main__':
@@ -1,43 +1,84 @@
import logging
import copy
import json

import uvicorn
from fastapi import APIRouter, FastAPI, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware
from starlette_context import context
from starlette_context.middleware import RawContextMiddleware

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.secret_providers import get_secret_provider

app = FastAPI()
setup_logger(fmt=LoggingFormat.JSON)
router = APIRouter()

secret_provider = get_secret_provider() if get_settings().get("CONFIG.SECRET_PROVIDER") else None


def handle_request(background_tasks: BackgroundTasks, url: str, body: str, log_context: dict):
    log_context["action"] = body
    log_context["event"] = "pull_request" if body == "/review" else "comment"
    log_context["api_url"] = url
    with get_logger().contextualize(**log_context):
        background_tasks.add_task(PRAgent().handle_request, url, body)


@router.post("/webhook")
async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
    log_context = {"server_type": "gitlab_app"}
    if request.headers.get("X-Gitlab-Token") and secret_provider:
        request_token = request.headers.get("X-Gitlab-Token")
        secret = secret_provider.get_secret(request_token)
        try:
            secret_dict = json.loads(secret)
            gitlab_token = secret_dict["gitlab_token"]
            log_context["sender"] = secret_dict["id"]
            context["settings"] = copy.deepcopy(global_settings)
            context["settings"].gitlab.personal_access_token = gitlab_token
        except Exception as e:
            get_logger().error(f"Failed to validate secret {request_token}: {e}")
            return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"}))
    elif get_settings().get("GITLAB.SHARED_SECRET"):
        secret = get_settings().get("GITLAB.SHARED_SECRET")
        if not request.headers.get("X-Gitlab-Token") == secret:
            return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"}))
    else:
        return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"}))
    gitlab_token = get_settings().get("GITLAB.PERSONAL_ACCESS_TOKEN", None)
    if not gitlab_token:
        return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"}))
    data = await request.json()
    get_logger().info(json.dumps(data))
    if data.get('object_kind') == 'merge_request' and data['object_attributes'].get('action') in ['open', 'reopen']:
        logging.info(f"A merge request has been opened: {data['object_attributes'].get('title')}")
        get_logger().info(f"A merge request has been opened: {data['object_attributes'].get('title')}")
        url = data['object_attributes'].get('url')
        background_tasks.add_task(PRAgent().handle_request, url, "/review")
        handle_request(background_tasks, url, "/review", log_context)
    elif data.get('object_kind') == 'note' and data['event_type'] == 'note':
        if 'merge_request' in data:
            mr = data['merge_request']
            url = mr.get('url')
            body = data.get('object_attributes', {}).get('note')
            background_tasks.add_task(PRAgent().handle_request, url, body)
            handle_request(background_tasks, url, body, log_context)
    return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))


@router.get("/")
async def root():
    return {"status": "ok"}

def start():
    gitlab_url = get_settings().get("GITLAB.URL", None)
    if not gitlab_url:
        raise ValueError("GITLAB.URL is not set")
    gitlab_token = get_settings().get("GITLAB.PERSONAL_ACCESS_TOKEN", None)
    if not gitlab_token:
        raise ValueError("GITLAB.PERSONAL_ACCESS_TOKEN is not set")
    get_settings().config.git_provider = "gitlab"

    app = FastAPI()
    middleware = [Middleware(RawContextMiddleware)]
    app = FastAPI(middleware=middleware)
    app.include_router(router)

    uvicorn.run(app, host="0.0.0.0", port=3000)
@@ -1,12 +1,10 @@
import logging

from fastapi import FastAPI
from mangum import Mangum

from pr_agent.log import setup_logger
from pr_agent.servers.github_app import router

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
setup_logger()

app = FastAPI()
app.include_router(router)
@@ -1,5 +1,8 @@
import hashlib
import hmac
+import time
+from collections import defaultdict
+from typing import Callable, Any

from fastapi import HTTPException

@@ -25,3 +28,59 @@ def verify_signature(payload_body, secret_token, signature_header):
class RateLimitExceeded(Exception):
    """Raised when the git provider API rate limit has been exceeded."""
    pass
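The body of `verify_signature`, referenced in the hunk header above, is not part of this diff. For orientation, a typical implementation consistent with the `hashlib`/`hmac`/`HTTPException` imports and GitHub's documented webhook scheme looks roughly like this (a hedged sketch, not necessarily the repo's exact code):

```python
import hashlib
import hmac

from fastapi import HTTPException


def verify_signature_sketch(payload_body: bytes, secret_token: str, signature_header: str) -> None:
    # GitHub sends "X-Hub-Signature-256: sha256=<hex digest of HMAC-SHA256(body, secret)>"
    expected = "sha256=" + hmac.new(secret_token.encode("utf-8"),
                                    msg=payload_body,
                                    digestmod=hashlib.sha256).hexdigest()
    # compare_digest avoids leaking timing information during the comparison
    if not signature_header or not hmac.compare_digest(expected, signature_header):
        raise HTTPException(status_code=403, detail="Request signatures didn't match!")
```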

class DefaultDictWithTimeout(defaultdict):
    """A defaultdict with a time-to-live (TTL)."""

    def __init__(
        self,
        default_factory: Callable[[], Any] = None,
        ttl: int = None,
        refresh_interval: int = 60,
        update_key_time_on_get: bool = True,
        *args,
        **kwargs,
    ):
        """
        Args:
            default_factory: The default factory to use for keys that are not in the dictionary.
            ttl: The time-to-live (TTL) in seconds.
            refresh_interval: How often to refresh the dict and delete items older than the TTL.
            update_key_time_on_get: Whether to update the access time of a key also on get (or only when set).
        """
        super().__init__(default_factory, *args, **kwargs)
        self.__key_times = dict()
        self.__ttl = ttl
        self.__refresh_interval = refresh_interval
        self.__update_key_time_on_get = update_key_time_on_get
        self.__last_refresh = self.__time() - self.__refresh_interval

    @staticmethod
    def __time():
        return time.monotonic()

    def __refresh(self):
        if self.__ttl is None:
            return
        request_time = self.__time()
        if request_time - self.__last_refresh < self.__refresh_interval:
            # skip if the last purge ran less than refresh_interval seconds ago
            return
        to_delete = [key for key, key_time in self.__key_times.items() if request_time - key_time > self.__ttl]
        for key in to_delete:
            del self[key]
        self.__last_refresh = request_time

    def __getitem__(self, __key):
        if self.__update_key_time_on_get:
            self.__key_times[__key] = self.__time()
        self.__refresh()
        return super().__getitem__(__key)

    def __setitem__(self, __key, __value):
        self.__key_times[__key] = self.__time()
        return super().__setitem__(__key, __value)

    def __delitem__(self, __key):
        del self.__key_times[__key]
        return super().__delitem__(__key)
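A usage sketch (the key and payload are invented for the example): the GitHub app's push-trigger backlog, configured below with `push_trigger_pending_tasks_ttl = 300`, is the kind of consumer this structure serves:

```python
# Illustrative usage; key and task names are made up for the example.
pending_tasks = DefaultDictWithTimeout(list, ttl=300, refresh_interval=60)

pending_tasks["org/repo#42"].append("/auto_review")  # __getitem__/__setitem__ stamp the key's time
# Any key not touched for ttl seconds is purged lazily on a later access,
# at most once per refresh_interval, so stale backlogs do not grow without bound.
```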

@@ -16,6 +16,10 @@ key = ""  # Acquire through https://platform.openai.com
#deployment_id = ""  # The deployment name you chose when you deployed the engine
#fallback_deployments = []  # For each fallback model specified in configuration.toml in the [config] section, specify the appropriate deployment_id

[pinecone]
api_key = "..."
environment = "gcp-starter"

[anthropic]
key = ""  # Optional, uncomment if you want to use Anthropic. Acquire through https://www.anthropic.com/

@@ -24,6 +28,14 @@ key = ""  # Optional, uncomment if you want to use Cohere. Acquire through https:

[replicate]
key = ""  # Optional, uncomment if you want to use Replicate. Acquire through https://replicate.com/

[huggingface]
key = ""  # Optional, uncomment if you want to use Huggingface Inference API. Acquire through https://huggingface.co/docs/api-inference/quicktour
api_base = ""  # the base url for your huggingface inference endpoint

[ollama]
api_base = ""  # the base url for your ollama inference endpoint

[github]
# ---- Set the following only for deployment type == "user"
user_token = ""  # A GitHub personal access token with 'repo' scope.

@@ -43,5 +55,12 @@ webhook_secret = "<WEBHOOK SECRET>"  # Optional, may be commented out.
personal_access_token = ""

[bitbucket]
-# Bitbucket personal bearer token
+# For Bitbucket personal/repository bearer token
bearer_token = ""

# For Bitbucket app
app_key = ""
base_url = ""

[litellm]
LITELLM_TOKEN = ""  # see https://docs.litellm.ai/docs/debugging/hosted_debugging for details and instructions on how to get a token
@@ -10,26 +10,37 @@ use_repo_settings_file=true
ai_timeout=180
max_description_tokens = 500
max_commits_tokens = 500
litellm_debugger=false
patch_extra_lines = 3
secret_provider="google_cloud_storage"
cli_mode=false

[pr_reviewer] # /review #
require_focused_review=false
require_score_review=false
require_tests_review=true
require_security_review=true
require_estimate_effort_to_review=true
num_code_suggestions=4
inline_code_comments = false
ask_and_reflect=false
automatic_review=true
remove_previous_review_comment=false
extra_instructions = ""

[pr_description] # /describe #
publish_labels=true
publish_description_as_comment=false
add_original_user_description=false
keep_original_user_title=false
use_bullet_points=true
extra_instructions = ""

# markers
use_description_markers=false
include_generated_by_header=true

#custom_labels = ['Bug fix', 'Tests', 'Bug fix with tests', 'Refactoring', 'Enhancement', 'Documentation', 'Other']

[pr_questions] # /ask #

[pr_code_suggestions] # /improve #
@@ -42,6 +53,10 @@ rank_extended_suggestions = true
max_number_of_calls = 5
final_clip_factor = 0.9

[pr_add_docs] # /add_docs #
extra_instructions = ""
docs_style = "Sphinx Style" # "Google Style with Args, Returns, Attributes...etc", "Numpy Style", "Sphinx Style", "PEP257", "reStructuredText"

[pr_update_changelog] # /update_changelog #
push_changelog_changes=false
extra_instructions = ""
@@ -53,6 +68,11 @@ extra_instructions = ""
deployment_type = "user"
ratelimit_retries = 5

[github_action]
# auto_review = true # set as env var in .github/workflows/pr-agent.yaml
# auto_describe = true # set as env var in .github/workflows/pr-agent.yaml
# auto_improve = true # set as env var in .github/workflows/pr-agent.yaml

[github_app]
# these toggles allow running the github app from custom deployments
bot_user = "github-actions[bot]"
@@ -67,6 +87,27 @@ pr_commands = [
    "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
    "/auto_review",
]
# settings for "pull_request" event with "synchronize" action - used to detect and handle push triggers for new commits
handle_push_trigger = false
push_trigger_ignore_bot_commits = true
push_trigger_ignore_merge_commits = true
push_trigger_wait_for_initial_review = true
push_trigger_pending_tasks_backlog = true
push_trigger_pending_tasks_ttl = 300
push_commands = [
    "/describe --pr_description.add_original_user_description=true --pr_description.keep_original_user_title=true",
    """/auto_review -i \
    --pr_reviewer.require_focused_review=false \
    --pr_reviewer.require_score_review=false \
    --pr_reviewer.require_tests_review=false \
    --pr_reviewer.require_security_review=false \
    --pr_reviewer.require_estimate_effort_to_review=false \
    --pr_reviewer.num_code_suggestions=0 \
    --pr_reviewer.inline_code_comments=false \
    --pr_reviewer.remove_previous_review_comment=true \
    --pr_reviewer.extra_instructions='' \
    """
]

[gitlab]
# URL to the gitlab service
@@ -84,4 +125,27 @@ polling_interval_seconds = 30
[local]
# LocalGitProvider settings - uncomment to use paths other than default
# description_path= "path/to/description.md"
# review_path= "path/to/review.md"

[gerrit]
# endpoint to the gerrit service
# url = "ssh://gerrit.example.com:29418"
# user for gerrit authentication
# user = "ai-reviewer"
# patch server where patches will be saved
# patch_server_endpoint = "http://127.0.0.1:5000/patch"
# token to authenticate in the patch server
# patch_server_token = ""

[litellm]
#use_client = false

[pr_similar_issue]
skip_comments = false
force_update_dataset = false
max_issues_to_scan = 500

[pinecone]
# fill and place in .secrets.toml
#api_key = ...
# environment = "gcp-starter"
pr_agent/settings/custom_labels.toml (new file, 18 lines)
@@ -0,0 +1,18 @@
[config]
enable_custom_labels=false

## template for custom labels
#[custom_labels."Bug fix"]
#description = "Fixes a bug in the code"
#[custom_labels."Tests"]
#description = "Adds or modifies tests"
#[custom_labels."Bug fix with tests"]
#description = "Fixes a bug in the code and adds or modifies tests"
#[custom_labels."Refactoring"]
#description = "Code refactoring without changing functionality"
#[custom_labels."Enhancement"]
#description = "Adds new features or functionality"
#[custom_labels."Documentation"]
#description = "Adds or modifies documentation"
#[custom_labels."Other"]
#description = "Other changes that do not fit in any of the above categories"
pr_agent/settings/ignore.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
[ignore]

glob = [
    # Ignore files and directories matching these glob patterns.
    # See https://docs.python.org/3/library/glob.html
    'vendor/**',
]
regex = [
    # Ignore files and directories matching these regex patterns.
    # See https://learnbyexample.github.io/python-regex-cheatsheet/
]
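For intuition, a minimal sketch (an illustration, not the provider's actual filtering code) of how glob and regex patterns like the ones above can be applied to a candidate path:

```python
import re
from fnmatch import fnmatch
from typing import List


def is_ignored(path: str, globs: List[str], regexes: List[str]) -> bool:
    # A path is skipped if it matches any glob pattern or any regex pattern.
    return any(fnmatch(path, g) for g in globs) or any(re.search(r, path) for r in regexes)


print(is_ignored("vendor/lib/util.py", ["vendor/**"], []))  # True: caught by 'vendor/**'
print(is_ignored("src/main.py", ["vendor/**"], []))         # False: no pattern matches
```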
@@ -53,7 +53,8 @@ default = [
    'xz',
    'zip',
    'zst',
-   'snap'
+   'snap',
+   'lockb'
]
extra = [
    'md',
@@ -432,3 +433,6 @@ reStructuredText = [".rst", ".rest", ".rest.txt", ".rst.txt", ]
wisp = [".wisp", ]
xBase = [".prg", ".prw", ]

[docs_blacklist_extensions]
# Disable docs for these extensions: text files and scripts that are not programming languages with functions, classes, and methods
docs_blacklist = ['sql', 'txt', 'yaml', 'json', 'xml', 'md', 'rst', 'rest', 'rest.txt', 'rst.txt', 'mdpolicy', 'mdown', 'markdown', 'mdwn', 'mkd', 'mkdn', 'mkdown', 'sh']
pr_agent/settings/pr_add_docs.toml (new file, 117 lines)
@@ -0,0 +1,117 @@
[pr_add_docs_prompt]
system="""You are a language model called PR-Code-Documentation Agent, that specializes in generating documentation for code.
Your task is to generate meaningful {{ docs_for_language }} for a PR (the '+' lines).

Example for a PR Diff input:
'
## src/file1.py

@@ -12,3 +12,5 @@ def func1():
__new hunk__
12  code line that already existed in the file...
13  code line that already existed in the file....
14 +new code line1 added in the PR
15 +new code line2 added in the PR
16  code line that already existed in the file...
__old hunk__
 code line that already existed in the file...
-code line that was removed in the PR
 code line that already existed in the file...


@@ ... @@ def func2():
__new hunk__
...
__old hunk__
...


## src/file2.py
...
'

Specific instructions:
- Try to identify edited/added code components (classes/functions/methods...) that are undocumented, and generate {{ docs_for_language }} for each one.
- If there are documented (any type of {{ language }} documentation) code components in the PR, don't generate {{ docs_for_language }} for them.
- Ignore code components that don't appear fully in the '__new hunk__' section. For example, you must see the component header and body.
- Make sure the {{ docs_for_language }} starts and ends with standard {{ language }} {{ docs_for_language }} signs.
- The {{ docs_for_language }} should be in standard format.
- Provide the exact line number (inclusive) where the {{ docs_for_language }} should be added.


{%- if extra_instructions %}

Extra instructions from the user:
'
{{ extra_instructions }}
'
{%- endif %}

You must use the following YAML schema to format your answer:
```yaml
Code Documentation:
    type: array
    uniqueItems: true
    items:
        relevant file:
            type: string
            description: the relevant file full path
        relevant line:
            type: integer
            description: |-
                The relevant line number from a '__new hunk__' section where the {{ docs_for_language }} should be added.
        doc placement:
            type: string
            enum:
                - before
                - after
            description: |-
                The {{ docs_for_language }} placement relative to the relevant line (code component).
        documentation:
            type: string
            description: |-
                The {{ docs_for_language }} content. It should be complete, correctly formatted and indented, and without line numbers.
```

Example output:
```yaml
Code Documentation:
- relevant file: |-
    src/file1.py
  relevant line: 12
  doc placement: after
  documentation: |-
    \"\"\"
    This is a python docstring for func1.
    \"\"\"
- ...
...
```


Each YAML output MUST be after a newline, indented, with block scalar indicator ('|-').
Don't repeat the prompt in the answer, and avoid outputting the 'type' and 'description' fields.
"""

user="""PR Info:

Title: '{{ title }}'

Branch: '{{ branch }}'

Description: '{{description}}'

{%- if language %}

Main language: {{language}}
{%- endif %}


The PR Diff:
```
{{- diff|trim }}
```

Response (should be a valid YAML, and nothing else):
```yaml
"""
@@ -1,6 +1,6 @@
[pr_code_suggestions_prompt]
system="""You are a language model called PR-Code-Reviewer, that specializes in suggesting code improvements for a Pull Request (PR).
-Your task is to provide meaningful and actionable code suggestions, to improve the new code presented in a PR.
+Your task is to provide meaningful and actionable code suggestions, to improve the new code presented in a PR (the '+' lines in the diff).

Example for a PR Diff input:
'
@@ -31,14 +31,13 @@ __old hunk__
'

Specific instructions:
-- Provide up to {{ num_code_suggestions }} code suggestions.
+- Provide up to {{ num_code_suggestions }} code suggestions. Try to provide diverse and insightful suggestions.
- Prioritize suggestions that address major problems, issues and bugs in the code.
  As a second priority, suggestions should focus on best practices, code readability, maintainability, enhancements, performance, and other aspects.
-  Don't suggest to add docstring or type hints.
-  Try to provide diverse and insightful suggestions.
+- Don't suggest to add docstring, type hints, or comments.
- Suggestions should refer only to code from the '__new hunk__' sections, and focus on new lines of code (lines starting with '+').
-  Avoid making suggestions that have already been implemented in the PR code. For example, if you want to add logs, or change a variable to const, or anything else, make sure it isn't already in the '__new hunk__' code.
-  For each suggestion, make sure to take into consideration also the context, meaning the lines before and after the relevant code.
+- Avoid making suggestions that have already been implemented in the PR code. For example, if you want to add logs, or change a variable to const, or anything else, make sure it isn't already in the '__new hunk__' code.
+- For each suggestion, make sure to take into consideration also the context, meaning the lines before and after the relevant code.
- Provide the exact line numbers range (inclusive) for each issue.
- Assume there is additional relevant code, that is not included in the diff.

@@ -46,7 +45,9 @@ Specific instructions:
{%- if extra_instructions %}

Extra instructions from the user:
'
{{ extra_instructions }}
'
{%- endif %}

You must use the following YAML schema to format your answer:

@@ -68,12 +69,17 @@ Code suggestions:
        type: string
        description: |-
            a code snippet showing the relevant code lines from a '__new hunk__' section.
-           It must be continuous, correctly formatted and indented, and without line numbers.
-   relevant lines:
-       type: string
+           It must be contiguous, correctly formatted and indented, and without line numbers.
+   relevant lines start:
+       type: integer
        description: |-
-           the relevant lines from a '__new hunk__' section, in the format of 'start_line-end_line'.
-           For example: '10-15'. They should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above.
+           The relevant line number from a '__new hunk__' section where the suggestion starts (inclusive).
+           Should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above.
+   relevant lines end:
+       type: integer
+       description: |-
+           The relevant line number from a '__new hunk__' section where the suggestion ends (inclusive).
+           Should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above.
    improved code:
        type: string
        description: |-

@@ -90,7 +96,8 @@ Code suggestions:
        Add a docstring to func1()
    existing code: |-
        def func1():
-   relevant lines: '12-12'
+   relevant lines start: 12
+   relevant lines end: 12
    improved code: |-
        ...
```
pr_agent/settings/pr_custom_labels.toml (new file, 72 lines)
@@ -0,0 +1,72 @@
[pr_custom_labels_prompt]
system="""You are CodiumAI-PR-Reviewer, a language model designed to review git pull requests.
Your task is to label the type of the PR content.
- Make sure not to focus on the new PR code (the '+' lines).
- If needed, each YAML output should be in block scalar format ('|-')
{%- if extra_instructions %}

Extra instructions from the user:
'
{{ extra_instructions }}
'
{% endif %}

You must use the following YAML schema to format your answer:
```yaml
PR Type:
    type: array
{%- if enable_custom_labels %}
    description: One or more labels that describe the PR type. Don't output the description in the parentheses.
{%- endif %}
    items:
        type: string
        enum:
{%- if enable_custom_labels %}
{{ custom_labels }}
{%- else %}
            - Bug fix
            - Tests
            - Refactoring
            - Enhancement
            - Documentation
            - Other
{%- endif %}
```

Example output:
```yaml
PR Type:
{%- if enable_custom_labels %}
{{ custom_labels_examples }}
{%- else %}
- Bug fix
- Tests
{%- endif %}
```

Make sure to output a valid YAML. Don't repeat the prompt in the answer, and avoid outputting the 'type' and 'description' fields.
"""

user="""PR Info:
Previous title: '{{title}}'
Previous description: '{{description}}'
Branch: '{{branch}}'
{%- if language %}

Main language: {{language}}
{%- endif %}
{%- if commit_messages_str %}

Commit messages:
{{commit_messages_str}}
{%- endif %}


The PR Git Diff:
```
{{diff}}
```
Note that lines in the diff body are prefixed with a symbol that represents the type of change: '-' for deletions, '+' for additions, and ' ' (a space) for unchanged lines.

Response (should be a valid YAML, and nothing else):
```yaml
"""
@@ -7,7 +7,9 @@ Your task is to provide full description of the PR content.
{%- if extra_instructions %}

Extra instructions from the user:
'
{{ extra_instructions }}
'
{% endif %}

You must use the following YAML schema to format your answer:

@@ -17,19 +19,26 @@ PR Title:
    description: an informative title for the PR, describing its main theme
PR Type:
    type: array
{%- if enable_custom_labels %}
    description: One or more labels that describe the PR type. Don't output the description in the parentheses.
{%- endif %}
    items:
        type: string
        enum:
{%- if enable_custom_labels %}
{{ custom_labels }}
{%- else %}
            - Bug fix
            - Tests
            - Bug fix with tests
            - Refactoring
            - Enhancement
            - Documentation
            - Other
{%- endif %}
PR Description:
    type: string
-   description: an informative and concise description of the PR
+   description: an informative and concise description of the PR.
+{%- if use_bullet_points %} Use bullet points. {% endif %}
PR Main Files Walkthrough:
    type: array
    maxItems: 10

@@ -49,7 +58,11 @@ Example output:
PR Title: |-
    ...
PR Type:
{%- if enable_custom_labels %}
{{ custom_labels_examples }}
{%- else %}
- Bug fix
{%- endif %}
PR Description: |-
    ...
PR Main Files Walkthrough:
@@ -22,20 +22,22 @@ code line that already existed in the file....
...
'

-Thre review should focus on new code added in the PR (lines starting with '+'), and not on code that already existed in the file (lines starting with '-', or without prefix).
+The review should focus on new code added in the PR (lines starting with '+'), and not on code that already existed in the file (lines starting with '-', or without prefix).

{%- if num_code_suggestions > 0 %}
-- Provide up to {{ num_code_suggestions }} code suggestions.
+- Provide up to {{ num_code_suggestions }} code suggestions. Try to provide diverse and insightful suggestions.
- Focus on important suggestions like fixing code problems, issues and bugs. As a second priority, provide suggestions for meaningful code improvements, like performance, vulnerability, modularity, and best practices.
- Avoid making suggestions that have already been implemented in the PR code. For example, if you want to add logs, or change a variable to const, or anything else, make sure it isn't already in the PR code.
-- Don't suggest to add docstring or type hints.
+- Don't suggest to add docstring, type hints, or comments.
- Suggestions should focus on improving the new code added in the PR (lines starting with '+')
{%- endif %}

{%- if extra_instructions %}

Extra instructions from the user:
'
{{ extra_instructions }}
'
{% endif %}

You must use the following YAML schema to format your answer:

@@ -49,13 +51,22 @@ PR Analysis:
        description: summary of the PR in 2-3 sentences.
    Type of PR:
        type: string
{%- if enable_custom_labels %}
        description: One or more labels that describe the PR type. Don't output the description in the parentheses.
{%- endif %}
        items:
            type: string
            enum:
{%- if enable_custom_labels %}
{{ custom_labels }}
{%- else %}
                - Bug fix
                - Tests
                - Refactoring
                - Enhancement
                - Documentation
                - Other
{%- endif %}
{%- if require_score %}
    Score:
        type: int

@@ -85,14 +96,22 @@ PR Analysis:
            code diff changes are too scattered, then the PR is not focused. Explain
            your answer shortly.
{%- endif %}
{%- if require_estimate_effort_to_review %}
    Estimated effort to review [1-5]:
        type: string
        description: >-
            Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review, 5 means long and hard review.
            Take into account the size, complexity, quality, and the needed changes of the PR code diff.
            Explain your answer shortly (1-2 sentences).
{%- endif %}
PR Feedback:
    General suggestions:
        type: string
        description: |-
-           General suggestions and feedback for the contributors and maintainers of
-           this PR. May include important suggestions for the overall structure,
-           primary purpose, best practices, critical bugs, and other aspects of the
-           PR. Don't address PR title and description, or lack of tests. Explain your suggestions.
+           General suggestions and feedback for the contributors and maintainers of this PR.
+           May include important suggestions for the overall structure,
+           primary purpose, best practices, critical bugs, and other aspects of the PR.
+           Don't address PR title and description, or lack of tests. Explain your suggestions.
{%- if num_code_suggestions > 0 %}
    Code feedback:
        type: array

@@ -105,11 +124,10 @@ PR Feedback:
        suggestion:
            type: string
            description: |-
-               a concrete suggestion for meaningfully improving the new PR code. Also
-               describe how, specifically, the suggestion can be applied to new PR
-               code. Add tags with importance measure that matches each suggestion
-               ('important' or 'medium'). Do not make suggestions for updating or
-               adding docstrings, renaming PR title and description, or linter like.
+               a concrete suggestion for meaningfully improving the new PR code.
+               Also describe how, specifically, the suggestion can be applied to new PR code.
+               Add tags with importance measure that matches each suggestion ('important' or 'medium').
+               Do not make suggestions for updating or adding docstrings, renaming PR title and description, or linter like.
        relevant line:
            type: string
            description: |-

@@ -121,8 +139,7 @@ PR Feedback:
    Security concerns:
        type: string
        description: >-
-           yes\\no question: does this PR code introduce possible security concerns or
-           issues, like SQL injection, XSS, CSRF, and others ? If answered 'yes',explain your answer shortly
+           yes\\no question: does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others? If answered 'yes', explain your answer briefly.
{%- endif %}
```

@@ -143,6 +160,9 @@ PR Analysis:
{%- if require_focused %}
    Focused PR: no, because ...
{%- endif %}
{%- if require_estimate_effort_to_review %}
    Estimated effort to review [1-5]: 3, because ...
{%- endif %}
PR Feedback:
    General PR suggestions: |-
        ...

@@ -185,7 +205,9 @@ Here are questions to better understand the PR. Use the answers to provide better feedback.
{{question_str|trim}}

User answers:
'
{{answer_str|trim}}
'
######
{%- endif %}
@@ -8,7 +8,9 @@ Your task is to update the CHANGELOG.md file of the project, to shortly summarize
{%- if extra_instructions %}

Extra instructions from the user:
'
{{ extra_instructions }}
'
{%- endif %}
"""
pr_agent/tools/pr_add_docs.py (new file, 179 lines)
@@ -0,0 +1,179 @@
import copy
import textwrap
from typing import Dict

from jinja2 import Environment, StrictUndefined

from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger


class PRAddDocs:
    def __init__(self, pr_url: str, cli_mode=False, args: list = None):

        self.git_provider = get_git_provider()(pr_url)
        self.main_language = get_main_pr_language(
            self.git_provider.get_languages(), self.git_provider.get_files()
        )

        self.ai_handler = AiHandler()
        self.patches_diff = None
        self.prediction = None
        self.cli_mode = cli_mode
        self.vars = {
            "title": self.git_provider.pr.title,
            "branch": self.git_provider.get_pr_branch(),
            "description": self.git_provider.get_pr_description(),
            "language": self.main_language,
            "diff": "",  # empty diff for initial calculation
            "extra_instructions": get_settings().pr_add_docs.extra_instructions,
            "commit_messages_str": self.git_provider.get_commit_messages(),
            'docs_for_language': get_docs_for_language(self.main_language,
                                                       get_settings().pr_add_docs.docs_style),
        }
        self.token_handler = TokenHandler(self.git_provider.pr,
                                          self.vars,
                                          get_settings().pr_add_docs_prompt.system,
                                          get_settings().pr_add_docs_prompt.user)

    async def run(self):
        try:
            get_logger().info('Generating code docs for PR...')
            if get_settings().config.publish_output:
                self.git_provider.publish_comment("Generating Documentation...", is_temporary=True)

            get_logger().info('Preparing PR documentation...')
            await retry_with_fallback_models(self._prepare_prediction)
            data = self._prepare_pr_code_docs()
            if (not data) or (not 'Code Documentation' in data):
                get_logger().info('No code documentation found for PR.')
                return

            if get_settings().config.publish_output:
                get_logger().info('Pushing PR documentation...')
                self.git_provider.remove_initial_comment()
                get_logger().info('Pushing inline code documentation...')
                self.push_inline_docs(data)
        except Exception as e:
            get_logger().error(f"Failed to generate code documentation for PR, error: {e}")

    async def _prepare_prediction(self, model: str):
        get_logger().info('Getting PR diff...')

        # Disable adding docs to scripts and other non-relevant text files
        from pr_agent.algo.language_handler import bad_extensions
        bad_extensions += get_settings().docs_blacklist_extensions.docs_blacklist

        self.patches_diff = get_pr_diff(self.git_provider,
                                        self.token_handler,
                                        model,
                                        add_line_numbers_to_hunks=True,
                                        disable_extra_lines=False)

        get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):
        variables = copy.deepcopy(self.vars)
        variables["diff"] = self.patches_diff  # update diff
        environment = Environment(undefined=StrictUndefined)
        system_prompt = environment.from_string(get_settings().pr_add_docs_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_add_docs_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)

        return response

    def _prepare_pr_code_docs(self) -> Dict:
        docs = self.prediction.strip()
        data = load_yaml(docs)
        if isinstance(data, list):
            data = {'Code Documentation': data}
        return data

    def push_inline_docs(self, data):
        docs = []

        if not data['Code Documentation']:
            return self.git_provider.publish_comment('No code documentation found to improve this PR.')

        for d in data['Code Documentation']:
            try:
                if get_settings().config.verbosity_level >= 2:
                    get_logger().info(f"add_docs: {d}")
                relevant_file = d['relevant file'].strip()
                relevant_line = int(d['relevant line'])  # absolute position
                documentation = d['documentation']
                doc_placement = d['doc placement'].strip()
                if documentation:
                    new_code_snippet = self.dedent_code(relevant_file, relevant_line, documentation, doc_placement,
                                                        add_original_line=True)

                    body = f"**Suggestion:** Proposed documentation\n```suggestion\n" + new_code_snippet + "\n```"
                    docs.append({'body': body, 'relevant_file': relevant_file,
                                 'relevant_lines_start': relevant_line,
                                 'relevant_lines_end': relevant_line})
            except Exception:
                if get_settings().config.verbosity_level >= 2:
                    get_logger().info(f"Could not parse code docs: {d}")

        is_successful = self.git_provider.publish_code_suggestions(docs)
        if not is_successful:
            get_logger().info("Failed to publish code docs, trying to publish each docs separately")
            for doc_suggestion in docs:
                self.git_provider.publish_code_suggestions([doc_suggestion])

    def dedent_code(self, relevant_file, relevant_lines_start, new_code_snippet, doc_placement='after',
                    add_original_line=False):
        try:  # dedent code snippet
            self.diff_files = self.git_provider.diff_files if self.git_provider.diff_files \
                else self.git_provider.get_diff_files()
            original_initial_line = None
            for file in self.diff_files:
                if file.filename.strip() == relevant_file:
                    original_initial_line = file.head_file.splitlines()[relevant_lines_start - 1]
                    break
            if original_initial_line:
                if doc_placement == 'after':
                    line = file.head_file.splitlines()[relevant_lines_start]
                else:
                    line = original_initial_line
                suggested_initial_line = new_code_snippet.splitlines()[0]
                original_initial_spaces = len(line) - len(line.lstrip())
                suggested_initial_spaces = len(suggested_initial_line) - len(suggested_initial_line.lstrip())
                delta_spaces = original_initial_spaces - suggested_initial_spaces
                if delta_spaces > 0:
                    new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
                if add_original_line:
                    if doc_placement == 'after':
                        new_code_snippet = original_initial_line + "\n" + new_code_snippet
                    else:
                        new_code_snippet = new_code_snippet.rstrip() + "\n" + original_initial_line
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")

        return new_code_snippet
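To make the indentation handling in `dedent_code` concrete, a small illustration of the `textwrap.indent` step (the snippet and spacing are invented for the example):

```python
import textwrap

# Suppose the anchor line is "    def func1():" (4 leading spaces) and the generated
# docstring starts at column 0, so delta_spaces = 4 - 0 = 4.
snippet = '"""\nThis is a python docstring for func1.\n"""'
print(textwrap.indent(snippet, 4 * " "))
# Every line of the docstring is shifted right by four spaces to match the code.
```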


def get_docs_for_language(language, style):
    language = language.lower()
    if language == 'java':
        return "Javadocs"
    elif language in ['python', 'lisp', 'clojure']:
        return f"Docstring ({style})"
    elif language in ['javascript', 'typescript']:
        return "JSdocs"
    elif language == 'c++':
        return "Doxygen"
    else:
        return "Docs"
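A quick usage sketch; the return values follow directly from the branches above:

```python
get_docs_for_language("Python", "Sphinx Style")  # -> "Docstring (Sphinx Style)"
get_docs_for_language("Java", "Sphinx Style")    # -> "Javadocs"
get_docs_for_language("Go", "Sphinx Style")      # -> "Docs" (fallback for unlisted languages)
```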
@@ -1,16 +1,17 @@
import copy
-import logging
import textwrap
-from typing import List, Dict
+from typing import Dict, List

from jinja2 import Environment, StrictUndefined

from pr_agent.algo.ai_handler import AiHandler
-from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_multi_diffs
+from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml
from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import BitbucketProvider, get_git_provider
+from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
+from pr_agent.log import get_logger


class PRCodeSuggestions:

@@ -22,7 +23,10 @@ class PRCodeSuggestions:
        )

        # extended mode
-       self.is_extended = any(["extended" in arg for arg in args])
+       try:
+           self.is_extended = any(["extended" in arg for arg in args])
+       except:
+           self.is_extended = False
        if self.is_extended:
            num_code_suggestions = get_settings().pr_code_suggestions.num_code_suggestions_per_chunk
        else:

@@ -48,37 +52,43 @@ class PRCodeSuggestions:
                                          get_settings().pr_code_suggestions_prompt.user)

    async def run(self):
-       logging.info('Generating code suggestions for PR...')
-       if get_settings().config.publish_output:
-           self.git_provider.publish_comment("Preparing review...", is_temporary=True)
+       try:
+           get_logger().info('Generating code suggestions for PR...')
+           if get_settings().config.publish_output:
+               self.git_provider.publish_comment("Preparing review...", is_temporary=True)

-       logging.info('Preparing PR review...')
-       if not self.is_extended:
-           await retry_with_fallback_models(self._prepare_prediction)
-           data = self._prepare_pr_code_suggestions()
-       else:
-           data = await retry_with_fallback_models(self._prepare_prediction_extended)
+           get_logger().info('Preparing PR review...')
+           if not self.is_extended:
+               await retry_with_fallback_models(self._prepare_prediction)
+               data = self._prepare_pr_code_suggestions()
+           else:
+               data = await retry_with_fallback_models(self._prepare_prediction_extended)
+           if (not data) or (not 'Code suggestions' in data):
+               get_logger().info('No code suggestions found for PR.')
+               return

-       if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
-               (self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions):
-           logging.info('Ranking Suggestions...')
-           data['Code suggestions'] = await self.rank_suggestions(data['Code suggestions'])
+           if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
+                   (self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions):
+               get_logger().info('Ranking Suggestions...')
+               data['Code suggestions'] = await self.rank_suggestions(data['Code suggestions'])

-       if get_settings().config.publish_output:
-           logging.info('Pushing PR review...')
-           self.git_provider.remove_initial_comment()
-           logging.info('Pushing inline code suggestions...')
-           self.push_inline_code_suggestions(data)
+           if get_settings().config.publish_output:
+               get_logger().info('Pushing PR review...')
+               self.git_provider.remove_initial_comment()
+               get_logger().info('Pushing inline code suggestions...')
+               self.push_inline_code_suggestions(data)
+       except Exception as e:
+           get_logger().error(f"Failed to generate code suggestions for PR, error: {e}")

    async def _prepare_prediction(self, model: str):
-       logging.info('Getting PR diff...')
+       get_logger().info('Getting PR diff...')
        self.patches_diff = get_pr_diff(self.git_provider,
                                        self.token_handler,
                                        model,
                                        add_line_numbers_to_hunks=True,
                                        disable_extra_lines=True)

-       logging.info('Getting AI prediction...')
+       get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):

@@ -88,8 +98,8 @@ class PRCodeSuggestions:
        system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
-           logging.info(f"\nSystem prompt:\n{system_prompt}")
-           logging.info(f"\nUser prompt:\n{user_prompt}")
+           get_logger().info(f"\nSystem prompt:\n{system_prompt}")
+           get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)

@@ -111,13 +121,10 @@ class PRCodeSuggestions:
        for d in data['Code suggestions']:
            try:
                if get_settings().config.verbosity_level >= 2:
-                   logging.info(f"suggestion: {d}")
+                   get_logger().info(f"suggestion: {d}")
                relevant_file = d['relevant file'].strip()
-               relevant_lines_str = d['relevant lines'].strip()
-               if ',' in relevant_lines_str:  # handling 'relevant lines': '181, 190' or '178-184, 188-194'
-                   relevant_lines_str = relevant_lines_str.split(',')[0]
-               relevant_lines_start = int(relevant_lines_str.split('-')[0])  # absolute position
-               relevant_lines_end = int(relevant_lines_str.split('-')[-1])
+               relevant_lines_start = int(d['relevant lines start'])  # absolute position
+               relevant_lines_end = int(d['relevant lines end'])
                content = d['suggestion content']
                new_code_snippet = d['improved code']

@@ -130,11 +137,11 @@ class PRCodeSuggestions:
                             'relevant_lines_end': relevant_lines_end})
            except Exception:
                if get_settings().config.verbosity_level >= 2:
-                   logging.info(f"Could not parse suggestion: {d}")
+                   get_logger().info(f"Could not parse suggestion: {d}")

        is_successful = self.git_provider.publish_code_suggestions(code_suggestions)
        if not is_successful:
-           logging.info("Failed to publish code suggestions, trying to publish each suggestion separately")
+           get_logger().info("Failed to publish code suggestions, trying to publish each suggestion separately")
            for code_suggestion in code_suggestions:
                self.git_provider.publish_code_suggestions([code_suggestion])

@@ -156,19 +163,19 @@ class PRCodeSuggestions:
                    new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
-               logging.info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
+               get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")

        return new_code_snippet

    async def _prepare_prediction_extended(self, model: str) -> dict:
-       logging.info('Getting PR diff...')
+       get_logger().info('Getting PR diff...')
        patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
                                               max_calls=get_settings().pr_code_suggestions.max_number_of_calls)

-       logging.info('Getting multi AI predictions...')
+       get_logger().info('Getting multi AI predictions...')
        prediction_list = []
        for i, patches_diff in enumerate(patches_diff_list):
-           logging.info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
+           get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
            self.patches_diff = patches_diff
            prediction = await self._get_prediction(model)
            prediction_list.append(prediction)

@@ -216,8 +223,8 @@ class PRCodeSuggestions:
                                                                                           variables)
        user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
-           logging.info(f"\nSystem prompt:\n{system_prompt}")
-           logging.info(f"\nUser prompt:\n{user_prompt}")
+           get_logger().info(f"\nSystem prompt:\n{system_prompt}")
+           get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
                                                                        user=user_prompt)

@@ -232,7 +239,7 @@ class PRCodeSuggestions:
            data_sorted = data_sorted[:new_len]
        except Exception as e:
            if get_settings().config.verbosity_level >= 1:
-               logging.info(f"Could not sort suggestions, error: {e}")
+               get_logger().info(f"Could not sort suggestions, error: {e}")
            data_sorted = suggestion_list

        return data_sorted
@@ -1,7 +1,6 @@
-import logging
-
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
+from pr_agent.log import get_logger


class PRConfig:

@@ -19,11 +18,11 @@ class PRConfig:
        self.git_provider = get_git_provider()(pr_url)

    async def run(self):
-       logging.info('Getting configuration settings...')
-       logging.info('Preparing configs...')
+       get_logger().info('Getting configuration settings...')
+       get_logger().info('Preparing configs...')
        pr_comment = self._prepare_pr_configs()
        if get_settings().config.publish_output:
-           logging.info('Pushing configs...')
+           get_logger().info('Pushing configs...')
            self.git_provider.publish_comment(pr_comment)
        self.git_provider.remove_initial_comment()
        return ""

@@ -44,5 +43,5 @@ class PRConfig:
            comment_str += f"\n{header.lower()}.{key.lower()} = {repr(value) if isinstance(value, str) else value}"
            comment_str += " "
        if get_settings().config.verbosity_level >= 2:
-           logging.info(f"comment_str:\n{comment_str}")
+           get_logger().info(f"comment_str:\n{comment_str}")
        return comment_str
@@ -1,6 +1,5 @@
import copy
-import json
-import logging
import re
from typing import List, Tuple

from jinja2 import Environment, StrictUndefined

@@ -8,10 +7,11 @@ from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml
+from pr_agent.algo.utils import load_yaml, set_custom_labels
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
+from pr_agent.log import get_logger


class PRDescription:

@@ -28,6 +28,7 @@ class PRDescription:
        self.main_pr_language = get_main_pr_language(
            self.git_provider.get_languages(), self.git_provider.get_files()
        )
+       self.pr_id = self.git_provider.get_pr_id()

        # Initialize the AI handler
        self.ai_handler = AiHandler()

@@ -36,11 +37,15 @@ class PRDescription:
        self.vars = {
            "title": self.git_provider.pr.title,
            "branch": self.git_provider.get_pr_branch(),
-           "description": self.git_provider.get_pr_description(),
+           "description": self.git_provider.get_pr_description(full=False),
            "language": self.main_pr_language,
            "diff": "",  # empty diff for initial calculation
            "use_bullet_points": get_settings().pr_description.use_bullet_points,
            "extra_instructions": get_settings().pr_description.extra_instructions,
-           "commit_messages_str": self.git_provider.get_commit_messages()
+           "commit_messages_str": self.git_provider.get_commit_messages(),
+           "enable_custom_labels": get_settings().config.enable_custom_labels,
+           "custom_labels": "",
+           "custom_labels_examples": "",
        }

        self.user_description = self.git_provider.get_user_description()

@@ -61,27 +66,44 @@ class PRDescription:
        """
        Generates a PR description using an AI model and publishes it to the PR.
        """
-       logging.info('Generating a PR description...')
-       if get_settings().config.publish_output:
-           self.git_provider.publish_comment("Preparing pr description...", is_temporary=True)
-
-       await retry_with_fallback_models(self._prepare_prediction)
-
-       logging.info('Preparing answer...')
-       pr_title, pr_body, pr_types, markdown_text = self._prepare_pr_answer()
-
-       if get_settings().config.publish_output:
-           logging.info('Pushing answer...')
-           if get_settings().pr_description.publish_description_as_comment:
-               self.git_provider.publish_comment(markdown_text)
-           else:
-               self.git_provider.publish_description(pr_title, pr_body)
-               if self.git_provider.is_supported("get_labels"):
-                   current_labels = self.git_provider.get_labels()
-                   if current_labels is None:
-                       current_labels = []
-                   self.git_provider.publish_labels(pr_types + current_labels)
-           self.git_provider.remove_initial_comment()
-       return None
+       try:
+           get_logger().info(f"Generating a PR description {self.pr_id}")
+           if get_settings().config.publish_output:
+               self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)
+
+           await retry_with_fallback_models(self._prepare_prediction)
+
+           get_logger().info(f"Preparing answer {self.pr_id}")
+           if self.prediction:
+               self._prepare_data()
+           else:
+               return None
+
+           pr_labels = []
+           if get_settings().pr_description.publish_labels:
+               pr_labels = self._prepare_labels()
+
+           if get_settings().pr_description.use_description_markers:
+               pr_title, pr_body = self._prepare_pr_answer_with_markers()
+           else:
+               pr_title, pr_body, = self._prepare_pr_answer()
+           full_markdown_description = f"## Title\n\n{pr_title}\n\n___\n{pr_body}"
+
+           if get_settings().config.publish_output:
+               get_logger().info(f"Pushing answer {self.pr_id}")
+               if get_settings().pr_description.publish_description_as_comment:
+                   self.git_provider.publish_comment(full_markdown_description)
+               else:
+                   self.git_provider.publish_description(pr_title, pr_body)
+                   if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
+                       current_labels = self.git_provider.get_labels()
+                       if current_labels is None:
+                           current_labels = []
+                       self.git_provider.publish_labels(pr_labels + current_labels)
+               self.git_provider.remove_initial_comment()
+       except Exception as e:
+           get_logger().error(f"Error generating PR description {self.pr_id}: {e}")

        return ""

@@ -99,9 +121,12 @@ class PRDescription:
            Any exceptions raised by the 'get_pr_diff' and '_get_prediction' functions.

        """
-       logging.info('Getting PR diff...')
+       if get_settings().pr_description.use_description_markers and 'pr_agent:' not in self.user_description:
+           return None
+
+       get_logger().info(f"Getting PR diff {self.pr_id}")
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-       logging.info('Getting AI prediction...')
+       get_logger().info(f"Getting AI prediction {self.pr_id}")
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str) -> str:

@@ -118,12 +143,13 @@ class PRDescription:
        variables["diff"] = self.patches_diff  # update diff

        environment = Environment(undefined=StrictUndefined)
+       set_custom_labels(variables)
        system_prompt = environment.from_string(get_settings().pr_description_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_description_prompt.user).render(variables)

        if get_settings().config.verbosity_level >= 2:
-           logging.info(f"\nSystem prompt:\n{system_prompt}")
-           logging.info(f"\nUser prompt:\n{user_prompt}")
+           get_logger().info(f"\nSystem prompt:\n{system_prompt}")
+           get_logger().info(f"\nUser prompt:\n{user_prompt}")

        response, finish_reason = await self.ai_handler.chat_completion(
            model=model,

@@ -134,34 +160,75 @@ class PRDescription:

        return response

-   def _prepare_pr_answer(self) -> Tuple[str, str, List[str], str]:
+   def _prepare_data(self):
+       # Load the AI prediction data into a dictionary
+       self.data = load_yaml(self.prediction.strip())
+
+       if get_settings().pr_description.add_original_user_description and self.user_description:
+           self.data["User Description"] = self.user_description
+
+   def _prepare_labels(self) -> List[str]:
+       pr_types = []
+
+       # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
+       if 'PR Type' in self.data:
+           if type(self.data['PR Type']) == list:
+               pr_types = self.data['PR Type']
+           elif type(self.data['PR Type']) == str:
+               pr_types = self.data['PR Type'].split(',')
+
+       return pr_types
+
+   def _prepare_pr_answer_with_markers(self) -> Tuple[str, str]:
+       get_logger().info(f"Using description marker replacements {self.pr_id}")
+       title = self.vars["title"]
+       body = self.user_description
+       if get_settings().pr_description.include_generated_by_header:
+           ai_header = f"### 🤖 Generated by PR Agent at {self.git_provider.last_commit_id.sha}\n\n"
+       else:
+           ai_header = ""
+
+       ai_type = self.data.get('PR Type')
+       if ai_type and not re.search(r'<!--\s*pr_agent:type\s*-->', body):
+           pr_type = f"{ai_header}{ai_type}"
+           body = body.replace('pr_agent:type', pr_type)
+
+       ai_summary = self.data.get('PR Description')
+       if ai_summary and not re.search(r'<!--\s*pr_agent:summary\s*-->', body):
+           summary = f"{ai_header}{ai_summary}"
+           body = body.replace('pr_agent:summary', summary)
+
+       if not re.search(r'<!--\s*pr_agent:walkthrough\s*-->', body):
+           ai_walkthrough = self.data.get('PR Main Files Walkthrough')
+           if ai_walkthrough:
+               walkthrough = str(ai_header)
+               for file in ai_walkthrough:
+                   filename = file['filename'].replace("'", "`")
+                   description = file['changes in file'].replace("'", "`")
+                   walkthrough += f'- `{filename}`: {description}\n'
+
+               body = body.replace('pr_agent:walkthrough', walkthrough)
+
+       return title, body
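To see what `_prepare_pr_answer_with_markers` operates on, here is a hedged sketch of a user description template and the substitution it undergoes (the template text is invented; only the `pr_agent:...` marker names come from the code above):

```python
# Invented template; only the pr_agent:... marker names are taken from the code above.
body = (
    "## Type\npr_agent:type\n\n"
    "## Summary\npr_agent:summary\n\n"
    "## Walkthrough\npr_agent:walkthrough\n"
)
# Each plain marker is replaced with the AI-generated content (optionally prefixed
# with the "Generated by PR Agent" header); a marker written as an HTML comment,
# e.g. "<!-- pr_agent:summary -->", is detected by re.search and left untouched.
```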
|
||||
|
||||
def _prepare_pr_answer(self) -> Tuple[str, str]:
|
||||
"""
|
||||
Prepare the PR description based on the AI prediction data.
|
||||
|
||||
Returns:
|
||||
- title: a string containing the PR title.
|
||||
- pr_body: a string containing the PR body in a markdown format.
|
||||
- pr_types: a list of strings containing the PR types.
|
||||
- markdown_text: a string containing the AI prediction data in a markdown format. used for publishing a comment
|
||||
- pr_body: a string containing the PR description body in a markdown format.
|
||||
"""
|
||||
# Load the AI prediction data into a dictionary
|
||||
data = load_yaml(self.prediction.strip())
|
||||
|
||||
if get_settings().pr_description.add_original_user_description and self.user_description:
|
||||
data["User Description"] = self.user_description
|
||||
|
||||
# Initialization
|
||||
pr_types = []
|
||||
|
||||
# If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
|
||||
if 'PR Type' in data:
|
||||
if type(data['PR Type']) == list:
|
||||
pr_types = data['PR Type']
|
||||
elif type(data['PR Type']) == str:
|
||||
pr_types = data['PR Type'].split(',')
|
||||
# Iterate over the dictionary items and append the key and value to 'markdown_text' in a markdown format
|
||||
markdown_text = ""
|
||||
for key, value in self.data.items():
|
||||
markdown_text += f"## {key}\n\n"
|
||||
markdown_text += f"{value}\n\n"
|
||||
|
||||
# Remove the 'PR Title' key from the dictionary
|
||||
ai_title = data.pop('PR Title')
|
||||
ai_title = self.data.pop('PR Title', self.vars["title"])
|
||||
if get_settings().pr_description.keep_original_user_title:
|
||||
# Assign the original PR title to the 'title' variable
|
||||
title = self.vars["title"]
|
||||
@ -172,25 +239,27 @@ class PRDescription:
|
||||
# Iterate over the remaining dictionary items and append the key and value to 'pr_body' in a markdown format,
|
||||
# except for the items containing the word 'walkthrough'
|
||||
pr_body = ""
|
||||
for idx, (key, value) in enumerate(data.items()):
|
||||
for idx, (key, value) in enumerate(self.data.items()):
|
||||
pr_body += f"## {key}:\n"
|
||||
if 'walkthrough' in key.lower():
|
||||
# for filename, description in value.items():
|
||||
if self.git_provider.is_supported("gfm_markdown"):
|
||||
pr_body += "<details> <summary>files:</summary>\n\n"
|
||||
for file in value:
|
||||
filename = file['filename'].replace("'", "`")
|
||||
description = file['changes in file']
|
||||
pr_body += f'`{filename}`: {description}\n'
|
||||
if self.git_provider.is_supported("gfm_markdown"):
|
||||
pr_body +="</details>\n"
|
||||
else:
|
||||
# if the value is a list, join its items by comma
|
||||
if type(value) == list:
|
||||
value = ', '.join(v for v in value)
|
||||
pr_body += f"{value}\n"
|
||||
if idx < len(data) - 1:
|
||||
if idx < len(self.data) - 1:
|
||||
pr_body += "\n___\n"
|
||||
|
||||
markdown_text = f"## Title\n\n{title}\n\n___\n{pr_body}"
|
||||
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
logging.info(f"title:\n{title}\n{pr_body}")
|
||||
get_logger().info(f"title:\n{title}\n{pr_body}")
|
||||
|
||||
return title, pr_body, pr_types, markdown_text
|
||||
return title, pr_body
|
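The marker flow above substitutes pr_agent:type, pr_agent:summary, and pr_agent:walkthrough placeholders inside the user's own description, skipping any marker that is already wrapped in an HTML comment. A minimal sketch of that substitution logic, using hypothetical input data:

import re

# Hypothetical AI prediction and user description, mirroring the marker flow above.
data = {"PR Type": "Enhancement", "PR Description": "Adds label generation."}
body = "Type: pr_agent:type\n\nSummary:\npr_agent:summary\n"

for marker, key in (("type", "PR Type"), ("summary", "PR Description")):
    value = data.get(key)
    # Skip markers that are commented out, e.g. <!-- pr_agent:type -->
    if value and not re.search(rf'<!--\s*pr_agent:{marker}\s*-->', body):
        body = body.replace(f'pr_agent:{marker}', str(value))

print(body)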
pr_agent/tools/pr_generate_labels.py (new file, 163 lines)
@@ -0,0 +1,163 @@
import copy
import re
from typing import List, Tuple

from jinja2 import Environment, StrictUndefined

from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml, set_custom_labels
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger


class PRGenerateLabels:
    def __init__(self, pr_url: str, args: list = None):
        """
        Initialize the PRGenerateLabels object with the necessary attributes and objects for generating labels
        corresponding to the PR using an AI model.
        Args:
            pr_url (str): The URL of the pull request.
            args (list, optional): List of arguments passed to the PRGenerateLabels class. Defaults to None.
        """
        # Initialize the git provider and main PR language
        self.git_provider = get_git_provider()(pr_url)
        self.main_pr_language = get_main_pr_language(
            self.git_provider.get_languages(), self.git_provider.get_files()
        )
        self.pr_id = self.git_provider.get_pr_id()

        # Initialize the AI handler
        self.ai_handler = AiHandler()

        # Initialize the variables dictionary
        self.vars = {
            "title": self.git_provider.pr.title,
            "branch": self.git_provider.get_pr_branch(),
            "description": self.git_provider.get_pr_description(full=False),
            "language": self.main_pr_language,
            "diff": "",  # empty diff for initial calculation
            "use_bullet_points": get_settings().pr_description.use_bullet_points,
            "extra_instructions": get_settings().pr_description.extra_instructions,
            "commit_messages_str": self.git_provider.get_commit_messages(),
            "custom_labels": "",
            "custom_labels_examples": "",
            "enable_custom_labels": get_settings().config.enable_custom_labels,
        }

        # Initialize the token handler
        self.token_handler = TokenHandler(
            self.git_provider.pr,
            self.vars,
            get_settings().pr_custom_labels_prompt.system,
            get_settings().pr_custom_labels_prompt.user,
        )

        # Initialize patches_diff and prediction attributes
        self.patches_diff = None
        self.prediction = None

    async def run(self):
        """
        Generates PR labels using an AI model and publishes them to the PR.
        """

        try:
            get_logger().info(f"Generating PR labels {self.pr_id}")
            if get_settings().config.publish_output:
                self.git_provider.publish_comment("Preparing PR labels...", is_temporary=True)

            await retry_with_fallback_models(self._prepare_prediction)

            get_logger().info(f"Preparing answer {self.pr_id}")
            if self.prediction:
                self._prepare_data()
            else:
                return None

            pr_labels = self._prepare_labels()

            if get_settings().config.publish_output:
                get_logger().info(f"Pushing labels {self.pr_id}")
                if self.git_provider.is_supported("get_labels"):
                    current_labels = self.git_provider.get_labels()
                    if current_labels is None:
                        current_labels = []
                    self.git_provider.publish_labels(pr_labels + current_labels)
                self.git_provider.remove_initial_comment()
        except Exception as e:
            get_logger().error(f"Error generating PR labels {self.pr_id}: {e}")

        return ""

    async def _prepare_prediction(self, model: str) -> None:
        """
        Prepare the AI prediction for the PR labels based on the provided model.

        Args:
            model (str): The name of the model to be used for generating the prediction.

        Returns:
            None

        Raises:
            Any exceptions raised by the 'get_pr_diff' and '_get_prediction' functions.

        """

        get_logger().info(f"Getting PR diff {self.pr_id}")
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
        get_logger().info(f"Getting AI prediction {self.pr_id}")
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str) -> str:
        """
        Generate an AI prediction for the PR labels based on the provided model.

        Args:
            model (str): The name of the model to be used for generating the prediction.

        Returns:
            str: The generated AI prediction.
        """
        variables = copy.deepcopy(self.vars)
        variables["diff"] = self.patches_diff  # update diff

        environment = Environment(undefined=StrictUndefined)
        set_custom_labels(variables)
        system_prompt = environment.from_string(get_settings().pr_custom_labels_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_custom_labels_prompt.user).render(variables)

        if get_settings().config.verbosity_level >= 2:
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")

        response, finish_reason = await self.ai_handler.chat_completion(
            model=model,
            temperature=0.2,
            system=system_prompt,
            user=user_prompt
        )

        return response

    def _prepare_data(self):
        # Load the AI prediction data into a dictionary
        self.data = load_yaml(self.prediction.strip())

    def _prepare_labels(self) -> List[str]:
        pr_types = []

        # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
        if 'PR Type' in self.data:
            if type(self.data['PR Type']) == list:
                pr_types = self.data['PR Type']
            elif type(self.data['PR Type']) == str:
                pr_types = self.data['PR Type'].split(',')

        return pr_types
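A sketch of how the new tool might be driven from an async context; the PR URL below is a placeholder, and the call assumes provider credentials and settings are already configured the way the other tools expect:

import asyncio

from pr_agent.tools.pr_generate_labels import PRGenerateLabels

async def main():
    # Placeholder URL; any supported provider PR URL would go here.
    tool = PRGenerateLabels("https://github.com/org/repo/pull/123")
    await tool.run()

asyncio.run(main())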
@@ -1,5 +1,4 @@
import copy
import logging

from jinja2 import Environment, StrictUndefined

@@ -9,6 +8,7 @@ from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger


class PRInformationFromUser:
@@ -34,22 +34,22 @@ class PRInformationFromUser:
        self.prediction = None

    async def run(self):
        logging.info('Generating question to the user...')
        get_logger().info('Generating question to the user...')
        if get_settings().config.publish_output:
            self.git_provider.publish_comment("Preparing questions...", is_temporary=True)
        await retry_with_fallback_models(self._prepare_prediction)
        logging.info('Preparing questions...')
        get_logger().info('Preparing questions...')
        pr_comment = self._prepare_pr_answer()
        if get_settings().config.publish_output:
            logging.info('Pushing questions...')
            get_logger().info('Pushing questions...')
            self.git_provider.publish_comment(pr_comment)
            self.git_provider.remove_initial_comment()
        return ""

    async def _prepare_prediction(self, model):
        logging.info('Getting PR diff...')
        get_logger().info('Getting PR diff...')
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
        logging.info('Getting AI prediction...')
        get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):
@@ -59,8 +59,8 @@ class PRInformationFromUser:
        system_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
            logging.info(f"\nSystem prompt:\n{system_prompt}")
            logging.info(f"\nUser prompt:\n{user_prompt}")
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)
        return response
@@ -68,7 +68,7 @@ class PRInformationFromUser:
    def _prepare_pr_answer(self) -> str:
        model_output = self.prediction.strip()
        if get_settings().config.verbosity_level >= 2:
            logging.info(f"answer_str:\n{model_output}")
            get_logger().info(f"answer_str:\n{model_output}")
        answer_str = f"{model_output}\n\n Please respond to the questions above in the following format:\n\n" +\
                     "\n>/answer\n>1) ...\n>2) ...\n>...\n"
        return answer_str
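These diffs consistently swap the stdlib logging module for a project-level get_logger() from pr_agent.log, and loguru==0.7.2 is added to requirements.txt further below. The actual pr_agent/log.py is not shown in this comparison, so the following is only a plausible sketch of such a wrapper, assuming loguru as the backend:

# Sketch of a pr_agent.log-style wrapper; the real module is not part of
# this diff, so treat the details here as an assumption.
import sys
from loguru import logger

def setup_logger(level: str = "INFO"):
    logger.remove()  # drop loguru's default stderr handler
    logger.add(sys.stdout, level=level)
    return logger

def get_logger():
    return logger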
@@ -1,5 +1,4 @@
import copy
import logging

from jinja2 import Environment, StrictUndefined

@@ -9,6 +8,7 @@ from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger


class PRQuestions:
@@ -44,22 +44,22 @@ class PRQuestions:
        return question_str

    async def run(self):
        logging.info('Answering a PR question...')
        get_logger().info('Answering a PR question...')
        if get_settings().config.publish_output:
            self.git_provider.publish_comment("Preparing answer...", is_temporary=True)
        await retry_with_fallback_models(self._prepare_prediction)
        logging.info('Preparing answer...')
        get_logger().info('Preparing answer...')
        pr_comment = self._prepare_pr_answer()
        if get_settings().config.publish_output:
            logging.info('Pushing answer...')
            get_logger().info('Pushing answer...')
            self.git_provider.publish_comment(pr_comment)
            self.git_provider.remove_initial_comment()
        return ""

    async def _prepare_prediction(self, model: str):
        logging.info('Getting PR diff...')
        get_logger().info('Getting PR diff...')
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
        logging.info('Getting AI prediction...')
        get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):
@@ -69,8 +69,8 @@ class PRQuestions:
        system_prompt = environment.from_string(get_settings().pr_questions_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_questions_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
            logging.info(f"\nSystem prompt:\n{system_prompt}")
            logging.info(f"\nUser prompt:\n{user_prompt}")
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)
        return response
@@ -79,5 +79,5 @@ class PRQuestions:
        answer_str = f"Question: {self.question_str}\n\n"
        answer_str += f"Answer:\n{self.prediction.strip()}\n\n"
        if get_settings().config.verbosity_level >= 2:
            logging.info(f"answer_str:\n{answer_str}")
            get_logger().info(f"answer_str:\n{answer_str}")
        return answer_str
@ -1,6 +1,4 @@
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
from collections import OrderedDict
|
||||
from typing import List, Tuple
|
||||
|
||||
@ -9,13 +7,13 @@ from jinja2 import Environment, StrictUndefined
|
||||
from yaml import SafeLoader
|
||||
|
||||
from pr_agent.algo.ai_handler import AiHandler
|
||||
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, \
|
||||
find_line_number_of_relevant_line_in_file, clip_tokens
|
||||
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
|
||||
from pr_agent.algo.token_handler import TokenHandler
|
||||
from pr_agent.algo.utils import convert_to_markdown, try_fix_json, try_fix_yaml, load_yaml
|
||||
from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml, set_custom_labels
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.git_providers import get_git_provider
|
||||
from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
|
||||
from pr_agent.log import get_logger
|
||||
from pr_agent.servers.help import actions_help_text, bot_help_text
|
||||
|
||||
|
||||
@ -59,11 +57,14 @@ class PRReviewer:
|
||||
"require_tests": get_settings().pr_reviewer.require_tests_review,
|
||||
"require_security": get_settings().pr_reviewer.require_security_review,
|
||||
"require_focused": get_settings().pr_reviewer.require_focused_review,
|
||||
"require_estimate_effort_to_review": get_settings().pr_reviewer.require_estimate_effort_to_review,
|
||||
'num_code_suggestions': get_settings().pr_reviewer.num_code_suggestions,
|
||||
'question_str': question_str,
|
||||
'answer_str': answer_str,
|
||||
"extra_instructions": get_settings().pr_reviewer.extra_instructions,
|
||||
"commit_messages_str": self.git_provider.get_commit_messages(),
|
||||
"custom_labels": "",
|
||||
"enable_custom_labels": get_settings().config.enable_custom_labels,
|
||||
}
|
||||
|
||||
self.token_handler = TokenHandler(
|
||||
@ -94,28 +95,37 @@ class PRReviewer:
|
||||
"""
|
||||
Review the pull request and generate feedback.
|
||||
"""
|
||||
if self.is_auto and not get_settings().pr_reviewer.automatic_review:
|
||||
logging.info(f'Automatic review is disabled {self.pr_url}')
|
||||
return None
|
||||
|
||||
logging.info(f'Reviewing PR: {self.pr_url} ...')
|
||||
try:
|
||||
if self.is_auto and not get_settings().pr_reviewer.automatic_review:
|
||||
get_logger().info(f'Automatic review is disabled {self.pr_url}')
|
||||
return None
|
||||
if self.is_auto and self.incremental.is_incremental and not self.incremental.first_new_commit_sha:
|
||||
get_logger().info(f"Incremental review is enabled for {self.pr_url} but there are no new commits")
|
||||
return None
|
||||
|
||||
if get_settings().config.publish_output:
|
||||
self.git_provider.publish_comment("Preparing review...", is_temporary=True)
|
||||
|
||||
await retry_with_fallback_models(self._prepare_prediction)
|
||||
|
||||
logging.info('Preparing PR review...')
|
||||
pr_comment = self._prepare_pr_review()
|
||||
|
||||
if get_settings().config.publish_output:
|
||||
logging.info('Pushing PR review...')
|
||||
self.git_provider.publish_comment(pr_comment)
|
||||
self.git_provider.remove_initial_comment()
|
||||
|
||||
if get_settings().pr_reviewer.inline_code_comments:
|
||||
logging.info('Pushing inline code comments...')
|
||||
self._publish_inline_code_comments()
|
||||
get_logger().info(f'Reviewing PR: {self.pr_url} ...')
|
||||
|
||||
if get_settings().config.publish_output:
|
||||
self.git_provider.publish_comment("Preparing review...", is_temporary=True)
|
||||
|
||||
await retry_with_fallback_models(self._prepare_prediction)
|
||||
|
||||
get_logger().info('Preparing PR review...')
|
||||
pr_comment = self._prepare_pr_review()
|
||||
|
||||
if get_settings().config.publish_output:
|
||||
get_logger().info('Pushing PR review...')
|
||||
previous_review_comment = self._get_previous_review_comment()
|
||||
self.git_provider.publish_comment(pr_comment)
|
||||
self.git_provider.remove_initial_comment()
|
||||
if previous_review_comment:
|
||||
self._remove_previous_review_comment(previous_review_comment)
|
||||
if get_settings().pr_reviewer.inline_code_comments:
|
||||
get_logger().info('Pushing inline code comments...')
|
||||
self._publish_inline_code_comments()
|
||||
except Exception as e:
|
||||
get_logger().error(f"Failed to review PR: {e}")
|
||||
|
||||
async def _prepare_prediction(self, model: str) -> None:
|
||||
"""
|
||||
@ -127,9 +137,9 @@ class PRReviewer:
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
logging.info('Getting PR diff...')
|
||||
get_logger().info('Getting PR diff...')
|
||||
self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
|
||||
logging.info('Getting AI prediction...')
|
||||
get_logger().info('Getting AI prediction...')
|
||||
self.prediction = await self._get_prediction(model)
|
||||
|
||||
async def _get_prediction(self, model: str) -> str:
|
||||
@ -146,12 +156,13 @@ class PRReviewer:
|
||||
variables["diff"] = self.patches_diff # update diff
|
||||
|
||||
environment = Environment(undefined=StrictUndefined)
|
||||
set_custom_labels(variables)
|
||||
system_prompt = environment.from_string(get_settings().pr_review_prompt.system).render(variables)
|
||||
user_prompt = environment.from_string(get_settings().pr_review_prompt.user).render(variables)
|
||||
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
logging.info(f"\nSystem prompt:\n{system_prompt}")
|
||||
logging.info(f"\nUser prompt:\n{user_prompt}")
|
||||
get_logger().info(f"\nSystem prompt:\n{system_prompt}")
|
||||
get_logger().info(f"\nUser prompt:\n{user_prompt}")
|
||||
|
||||
response, finish_reason = await self.ai_handler.chat_completion(
|
||||
model=model,
|
||||
@ -204,30 +215,51 @@ class PRReviewer:
|
||||
link = self.git_provider.generate_link_to_relevant_line_number(suggestion)
|
||||
if link:
|
||||
suggestion['relevant line'] = f"[{suggestion['relevant line']}]({link})"
|
||||
else:
|
||||
pass
|
||||
# try:
|
||||
# relevant_file = suggestion['relevant file'].strip('`').strip("'")
|
||||
# relevant_line_str = suggestion['relevant line']
|
||||
# if not relevant_line_str:
|
||||
# return ""
|
||||
#
|
||||
# position, absolute_position = find_line_number_of_relevant_line_in_file(
|
||||
# self.git_provider.diff_files, relevant_file, relevant_line_str)
|
||||
# if absolute_position != -1:
|
||||
# suggestion[
|
||||
# 'relevant line'] = f"{suggestion['relevant line']} (line {absolute_position})"
|
||||
# except:
|
||||
# pass
|
||||
|
||||
|
||||
# Add incremental review section
|
||||
if self.incremental.is_incremental:
|
||||
last_commit_url = f"{self.git_provider.get_pr_url()}/commits/" \
|
||||
f"{self.git_provider.incremental.first_new_commit_sha}"
|
||||
last_commit_msg = self.incremental.commits_range[0].commit.message if self.incremental.commits_range else ""
|
||||
incremental_review_markdown_text = f"Starting from commit {last_commit_url}"
|
||||
if last_commit_msg:
|
||||
incremental_review_markdown_text += f" \n_({last_commit_msg.splitlines(keepends=False)[0]})_"
|
||||
data = OrderedDict(data)
|
||||
data.update({'Incremental PR Review': {
|
||||
"⏮️ Review for commits since previous PR-Agent review": f"Starting from commit {last_commit_url}"}})
|
||||
"⏮️ Review for commits since previous PR-Agent review": incremental_review_markdown_text}})
|
||||
data.move_to_end('Incremental PR Review', last=False)
|
||||
|
||||
markdown_text = convert_to_markdown(data)
|
||||
markdown_text = convert_to_markdown(data, self.git_provider.is_supported("gfm_markdown"))
|
||||
user = self.git_provider.get_user_id()
|
||||
|
||||
# Add help text if not in CLI mode
|
||||
if not get_settings().get("CONFIG.CLI_MODE", False):
|
||||
markdown_text += "\n### How to use\n"
|
||||
if user and '[bot]' not in user:
|
||||
bot_user = "[bot]" if get_settings().github_app.override_deployment_type else get_settings().github_app.bot_user
|
||||
if user and bot_user not in user:
|
||||
markdown_text += bot_help_text(user)
|
||||
else:
|
||||
markdown_text += actions_help_text
|
||||
|
||||
# Log markdown response if verbosity level is high
|
||||
if get_settings().config.verbosity_level >= 2:
|
||||
logging.info(f"Markdown response:\n{markdown_text}")
|
||||
get_logger().info(f"Markdown response:\n{markdown_text}")
|
||||
|
||||
if markdown_text == None or len(markdown_text) == 0:
|
||||
markdown_text = ""
|
||||
@ -246,7 +278,7 @@ class PRReviewer:
|
||||
try:
|
||||
data = yaml.load(review_text, Loader=SafeLoader)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to parse AI prediction: {e}")
|
||||
get_logger().error(f"Failed to parse AI prediction: {e}")
|
||||
data = try_fix_yaml(review_text)
|
||||
|
||||
comments: List[str] = []
|
||||
@ -255,7 +287,7 @@ class PRReviewer:
|
||||
relevant_line_in_file = suggestion.get('relevant line', '').strip()
|
||||
content = suggestion.get('suggestion', '')
|
||||
if not relevant_file or not relevant_line_in_file or not content:
|
||||
logging.info("Skipping inline comment with missing file/line/content")
|
||||
get_logger().info("Skipping inline comment with missing file/line/content")
|
||||
continue
|
||||
|
||||
if self.git_provider.is_supported("create_inline_comment"):
|
||||
@ -266,7 +298,7 @@ class PRReviewer:
|
||||
self.git_provider.publish_inline_comment(content, relevant_file, relevant_line_in_file)
|
||||
|
||||
if comments:
|
||||
self.git_provider.publish_inline_comments(comments)
|
||||
self.git_provider.publish_inline_comments(comments)
|
||||
|
||||
def _get_user_answers(self) -> Tuple[str, str]:
|
||||
"""
|
||||
@ -291,3 +323,26 @@ class PRReviewer:
|
||||
break
|
||||
|
||||
return question_str, answer_str
|
||||
|
||||
def _get_previous_review_comment(self):
|
||||
"""
|
||||
Get the previous review comment if it exists.
|
||||
"""
|
||||
try:
|
||||
if get_settings().pr_reviewer.remove_previous_review_comment and hasattr(self.git_provider, "get_previous_review"):
|
||||
return self.git_provider.get_previous_review(
|
||||
full=not self.incremental.is_incremental,
|
||||
incremental=self.incremental.is_incremental,
|
||||
)
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Failed to get previous review comment, error: {e}")
|
||||
|
||||
def _remove_previous_review_comment(self, comment):
|
||||
"""
|
||||
Remove the previous review comment if it exists.
|
||||
"""
|
||||
try:
|
||||
if get_settings().pr_reviewer.remove_previous_review_comment and comment:
|
||||
self.git_provider.remove_comment(comment)
|
||||
except Exception as e:
|
||||
get_logger().exception(f"Failed to remove previous review comment, error: {e}")
|
||||
|
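Note the ordering in run() above: the previous review comment is fetched before publishing and removed only after the new review is posted, so a failed publish never deletes the old review. A condensed sketch of that ordering, with a hypothetical provider object standing in for the git provider:

# Condensed sketch of the publish-then-remove ordering used in run() above.
# `provider` and `new_review` are hypothetical stand-ins.
def publish_review(provider, new_review: str, remove_previous: bool) -> None:
    previous = provider.get_previous_review() if remove_previous else None
    provider.publish_comment(new_review)   # publish the new review first...
    if previous:
        provider.remove_comment(previous)  # ...only then drop the old one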
pr_agent/tools/pr_similar_issue.py (new file, 294 lines)
@@ -0,0 +1,294 @@
import time
from enum import Enum
from typing import List

import openai
import pandas as pd
import pinecone
from pinecone_datasets import Dataset, DatasetMetadata
from pydantic import BaseModel, Field

from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.log import get_logger

MODEL = "text-embedding-ada-002"


class PRSimilarIssue:
    def __init__(self, issue_url: str, args: list = None):
        if get_settings().config.git_provider != "github":
            raise Exception("Only github is supported for similar issue tool")

        self.cli_mode = get_settings().CONFIG.CLI_MODE
        self.max_issues_to_scan = get_settings().pr_similar_issue.max_issues_to_scan
        self.issue_url = issue_url
        self.git_provider = get_git_provider()()
        repo_name, issue_number = self.git_provider._parse_issue_url(issue_url.split('=')[-1])
        self.git_provider.repo = repo_name
        self.git_provider.repo_obj = self.git_provider.github_client.get_repo(repo_name)
        self.token_handler = TokenHandler()
        repo_obj = self.git_provider.repo_obj
        repo_name_for_index = self.repo_name_for_index = repo_obj.full_name.lower().replace('/', '-').replace('_/', '-')
        index_name = self.index_name = "codium-ai-pr-agent-issues"

        # assuming pinecone api key and environment are set in secrets file
        try:
            api_key = get_settings().pinecone.api_key
            environment = get_settings().pinecone.environment
        except Exception:
            if not self.cli_mode:
                repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
                issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
                issue_main.create_comment("Please set pinecone api key and environment in secrets file")
            raise Exception("Please set pinecone api key and environment in secrets file")

        # check if index exists, and if repo is already indexed
        run_from_scratch = False
        if run_from_scratch:  # for debugging
            pinecone.init(api_key=api_key, environment=environment)
            if index_name in pinecone.list_indexes():
                get_logger().info('Removing index...')
                pinecone.delete_index(index_name)
                get_logger().info('Done')

        upsert = True
        pinecone.init(api_key=api_key, environment=environment)
        if not index_name in pinecone.list_indexes():
            run_from_scratch = True
            upsert = False
        else:
            if get_settings().pr_similar_issue.force_update_dataset:
                upsert = True
            else:
                pinecone_index = pinecone.Index(index_name=index_name)
                res = pinecone_index.fetch([f"example_issue_{repo_name_for_index}"]).to_dict()
                if res["vectors"]:
                    upsert = False

        if run_from_scratch or upsert:  # index the entire repo
            get_logger().info('Indexing the entire repo...')

            get_logger().info('Getting issues...')
            issues = list(repo_obj.get_issues(state='all'))
            get_logger().info('Done')
            self._update_index_with_issues(issues, repo_name_for_index, upsert=upsert)
        else:  # update index if needed
            pinecone_index = pinecone.Index(index_name=index_name)
            issues_to_update = []
            issues_paginated_list = repo_obj.get_issues(state='all')
            counter = 1
            for issue in issues_paginated_list:
                if issue.pull_request:
                    continue
                issue_str, comments, number = self._process_issue(issue)
                issue_key = f"issue_{number}"
                id = issue_key + "." + "issue"
                res = pinecone_index.fetch([id]).to_dict()
                is_new_issue = True
                for vector in res["vectors"].values():
                    if vector['metadata']['repo'] == repo_name_for_index:
                        is_new_issue = False
                        break
                if is_new_issue:
                    counter += 1
                    issues_to_update.append(issue)
                else:
                    break

            if issues_to_update:
                get_logger().info(f'Updating index with {counter} new issues...')
                self._update_index_with_issues(issues_to_update, repo_name_for_index, upsert=True)
            else:
                get_logger().info('No new issues to update')

    async def run(self):
        get_logger().info('Getting issue...')
        repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
        issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
        issue_str, comments, number = self._process_issue(issue_main)
        openai.api_key = get_settings().openai.key
        get_logger().info('Done')

        get_logger().info('Querying...')
        res = openai.Embedding.create(input=[issue_str], engine=MODEL)
        embeds = [record['embedding'] for record in res['data']]
        pinecone_index = pinecone.Index(index_name=self.index_name)
        res = pinecone_index.query(embeds[0],
                                   top_k=5,
                                   filter={"repo": self.repo_name_for_index},
                                   include_metadata=True).to_dict()
        relevant_issues_number_list = []
        relevant_comment_number_list = []
        score_list = []
        for r in res['matches']:
            # skip example issue
            if 'example_issue_' in r["id"]:
                continue

            try:
                issue_number = int(r["id"].split('.')[0].split('_')[-1])
            except:
                get_logger().debug(f"Failed to parse issue number from {r['id']}")
                continue

            if original_issue_number == issue_number:
                continue
            if issue_number not in relevant_issues_number_list:
                relevant_issues_number_list.append(issue_number)
            if 'comment' in r["id"]:
                relevant_comment_number_list.append(int(r["id"].split('.')[1].split('_')[-1]))
            else:
                relevant_comment_number_list.append(-1)
            score_list.append(str("{:.2f}".format(r['score'])))
        get_logger().info('Done')

        get_logger().info('Publishing response...')
        similar_issues_str = "### Similar Issues\n___\n\n"
        for i, issue_number_similar in enumerate(relevant_issues_number_list):
            issue = self.git_provider.repo_obj.get_issue(issue_number_similar)
            title = issue.title
            url = issue.html_url
            if relevant_comment_number_list[i] != -1:
                url = list(issue.get_comments())[relevant_comment_number_list[i]].html_url
            similar_issues_str += f"{i + 1}. **[{title}]({url})** (score={score_list[i]})\n\n"
        if get_settings().config.publish_output:
            response = issue_main.create_comment(similar_issues_str)
        get_logger().info(similar_issues_str)
        get_logger().info('Done')

    def _process_issue(self, issue):
        header = issue.title
        body = issue.body
        number = issue.number
        if get_settings().pr_similar_issue.skip_comments:
            comments = []
        else:
            comments = list(issue.get_comments())
        issue_str = f"Issue Header: \"{header}\"\n\nIssue Body:\n{body}"
        return issue_str, comments, number

    def _update_index_with_issues(self, issues_list, repo_name_for_index, upsert=False):
        get_logger().info('Processing issues...')
        corpus = Corpus()
        example_issue_record = Record(
            id=f"example_issue_{repo_name_for_index}",
            text="example_issue",
            metadata=Metadata(repo=repo_name_for_index)
        )
        corpus.append(example_issue_record)

        counter = 0
        for issue in issues_list:
            if issue.pull_request:
                continue

            counter += 1
            if counter % 100 == 0:
                get_logger().info(f"Scanned {counter} issues")
            if counter >= self.max_issues_to_scan:
                get_logger().info(f"Scanned {self.max_issues_to_scan} issues, stopping")
                break

            issue_str, comments, number = self._process_issue(issue)
            issue_key = f"issue_{number}"
            username = issue.user.login
            created_at = str(issue.created_at)
            if len(issue_str) < 8000 or \
                    self.token_handler.count_tokens(issue_str) < MAX_TOKENS[MODEL]:  # fast reject first
                issue_record = Record(
                    id=issue_key + "." + "issue",
                    text=issue_str,
                    metadata=Metadata(repo=repo_name_for_index,
                                      username=username,
                                      created_at=created_at,
                                      level=IssueLevel.ISSUE)
                )
                corpus.append(issue_record)
                if comments:
                    for j, comment in enumerate(comments):
                        comment_body = comment.body
                        num_words_comment = len(comment_body.split())
                        if num_words_comment < 10 or not isinstance(comment_body, str):
                            continue

                        if len(comment_body) < 8000 or \
                                self.token_handler.count_tokens(comment_body) < MAX_TOKENS[MODEL]:
                            comment_record = Record(
                                id=issue_key + ".comment_" + str(j + 1),
                                text=comment_body,
                                metadata=Metadata(repo=repo_name_for_index,
                                                  username=username,  # use issue username for all comments
                                                  created_at=created_at,
                                                  level=IssueLevel.COMMENT)
                            )
                            corpus.append(comment_record)
        df = pd.DataFrame(corpus.dict()["documents"])
        get_logger().info('Done')

        get_logger().info('Embedding...')
        openai.api_key = get_settings().openai.key
        list_to_encode = list(df["text"].values)
        try:
            res = openai.Embedding.create(input=list_to_encode, engine=MODEL)
            embeds = [record['embedding'] for record in res['data']]
        except:
            embeds = []
            get_logger().error('Failed to embed entire list, embedding one by one...')
            for i, text in enumerate(list_to_encode):
                try:
                    res = openai.Embedding.create(input=[text], engine=MODEL)
                    embeds.append(res['data'][0]['embedding'])
                except:
                    embeds.append([0] * 1536)
        df["values"] = embeds
        meta = DatasetMetadata.empty()
        meta.dense_model.dimension = len(embeds[0])
        ds = Dataset.from_pandas(df, meta)
        get_logger().info('Done')

        api_key = get_settings().pinecone.api_key
        environment = get_settings().pinecone.environment
        if not upsert:
            get_logger().info('Creating index from scratch...')
            ds.to_pinecone_index(self.index_name, api_key=api_key, environment=environment)
            time.sleep(15)  # wait for pinecone to finalize indexing before querying
        else:
            get_logger().info('Upserting index...')
            namespace = ""
            batch_size: int = 100
            concurrency: int = 10
            pinecone.init(api_key=api_key, environment=environment)
            ds._upsert_to_index(self.index_name, namespace, batch_size, concurrency)
            time.sleep(5)  # wait for pinecone to finalize upserting before querying
        get_logger().info('Done')


class IssueLevel(str, Enum):
    ISSUE = "issue"
    COMMENT = "comment"


class Metadata(BaseModel):
    repo: str
    username: str = Field(default="@codium")
    created_at: str = Field(default="01-01-1970 00:00:00.00000")
    level: IssueLevel = Field(default=IssueLevel.ISSUE)

    class Config:
        use_enum_values = True


class Record(BaseModel):
    id: str
    text: str
    metadata: Metadata


class Corpus(BaseModel):
    documents: List[Record] = Field(default=[])

    def append(self, r: Record):
        self.documents.append(r)
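The tool's core retrieval step embeds the query issue with OpenAI's text-embedding-ada-002 model and asks Pinecone for the top-5 nearest vectors, filtered to the current repo. A stripped-down sketch of that query path, assuming the index already exists and the openai/pinecone clients are configured as in the class above:

import openai
import pinecone

# Stripped-down version of PRSimilarIssue.run()'s query path; assumes the
# index exists and API keys are already configured, as in the class above.
def find_similar(issue_text: str, index_name: str, repo: str) -> list:
    res = openai.Embedding.create(input=[issue_text], engine="text-embedding-ada-002")
    query_vector = res["data"][0]["embedding"]
    index = pinecone.Index(index_name=index_name)
    matches = index.query(query_vector, top_k=5,
                          filter={"repo": repo},
                          include_metadata=True).to_dict()["matches"]
    # Drop the synthetic example record used to mark a repo as indexed
    return [m for m in matches if "example_issue_" not in m["id"]]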
@@ -1,5 +1,4 @@
import copy
import logging
from datetime import date
from time import sleep
from typing import Tuple
@@ -10,8 +9,9 @@ from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import GithubProvider, get_git_provider
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger

CHANGELOG_LINES = 50

@@ -46,28 +46,28 @@ class PRUpdateChangelog:
                                          get_settings().pr_update_changelog_prompt.user)

    async def run(self):
        assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"
        # assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"

        logging.info('Updating the changelog...')
        get_logger().info('Updating the changelog...')
        if get_settings().config.publish_output:
            self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)
        await retry_with_fallback_models(self._prepare_prediction)
        logging.info('Preparing PR changelog updates...')
        get_logger().info('Preparing PR changelog updates...')
        new_file_content, answer = self._prepare_changelog_update()
        if get_settings().config.publish_output:
            self.git_provider.remove_initial_comment()
            logging.info('Publishing changelog updates...')
            get_logger().info('Publishing changelog updates...')
            if self.commit_changelog:
                logging.info('Pushing PR changelog updates to repo...')
                get_logger().info('Pushing PR changelog updates to repo...')
                self._push_changelog_update(new_file_content, answer)
            else:
                logging.info('Publishing PR changelog as comment...')
                get_logger().info('Publishing PR changelog as comment...')
                self.git_provider.publish_comment(f"**Changelog updates:**\n\n{answer}")

    async def _prepare_prediction(self, model: str):
        logging.info('Getting PR diff...')
        get_logger().info('Getting PR diff...')
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
        logging.info('Getting AI prediction...')
        get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):
@@ -77,8 +77,8 @@ class PRUpdateChangelog:
        system_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
            logging.info(f"\nSystem prompt:\n{system_prompt}")
            logging.info(f"\nUser prompt:\n{user_prompt}")
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)

@@ -100,7 +100,7 @@ class PRUpdateChangelog:
            "\n>'/update_changelog --pr_update_changelog.push_changelog_changes=true'\n"

        if get_settings().config.verbosity_level >= 2:
            logging.info(f"answer:\n{answer}")
            get_logger().info(f"answer:\n{answer}")

        return new_file_content, answer

@@ -149,7 +149,7 @@ Example:
        except Exception:
            self.changelog_file_str = ""
            if self.commit_changelog:
                logging.info("No CHANGELOG.md file found in the repository. Creating one...")
                get_logger().info("No CHANGELOG.md file found in the repository. Creating one...")
                changelog_file = self.git_provider.repo_obj.create_file(path="CHANGELOG.md",
                                                                        message='add CHANGELOG.md',
                                                                        content="",
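As the help string above indicates, changelog updates are published as a PR comment by default, and committing CHANGELOG.md directly is opt-in via the pr_update_changelog.push_changelog_changes setting. A sketch of flipping that setting programmatically, following the same attribute-assignment pattern the tests below use for verbosity_level:

# Illustrative toggle for the behavior described in the help text above:
# by default the update is posted as a comment; this switches to commit mode.
from pr_agent.config_loader import get_settings

get_settings().pr_update_changelog.push_changelog_changes = True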
@@ -7,13 +7,18 @@ Jinja2==3.1.2
tiktoken==0.4.0
uvicorn==0.22.0
python-gitlab==3.15.0
pytest~=7.4.0
aiohttp~=3.8.4
pytest==7.4.0
aiohttp==3.8.4
atlassian-python-api==3.39.0
GitPython~=3.1.32
GitPython==3.1.32
PyYAML==6.0
starlette-context==0.3.6
litellm~=0.1.445
boto3~=1.28.25
litellm~=0.1.574
boto3==1.28.25
google-cloud-storage==2.10.0
ujson==5.8.0
azure-devops==7.1.0b3
msrest==0.7.1
pinecone-client
pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
loguru==0.7.2
@@ -110,7 +110,7 @@ class TestCodeCommitProvider:
        # Mock the response from the AWS client for get_pull_request method
        api.boto_client.get_pull_request.return_value = {
            "pullRequest": {
                "pullRequestId": "3",
                "pullRequestId": "321",
                "title": "My PR",
                "description": "My PR description",
                "pullRequestTargets": [
@@ -125,7 +125,7 @@ class TestCodeCommitProvider:
            }
        }

        pr = api.get_pr(321)
        pr = api.get_pr("my_test_repo", 321)

        assert pr.title == "My PR"
        assert pr.description == "My PR description"
@@ -1,6 +1,8 @@
import pytest
from unittest.mock import patch
from pr_agent.git_providers.codecommit_provider import CodeCommitFile
from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
from pr_agent.git_providers.codecommit_provider import PullRequestCCMimic
from pr_agent.git_providers.git_provider import EDIT_TYPE


@@ -25,12 +27,64 @@ class TestCodeCommitFile:


class TestCodeCommitProvider:
    def test_get_title(self):
        # Test that the get_title() function returns the PR title
        with patch.object(CodeCommitProvider, "__init__", lambda x, y: None):
            provider = CodeCommitProvider(None)
            provider.pr = PullRequestCCMimic("My Test PR Title", [])
            assert provider.get_title() == "My Test PR Title"

    def test_get_pr_id(self):
        # Test that the get_pr_id() function returns the correct ID
        with patch.object(CodeCommitProvider, "__init__", lambda x, y: None):
            provider = CodeCommitProvider(None)
            provider.repo_name = "my_test_repo"
            provider.pr_num = 321
            assert provider.get_pr_id() == "my_test_repo/321"

    def test_parse_pr_url(self):
        # Test that the _parse_pr_url() function can extract the repo name and PR number from a CodeCommit URL
        url = "https://us-east-1.console.aws.amazon.com/codesuite/codecommit/repositories/my_test_repo/pull-requests/321"
        repo_name, pr_number = CodeCommitProvider._parse_pr_url(url)
        assert repo_name == "my_test_repo"
        assert pr_number == 321

    def test_is_valid_codecommit_hostname(self):
        # Test the various AWS regions
        assert CodeCommitProvider._is_valid_codecommit_hostname("af-south-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-east-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-northeast-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-northeast-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-northeast-3.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-south-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-south-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-southeast-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-southeast-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-southeast-3.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ap-southeast-4.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("ca-central-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-central-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-central-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-north-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-south-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-south-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-west-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-west-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("eu-west-3.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("il-central-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("me-central-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("me-south-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("sa-east-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("us-east-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("us-east-2.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("us-gov-east-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("us-gov-west-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("us-west-1.console.aws.amazon.com")
        assert CodeCommitProvider._is_valid_codecommit_hostname("us-west-2.console.aws.amazon.com")
        # Test non-AWS regions
        assert not CodeCommitProvider._is_valid_codecommit_hostname("no-such-region.console.aws.amazon.com")
        assert not CodeCommitProvider._is_valid_codecommit_hostname("console.aws.amazon.com")

    # Test that an error is raised when an invalid CodeCommit URL is provided to the set_pr() method of the CodeCommitProvider class.
    # Generated by CodiumAI
    def test_invalid_codecommit_url(self):
@@ -106,6 +160,7 @@ class TestCodeCommitProvider:
        assert percentages == {}

    def test_get_edit_type(self):
        # Test that the _get_edit_type() function can convert a CodeCommit letter to an EDIT_TYPE enum
        assert CodeCommitProvider._get_edit_type("A") == EDIT_TYPE.ADDED
        assert CodeCommitProvider._get_edit_type("D") == EDIT_TYPE.DELETED
        assert CodeCommitProvider._get_edit_type("M") == EDIT_TYPE.MODIFIED
@@ -117,3 +172,18 @@ class TestCodeCommitProvider:
        assert CodeCommitProvider._get_edit_type("r") == EDIT_TYPE.RENAMED

        assert CodeCommitProvider._get_edit_type("X") is None

    def test_add_additional_newlines(self):
        # a short string to test adding double newlines
        input = "abc\ndef\n\n___\nghi\njkl\nmno\n\npqr\n"
        expect = "abc\n\ndef\n\n___\n\nghi\n\njkl\n\nmno\n\npqr\n\n"
        assert CodeCommitProvider._add_additional_newlines(input) == expect
        # a test example from a real PR
        input = "## PR Type:\nEnhancement\n\n___\n## PR Description:\nThis PR introduces a new feature to the script, allowing users to filter servers by name.\n\n___\n## PR Main Files Walkthrough:\n`foo`: The foo script has been updated to include a new command line option `-f` or `--filter`.\n`bar`: The bar script has been updated to list stopped servers.\n"
        expect = "## PR Type:\n\nEnhancement\n\n___\n\n## PR Description:\n\nThis PR introduces a new feature to the script, allowing users to filter servers by name.\n\n___\n\n## PR Main Files Walkthrough:\n\n`foo`: The foo script has been updated to include a new command line option `-f` or `--filter`.\n\n`bar`: The bar script has been updated to list stopped servers.\n\n"
        assert CodeCommitProvider._add_additional_newlines(input) == expect

    def test_remove_markdown_html(self):
        input = "## PR Feedback\n<details><summary>Code feedback:</summary>\nfile foo\n</summary>\n"
        expect = "## PR Feedback\nCode feedback:\nfile foo\n\n"
        assert CodeCommitProvider._remove_markdown_html(input) == expect
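The long assert list above pins every hostname to the shape <region>.console.aws.amazon.com. The provider's actual validator is not part of this diff, but a plausible regex-based sketch that would satisfy these tests (an assumption, not the real implementation) is:

import re

# Plausible validator for the hostnames asserted above; the real
# CodeCommitProvider implementation is not shown in this diff.
def is_valid_codecommit_hostname(hostname: str) -> bool:
    # e.g. "us-east-1" or "us-gov-west-1", followed by the console domain
    return re.match(r"^[a-z]{2}(-gov)?-[a-z]+-\d+\.console\.aws\.amazon\.com$",
                    hostname) is not None

assert is_valid_codecommit_hostname("us-gov-east-1.console.aws.amazon.com")
assert not is_valid_codecommit_hostname("console.aws.amazon.com")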
tests/unittest/test_file_filter.py (new file, 80 lines)
@@ -0,0 +1,80 @@
import pytest
from pr_agent.algo.file_filter import filter_ignored
from pr_agent.config_loader import global_settings

class TestIgnoreFilter:
    def test_no_ignores(self):
        """
        Test no files are ignored when no patterns are specified.
        """
        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})(),
            type('', (object,), {'filename': 'file4.py'})(),
            type('', (object,), {'filename': 'file5.py'})()
        ]
        assert filter_ignored(files) == files, "Expected all files to be returned when no ignore patterns are given."

    def test_glob_ignores(self, monkeypatch):
        """
        Test files are ignored when glob patterns are specified.
        """
        monkeypatch.setattr(global_settings.ignore, 'glob', ['*.py'])

        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})(),
            type('', (object,), {'filename': 'file4.py'})(),
            type('', (object,), {'filename': 'file5.py'})()
        ]
        expected = [
            files[1],
            files[2]
        ]

        filtered_files = filter_ignored(files)
        assert filtered_files == expected, f"Expected {[file.filename for file in expected]}, but got {[file.filename for file in filtered_files]}."

    def test_regex_ignores(self, monkeypatch):
        """
        Test files are ignored when regex patterns are specified.
        """
        monkeypatch.setattr(global_settings.ignore, 'regex', ['^file[2-4]\..*$'])

        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})(),
            type('', (object,), {'filename': 'file4.py'})(),
            type('', (object,), {'filename': 'file5.py'})()
        ]
        expected = [
            files[0],
            files[4]
        ]

        filtered_files = filter_ignored(files)
        assert filtered_files == expected, f"Expected {[file.filename for file in expected]}, but got {[file.filename for file in filtered_files]}."

    def test_invalid_regex(self, monkeypatch):
        """
        Test invalid patterns are quietly ignored.
        """
        monkeypatch.setattr(global_settings.ignore, 'regex', ['(((||', '^file[2-4]\..*$'])

        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})(),
            type('', (object,), {'filename': 'file4.py'})(),
            type('', (object,), {'filename': 'file5.py'})()
        ]
        expected = [
            files[0],
            files[4]
        ]

        filtered_files = filter_ignored(files)
        assert filtered_files == expected, f"Expected {[file.filename for file in expected]}, but got {[file.filename for file in filtered_files]}."
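These tests pin down filter_ignored's contract: glob patterns and regex patterns from the ignore settings both exclude matching filenames, and an uncompilable regex is skipped silently. The implementation itself is not part of this diff; a sketch that would satisfy the tests, assuming the patterns are passed in as plain lists rather than read from settings:

import fnmatch
import re

# Sketch of a filter matching the contract exercised by TestIgnoreFilter;
# the real pr_agent.algo.file_filter.filter_ignored is not shown in this diff.
def filter_ignored(files, glob_patterns=None, regex_patterns=None):
    patterns = [fnmatch.translate(g) for g in (glob_patterns or [])]
    for r in (regex_patterns or []):
        try:
            re.compile(r)
            patterns.append(r)
        except re.error:
            continue  # invalid patterns are quietly ignored, as the test expects
    compiled = [re.compile(p) for p in patterns]
    return [f for f in files if not any(p.match(f.filename) for p in compiled)]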
@@ -43,18 +43,6 @@ class TestHandlePatchDeletions:
        assert handle_patch_deletions(patch, original_file_content_str, new_file_content_str,
                                      file_name) == patch.rstrip()

    # Tests that handle_patch_deletions logs a message when verbosity_level is greater than 0
    def test_handle_patch_deletions_happy_path_verbosity_level_greater_than_0(self, caplog):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n+baz\n'
        original_file_content_str = 'foo\nbar\n'
        new_file_content_str = ''
        file_name = 'file.py'
        get_settings().config.verbosity_level = 1

        with caplog.at_level(logging.INFO):
            handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file_name)
        assert any("Processing file" in message for message in caplog.messages)

    # Tests that handle_patch_deletions returns 'File was deleted' when new_file_content_str is empty
    def test_handle_patch_deletions_edge_case_new_file_content_empty(self):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n'