Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-13 17:20:38 +08:00)

Compare commits: ok/readme_ ... pg/pip_pac (117 commits)
Commits in this comparison (SHA1 only):

5c527eca66, b4ca52c7d8, a78d741292, a50e137bba, 92c0522f4d, 6a72df2981, 808ca48605, c827cbc0ae,
48fcb46d4f, 66b94599ec, 231efb33c1, eb798dae6f, 52576c79b3, cce2a79a1f, 413e5f6d77, 09ca848d4c,
801923789b, cfb696dfd5, 2e7a0a88fa, 1dbbafc30a, d8eae7faab, 14eceb6e61, 884317c4f7, c5f4b229b8,
5a2a17ec25, 1bd47b0d53, 7531ccd31f, 3b19827ae2, ea6e1811c1, bc2cf75b76, 9e1e0766b7, ccde68293f,
99d53af28d, 5ea607be58, e3846a480e, a60a58794c, 8ae5faca53, 28d6adf62a, 1229fba346, 59a59ebf66,
36ab12c486, 0254e3d04a, f6036e936e, 10a07e497d, 3b334805ee, b6f6c903a0, 55637a5620, 404cc0a00e,
0815e2024c, 41dcb75e8e, d23daf880f, d1a8a610e9, 918549a4fc, 8f482cd41a, 34096059ff, 2dfbfec8c2,
6170995665, ca42a54bc3, c0610afe2a, d4cbcc465c, adb3f17258, 2c03a67312, 55eb741965, 8e6518f071,
c9c95d60d4, 02ecaa340f, cca809e91c, 57ff46ecc1, 3819d52eb0, 3072325d2c, abca2fdcb7, 4d84f76948,
dd8f6eb923, b9c25e487a, 1bf27c38a7, 1f987380ed, cd8bbbf889, 8e5498ee97, 0412d7aca0, 1eac3245d9,
cd51bef7f7, e8aa33fa0b, 54b021b02c, 32151e3d9a, 32358678e6, 42e32664a1, 1e97236a15, 321f7bce46,
02a1d8dbfc, e34f9d8d1c, 35dac012bd, 21ced18f50, fca78cf395, d1b91b0ea3, 76e00acbdb, 2f83e7738c,
f4a226b0f7, f5e2838fc3, bbdfd2c3d4, 74572e1768, f0a17b863c, 86fd84e113, d5b9be23d3, 057bb3932f,
05f29cc406, 63c4c7e584, 1ea23cab96, e99f9fd59f, fdf6a3e833, 79cb94b4c2, 9adec7cc10, 1f0df47b4d,
a71a12791b, 23fa834721, 2e246869d0, 2f9546e144, 6d91f44634
.gitignore (vendored, 2 added lines)

@@ -2,3 +2,5 @@
 venv/
 pr_agent/settings/.secrets.toml
 __pycache__
+dist/
+*.egg-info/
CHANGELOG.md (new file, 6 lines)

@@ -0,0 +1,6 @@
+## 2023-07-26
+
+### Added
+- New feature for updating the CHANGELOG.md based on the contents of a PR.
+- Added support for this feature for the Github provider.
+- New configuration settings and prompts for the changelog update feature.
@@ -1,8 +1,8 @@
 FROM python:3.10 as base

 WORKDIR /app
-ADD requirements.txt .
-RUN pip install -r requirements.txt && rm requirements.txt
+ADD pyproject.toml .
+RUN pip install . && rm pyproject.toml
 ENV PYTHONPATH=/app
 ADD pr_agent pr_agent
 ADD github_action/entrypoint.sh /
@@ -95,9 +95,10 @@ cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml
 # Edit .secrets.toml file
 ```

-4. Run the appropriate Python scripts from the scripts folder:
+4. Add the pr_agent folder to your PYTHONPATH, then run the cli.py script:

 ```
+export PYTHONPATH=[$PYTHONPATH:]<PATH to pr_agent folder>
 python pr_agent/cli.py --pr_url <pr_url> review
 python pr_agent/cli.py --pr_url <pr_url> ask <your question>
 python pr_agent/cli.py --pr_url <pr_url> describe
README.md (42 changes)

@@ -4,15 +4,18 @@

 <img src="./pics/logo-dark.png#gh-dark-mode-only" width="330"/>
 <img src="./pics/logo-light.png#gh-light-mode-only" width="330"/><br/>
-Making pull-requests less painful with an AI agent
+Making pull requests less painful with an AI agent
 </div>

 [](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
 [](https://discord.com/channels/1057273017547378788/1126104260430528613)
+<a href="https://github.com/Codium-ai/pr-agent/commits/main">
+<img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
+</a>
 </div>
 <div style="text-align:left;">

-CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull-requests faster and more efficiently. It automatically analyzes the pull-request and can provide several types of feedback:
+CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull requests faster and more efficiently. It automatically analyzes the pull request and can provide several types of feedback:

 **Auto-Description**: Automatically generating PR description - title, type, summary, code walkthrough and PR labels.
 \

@@ -27,25 +30,31 @@ CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull
 <h4>/describe:</h4>
 <div align="center">
 <p float="center">
-<img src="https://codium.ai/images/describe.gif" width="800">
+<img src="https://www.codium.ai/images/describe-2.gif" width="800">
 </p>
 </div>
 <h4>/review:</h4>
 <div align="center">
 <p float="center">
-<img src="https://codium.ai/images/review.gif" width="800">
+<img src="https://www.codium.ai/images/review-2.gif" width="800">
+</p>
+</div>
+<h4>/reflect_and_review:</h4>
+<div align="center">
+<p float="center">
+<img src="https://www.codium.ai/images/reflect_and_review.gif" width="800">
 </p>
 </div>
 <h4>/ask:</h4>
 <div align="center">
 <p float="center">
-<img src="https://codium.ai/images/ask.gif" width="800">
+<img src="https://www.codium.ai/images/ask-2.gif" width="800">
 </p>
 </div>
 <h4>/improve:</h4>
 <div align="center">
 <p float="center">
-<img src="https://codium.ai/images/improve.gif" width="800">
+<img src="https://www.codium.ai/images/improve-2.gif" width="800">
 </p>
 </div>
 <div align="left">

@@ -57,6 +66,7 @@ CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull
 - [Usage and tools](#usage-and-tools)
 - [Configuration](./CONFIGURATION.md)
 - [How it works](#how-it-works)
+- [Why use PR-Agent](#why-use-pr-agent)
 - [Roadmap](#roadmap)
 - [Similar projects](#similar-projects)
 </div>

@@ -72,9 +82,11 @@ CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull
 | | Auto-Description | :white_check_mark: | :white_check_mark: | |
 | | Improve Code | :white_check_mark: | :white_check_mark: | |
 | | Reflect and Review | :white_check_mark: | | |
+| | Update CHANGELOG.md | :white_check_mark: | | |
 | | | | | |
 | USAGE | CLI | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| | Tagging bot | :white_check_mark: | :white_check_mark: | |
+| | App / webhook | :white_check_mark: | :white_check_mark: | |
+| | Tagging bot | :white_check_mark: | | |
 | | Actions | :white_check_mark: | | |
 | | | | | |
 | CORE | PR compression | :white_check_mark: | :white_check_mark: | :white_check_mark: |

@@ -88,6 +100,7 @@ Examples for invoking the different tools via the CLI:
 - **Improve**: python cli.py --pr-url=<pr_url> improve
 - **Ask**: python cli.py --pr-url=<pr_url> ask "Write me a poem about this PR"
 - **Reflect**: python cli.py --pr-url=<pr_url> reflect
+- **Update changelog**: python cli.py --pr-url=<pr_url> update_changelog

 "<pr_url>" is the url of the relevant PR (for example: https://github.com/Codium-ai/pr-agent/pull/50).

@@ -97,7 +110,7 @@ In the [configuration](./CONFIGURATION.md) file you can select your git provider

 Try GPT-4 powered PR-Agent on your public GitHub repository for free. Just mention `@CodiumAI-Agent` and add the desired command in any PR comment! The agent will generate a response based on your command.

-
+

 To set up your own PR-Agent, see the [Installation](#installation) section

@@ -136,6 +149,19 @@ There are several ways to use PR-Agent:

 Check out the [PR Compression strategy](./PR_COMPRESSION.md) page for more details on how we convert a code diff to a manageable LLM prompt

+## Why use PR-Agent?
+
+A reasonable question that can be asked is: `"Why use PR-Agent? What make it stand out from existing tools?"`
+
+Here are some of the reasons why:
+
+- We emphasize **real-life practical usage**. Each tool (review, improve, ask, ...) has a single GPT-4 call, no more. We feel that this is critical for realistic team usage - obtaining an answer quickly (~30 seconds) and affordably.
+- Our [PR Compression strategy](./PR_COMPRESSION.md) is a core ability that enables to effectively tackle both short and long PRs.
+- Our JSON prompting strategy enables to have **modular, customizable tools**. For example, the '/review' tool categories can be controlled via the configuration file. Adding additional categories is easy and accessible.
+- We support **multiple git providers** (GitHub, Gitlab, Bitbucket), and multiple ways to use the tool (CLI, GitHub Action, Docker, ...).
+- We are open-source, and welcome contributions from the community.
+
+
 ## Roadmap

 - [ ] Support open-source models, as a replacement for OpenAI models. (Note - a minimal requirement for each open-source model is to have 8k+ context, and good support for generating JSON as an output)
@@ -1,5 +1,8 @@
-name: 'PR Agent'
+name: 'Codium PR Agent'
 description: 'Summarize, review and suggest improvements for pull requests'
+branding:
+  icon: 'award'
+  color: 'green'
 runs:
   using: 'docker'
   image: 'Dockerfile.github_action_dockerhub'
@@ -1,8 +1,8 @@
 FROM python:3.10 as base

 WORKDIR /app
-ADD requirements.txt .
-RUN pip install -r requirements.txt && rm requirements.txt
+ADD pyproject.toml .
+RUN pip install . && rm pyproject.toml
 ENV PYTHONPATH=/app
 ADD pr_agent pr_agent

@@ -4,9 +4,9 @@ RUN yum update -y && \
     yum install -y gcc python3-devel && \
     yum clean all

-ADD requirements.txt .
-RUN pip install -r requirements.txt && rm requirements.txt
-RUN pip install mangum==16.0.0
+ADD pyproject.toml .
+RUN pip install . && rm pyproject.toml
+RUN pip install mangum==0.17.0
 COPY pr_agent/ ${LAMBDA_TASK_ROOT}/pr_agent/

 CMD ["pr_agent.servers.serverless.serverless"]
@@ -6,6 +6,7 @@ from pr_agent.tools.pr_description import PRDescription
 from pr_agent.tools.pr_information_from_user import PRInformationFromUser
 from pr_agent.tools.pr_questions import PRQuestions
 from pr_agent.tools.pr_reviewer import PRReviewer
+from pr_agent.tools.pr_update_changelog import PRUpdateChangelog


 class PRAgent:

@@ -26,7 +27,9 @@ class PRAgent:
         elif any(cmd == action for cmd in ["/improve", "/improve_code"]):
             await PRCodeSuggestions(pr_url).suggest()
         elif any(cmd == action for cmd in ["/ask", "/ask_question"]):
-            await PRQuestions(pr_url, args).answer()
+            await PRQuestions(pr_url, args=args).answer()
+        elif any(cmd == action for cmd in ["/update_changelog"]):
+            await PRUpdateChangelog(pr_url, args=args).update_changelog()
         else:
             return False

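A minimal usage sketch of the dispatch above (the import path and the handle_request entry-point name are assumptions from the surrounding project, they are not shown in this hunk): a `/update_changelog` comment now routes to PRUpdateChangelog.

# Hedged sketch only; import path and handle_request name are assumed.
import asyncio

from pr_agent.agent.pr_agent import PRAgent

async def main() -> None:
    # The example PR URL is the one used in the README of this repository.
    handled = await PRAgent().handle_request(
        "https://github.com/Codium-ai/pr-agent/pull/50", "/update_changelog")
    print("handled:", handled)

asyncio.run(main())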
@@ -1,12 +1,12 @@
 import logging

 import openai
-from openai.error import APIError, Timeout, TryAgain
+from openai.error import APIError, Timeout, TryAgain, RateLimitError
 from retry import retry

 from pr_agent.config_loader import settings

-OPENAI_RETRIES=2
+OPENAI_RETRIES=5

 class AiHandler:
     """

@@ -34,7 +34,7 @@ class AiHandler:
         except AttributeError as e:
             raise ValueError("OpenAI key is required") from e

-    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError),
+    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
     async def chat_completion(self, model: str, temperature: float, system: str, user: str):
         """

@@ -69,6 +69,12 @@ class AiHandler:
         except (APIError, Timeout, TryAgain) as e:
             logging.error("Error during OpenAI inference: ", e)
             raise
+        except (RateLimitError) as e:
+            logging.error("Rate limit error during OpenAI inference: ", e)
+            raise
+        except (Exception) as e:
+            logging.error("Unknown error during OpenAI inference: ", e)
+            raise TryAgain from e
         if response is None or len(response.choices) == 0:
             raise TryAgain
         resp = response.choices[0]['message']['content']
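A minimal usage sketch of the retried call above (the module path pr_agent.algo.ai_handler and the exact return value of chat_completion are assumptions, they are not part of this hunk). With the decorator change, RateLimitError now goes through the same retry/backoff path as APIError, Timeout and TryAgain.

# Hedged sketch only; module path and return shape are assumed.
import asyncio

from pr_agent.algo.ai_handler import AiHandler

async def main() -> None:
    handler = AiHandler()  # the OpenAI key comes from the settings files (e.g. settings/.secrets.toml)
    result = await handler.chat_completion(
        model="gpt-4",
        temperature=0.2,
        system="You are a helpful PR reviewer.",
        user="Summarize the purpose of this pull request in one sentence.")
    print(result)

asyncio.run(main())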
File diff suppressed because one or more lines are too long
@@ -1,8 +1,11 @@
 from __future__ import annotations

 import logging
-from typing import Tuple, Union
+from typing import Tuple, Union, Callable, List

+from github import RateLimitExceededException
+
+from pr_agent.algo import MAX_TOKENS
 from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
 from pr_agent.algo.language_handler import sort_files_by_main_languages
 from pr_agent.algo.token_handler import TokenHandler

@@ -10,7 +13,6 @@ from pr_agent.algo.utils import load_large_diff
 from pr_agent.config_loader import settings
 from pr_agent.git_providers.git_provider import GitProvider

-
 DELETED_FILES_ = "Deleted files:\n"

 MORE_MODIFIED_FILES_ = "More modified files:\n"

@@ -19,15 +21,15 @@ OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD = 1000
 OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD = 600
 PATCH_EXTRA_LINES = 3

-def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler,
-                add_line_numbers_to_hunks: bool = False, disable_extra_lines: bool =False) -> str:
+def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: str,
+                add_line_numbers_to_hunks: bool = False, disable_extra_lines: bool = False) -> str:
     """
     Returns a string with the diff of the pull request, applying diff minimization techniques if needed.

     Args:
         git_provider (GitProvider): An object of the GitProvider class representing the Git provider used for the pull request.
         token_handler (TokenHandler): An object of the TokenHandler class used for handling tokens in the context of the pull request.
+        model (str): The name of the model used for tokenization.
         add_line_numbers_to_hunks (bool, optional): A boolean indicating whether to add line numbers to the hunks in the diff. Defaults to False.
         disable_extra_lines (bool, optional): A boolean indicating whether to disable the extension of each patch with extra lines of context. Defaults to False.

@@ -39,7 +41,11 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler,
     global PATCH_EXTRA_LINES
     PATCH_EXTRA_LINES = 0

-    diff_files = list(git_provider.get_diff_files())
+    try:
+        diff_files = list(git_provider.get_diff_files())
+    except RateLimitExceededException as e:
+        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
+        raise

     # get pr languages
     pr_languages = sort_files_by_main_languages(git_provider.get_languages(), diff_files)

@@ -49,12 +55,12 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler,
                                                                       add_line_numbers_to_hunks)

     # if we are under the limit, return the full diff
-    if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < token_handler.limit:
+    if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < MAX_TOKENS[model]:
         return "\n".join(patches_extended)

     # if we are over the limit, start pruning
     patches_compressed, modified_file_names, deleted_file_names = \
-        pr_generate_compressed_diff(pr_languages, token_handler, add_line_numbers_to_hunks)
+        pr_generate_compressed_diff(pr_languages, token_handler, model, add_line_numbers_to_hunks)

     final_diff = "\n".join(patches_compressed)
     if modified_file_names:

@@ -110,13 +116,14 @@ def pr_generate_extended_diff(pr_languages: list, token_handler: TokenHandler,
     return patches_extended, total_tokens


-def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler,
+def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, model: str,
                                 convert_hunks_to_line_numbers: bool) -> Tuple[list, list, list]:
     """
     Generate a compressed diff string for a pull request, using diff minimization techniques to reduce the number of tokens used.
     Args:
         top_langs (list): A list of dictionaries representing the languages used in the pull request and their corresponding files.
         token_handler (TokenHandler): An object of the TokenHandler class used for handling tokens in the context of the pull request.
+        model (str): The model used for tokenization.
         convert_hunks_to_line_numbers (bool): A boolean indicating whether to convert hunks to line numbers in the diff.
     Returns:
         Tuple[list, list, list]: A tuple containing the following lists:

@@ -132,7 +139,6 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler,
     4. Minimize all remaining files when you reach token limit
     """

-
     patches = []
     modified_files_list = []
     deleted_files_list = []

@@ -166,12 +172,12 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler,
             new_patch_tokens = token_handler.count_tokens(patch)

             # Hard Stop, no more tokens
-            if total_tokens > token_handler.limit - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
+            if total_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
                 logging.warning(f"File was fully skipped, no more tokens: {file.filename}.")
                 continue

             # If the patch is too large, just show the file name
-            if total_tokens + new_patch_tokens > token_handler.limit - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD:
+            if total_tokens + new_patch_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD:
                 # Current logic is to skip the patch if it's too large
                 # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
                 # until we meet the requirements

@@ -196,3 +202,16 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler,
     return patches, modified_files_list, deleted_files_list


+async def retry_with_fallback_models(f: Callable):
+    model = settings.config.model
+    fallback_models = settings.config.fallback_models
+    if not isinstance(fallback_models, list):
+        fallback_models = [fallback_models]
+    all_models = [model] + fallback_models
+    for i, model in enumerate(all_models):
+        try:
+            return await f(model)
+        except Exception as e:
+            logging.warning(f"Failed to generate prediction with {model}: {e}")
+            if i == len(all_models) - 1:  # If it's the last iteration
+                raise  # Re-raise the last exception
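A minimal usage sketch of the new helper above (assuming this module is pr_agent/algo/pr_processing.py; the stub coroutine is illustrative, not repository code): a tool wraps its per-model work in a coroutine, and retry_with_fallback_models tries settings.config.model first, then each entry of settings.config.fallback_models, re-raising only after the last one fails.

# Hedged usage sketch; the coroutine body is a stand-in for the real work
# (building the diff with get_pr_diff(git_provider, token_handler, model)
# and calling the model), which is out of scope for this hunk.
import asyncio

from pr_agent.algo.pr_processing import retry_with_fallback_models

async def _run_for_model(model: str) -> str:
    print(f"trying model: {model}")
    return f"prediction from {model}"

print(asyncio.run(retry_with_fallback_models(_run_for_model)))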
@@ -26,7 +26,6 @@ class TokenHandler:
         - user: The user string.
         """
         self.encoder = encoding_for_model(settings.config.model)
-        self.limit = MAX_TOKENS[settings.config.model]
         self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)

     def _get_system_user_tokens(self, pr, encoder, vars: dict, system, user):
@@ -8,10 +8,12 @@ from pr_agent.tools.pr_description import PRDescription
 from pr_agent.tools.pr_information_from_user import PRInformationFromUser
 from pr_agent.tools.pr_questions import PRQuestions
 from pr_agent.tools.pr_reviewer import PRReviewer
+from pr_agent.tools.pr_update_changelog import PRUpdateChangelog


 def run(args=None):
-    parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage="""\
+    parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage=
+"""\
 Usage: cli.py --pr-url <URL on supported git hosting service> <command> [<args>].
 For example:
 - cli.py --pr-url=... review

@@ -26,50 +28,82 @@ ask / ask_question [question] - Ask a question about the PR.
 describe / describe_pr - Modify the PR title and description based on the PR's contents.
 improve / improve_code - Suggest improvements to the code in the PR as pull request comments ready to commit.
 reflect - Ask the PR author questions about the PR.
+update_changelog - Update the changelog based on the PR's contents.
 """)
     parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', required=True)
     parser.add_argument('command', type=str, help='The', choices=['review', 'review_pr',
                                                                   'ask', 'ask_question',
                                                                   'describe', 'describe_pr',
                                                                   'improve', 'improve_code',
-                                                                  'reflect', 'review_after_reflect'],
+                                                                  'reflect', 'review_after_reflect',
+                                                                  'update_changelog'],
                         default='review')
     parser.add_argument('rest', nargs=argparse.REMAINDER, default=[])
     args = parser.parse_args(args)
     logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
     command = args.command.lower()
-    if command in ['ask', 'ask_question']:
-        if len(args.rest) == 0:
-            print("Please specify a question")
-            parser.print_help()
-            return
-        print(f"Question: {' '.join(args.rest)} about PR {args.pr_url}")
-        reviewer = PRQuestions(args.pr_url, args.rest)
-        asyncio.run(reviewer.answer())
-    elif command in ['describe', 'describe_pr']:
-        print(f"PR description: {args.pr_url}")
-        reviewer = PRDescription(args.pr_url)
-        asyncio.run(reviewer.describe())
-    elif command in ['improve', 'improve_code']:
-        print(f"PR code suggestions: {args.pr_url}")
-        reviewer = PRCodeSuggestions(args.pr_url)
-        asyncio.run(reviewer.suggest())
-    elif command in ['review', 'review_pr']:
-        print(f"Reviewing PR: {args.pr_url}")
-        reviewer = PRReviewer(args.pr_url, cli_mode=True, args=args.rest)
-        asyncio.run(reviewer.review())
-    elif command in ['reflect']:
-        print(f"Asking the PR author questions: {args.pr_url}")
-        reviewer = PRInformationFromUser(args.pr_url)
-        asyncio.run(reviewer.generate_questions())
-    elif command in ['review_after_reflect']:
-        print(f"Processing author's answers and sending review: {args.pr_url}")
-        reviewer = PRReviewer(args.pr_url, cli_mode=True, is_answer=True)
-        asyncio.run(reviewer.review())
+    commands = {
+        'ask': _handle_ask_command,
+        'ask_question': _handle_ask_command,
+        'describe': _handle_describe_command,
+        'describe_pr': _handle_describe_command,
+        'improve': _handle_improve_command,
+        'improve_code': _handle_improve_command,
+        'review': _handle_review_command,
+        'review_pr': _handle_review_command,
+        'reflect': _handle_reflect_command,
+        'review_after_reflect': _handle_review_after_reflect_command,
+        'update_changelog': _handle_update_changelog,
+    }
+    if command in commands:
+        commands[command](args.pr_url, args.rest)
     else:
         print(f"Unknown command: {command}")
         parser.print_help()


+def _handle_ask_command(pr_url: str, rest: list):
+    if len(rest) == 0:
+        print("Please specify a question")
+        return
+    print(f"Question: {' '.join(rest)} about PR {pr_url}")
+    reviewer = PRQuestions(pr_url, rest)
+    asyncio.run(reviewer.answer())
+
+
+def _handle_describe_command(pr_url: str, rest: list):
+    print(f"PR description: {pr_url}")
+    reviewer = PRDescription(pr_url)
+    asyncio.run(reviewer.describe())
+
+
+def _handle_improve_command(pr_url: str, rest: list):
+    print(f"PR code suggestions: {pr_url}")
+    reviewer = PRCodeSuggestions(pr_url)
+    asyncio.run(reviewer.suggest())
+
+
+def _handle_review_command(pr_url: str, rest: list):
+    print(f"Reviewing PR: {pr_url}")
+    reviewer = PRReviewer(pr_url, cli_mode=True, args=rest)
+    asyncio.run(reviewer.review())
+
+
+def _handle_reflect_command(pr_url: str, rest: list):
+    print(f"Asking the PR author questions: {pr_url}")
+    reviewer = PRInformationFromUser(pr_url)
+    asyncio.run(reviewer.generate_questions())
+
+
+def _handle_review_after_reflect_command(pr_url: str, rest: list):
+    print(f"Processing author's answers and sending review: {pr_url}")
+    reviewer = PRReviewer(pr_url, cli_mode=True, is_answer=True)
+    asyncio.run(reviewer.review())
+
+
+def _handle_update_changelog(pr_url: str, rest: list):
+    print(f"Updating changlog for: {pr_url}")
+    reviewer = PRUpdateChangelog(pr_url, cli_mode=True, args=rest)
+    asyncio.run(reviewer.update_changelog())
+
+
 if __name__ == '__main__':
     run()
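The refactored dispatcher keeps run() usable programmatically as well as from the shell. A hedged sketch (the module path pr_agent.cli follows the README's `python pr_agent/cli.py ...` examples):

# Hedged sketch: driving the CLI in-process; equivalent to running
# `python pr_agent/cli.py --pr_url <pr_url> update_changelog` from the shell.
from pr_agent.cli import run

run(["--pr_url", "https://github.com/Codium-ai/pr-agent/pull/50",
     "update_changelog"])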
@@ -1,7 +1,11 @@
 from os.path import abspath, dirname, join
+from pathlib import Path
+from typing import Optional

 from dynaconf import Dynaconf

+PR_AGENT_TOML_KEY = 'pr-agent'
+
 current_dir = dirname(abspath(__file__))
 settings = Dynaconf(
     envvar_prefix=False,

@@ -9,11 +13,42 @@ settings = Dynaconf(
     settings_files=[join(current_dir, f) for f in [
         "settings/.secrets.toml",
         "settings/configuration.toml",
+        "settings/language_extensions.toml",
         "settings/pr_reviewer_prompts.toml",
         "settings/pr_questions_prompts.toml",
         "settings/pr_description_prompts.toml",
         "settings/pr_code_suggestions_prompts.toml",
         "settings/pr_information_from_user_prompts.toml",
+        "settings/pr_update_changelog.toml",
         "settings_prod/.secrets.toml"
     ]]
 )
+
+
+# Add local configuration from pyproject.toml of the project being reviewed
+def _find_repository_root() -> Path:
+    """
+    Identify project root directory by recursively searching for the .git directory in the parent directories.
+    """
+    cwd = Path.cwd().resolve()
+    no_way_up = False
+    while not no_way_up:
+        no_way_up = cwd == cwd.parent
+        if (cwd / ".git").is_dir():
+            return cwd
+        cwd = cwd.parent
+    return None
+
+
+def _find_pyproject() -> Optional[Path]:
+    """
+    Search for file pyproject.toml in the repository root.
+    """
+    repo_root = _find_repository_root()
+    if repo_root:
+        pyproject = _find_repository_root() / "pyproject.toml"
+        return pyproject if pyproject.is_file() else None
+    return None
+
+
+pyproject_path = _find_pyproject()
+if pyproject_path is not None:
+    settings.load_file(pyproject_path, env=f'tool.{PR_AGENT_TOML_KEY}')
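Given PR_AGENT_TOML_KEY and the load_file call above, a repository under review can carry its own PR-Agent overrides in pyproject.toml. A hedged sketch of what that enables (the example table and key below are illustrative; only the 'tool.pr-agent' prefix comes from this hunk):

# Hedged sketch: a reviewed repo could add, for example,
#
#   [tool.pr-agent.config]
#   verbosity_level = 2
#
# to its pyproject.toml; after pr_agent.config_loader runs, the value should
# be visible through the shared Dynaconf settings object.
from pr_agent.config_loader import settings

print(settings.get("config.verbosity_level", None))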
@@ -2,11 +2,13 @@ from pr_agent.config_loader import settings
 from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
 from pr_agent.git_providers.github_provider import GithubProvider
 from pr_agent.git_providers.gitlab_provider import GitLabProvider
+from pr_agent.git_providers.local_git_provider import LocalGitProvider

 _GIT_PROVIDERS = {
     'github': GithubProvider,
     'gitlab': GitLabProvider,
     'bitbucket': BitbucketProvider,
+    'local' : LocalGitProvider
 }

 def get_git_provider():
@@ -27,7 +27,7 @@ class BitbucketProvider:
         self.set_pr(pr_url)

     def is_supported(self, capability: str) -> bool:
-        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments']:
+        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels']:
             return False
         return True

@@ -60,6 +60,10 @@ class GitProvider(ABC):
     def publish_labels(self, labels):
         pass

+    @abstractmethod
+    def get_labels(self):
+        pass
+
     @abstractmethod
     def remove_initial_comment(self):
         pass

@@ -132,3 +136,4 @@ class IncrementalPR:
         self.commits_range = None
         self.first_new_commit_sha = None
         self.last_seen_commit_sha = None
+
@@ -3,13 +3,15 @@ from datetime import datetime
 from typing import Optional, Tuple
 from urllib.parse import urlparse

-from github import AppAuthentication, Github, Auth
+from github import AppAuthentication, Auth, Github, GithubException
+from retry import retry

 from pr_agent.config_loader import settings

-from .git_provider import FilePatchInfo, GitProvider, IncrementalPR
 from ..algo.language_handler import is_valid_file
 from ..algo.utils import load_large_diff
+from .git_provider import FilePatchInfo, GitProvider, IncrementalPR
+from ..servers.utils import RateLimitExceeded


 class GithubProvider(GitProvider):

@@ -78,7 +80,10 @@ class GithubProvider(GitProvider):
             return self.file_set.values()
         return self.pr.get_files()

+    @retry(exceptions=RateLimitExceeded,
+           tries=settings.github.ratelimit_retries, delay=2, backoff=2, jitter=(1, 3))
     def get_diff_files(self) -> list[FilePatchInfo]:
+        try:
             files = self.get_files()
             diff_files = []
             for file in files:

@@ -86,7 +91,8 @@ class GithubProvider(GitProvider):
                 new_file_content_str = self._get_pr_file_content(file, self.pr.head.sha)
                 patch = file.patch
                 if self.incremental.is_incremental and self.file_set:
-                    original_file_content_str = self._get_pr_file_content(file, self.incremental.last_seen_commit_sha)
+                    original_file_content_str = self._get_pr_file_content(file,
+                                                                          self.incremental.last_seen_commit_sha)
                     patch = load_large_diff(file,
                                             new_file_content_str,
                                             original_file_content_str,

@@ -99,12 +105,18 @@ class GithubProvider(GitProvider):
                     FilePatchInfo(original_file_content_str, new_file_content_str, patch, file.filename))
             self.diff_files = diff_files
             return diff_files
+        except GithubException.RateLimitExceededException as e:
+            logging.error(f"Rate limit exceeded for GitHub API. Original message: {e}")
+            raise RateLimitExceeded("Rate limit exceeded for GitHub API.") from e

     def publish_description(self, pr_title: str, pr_body: str):
         self.pr.edit(title=pr_title, body=pr_body)
         # self.pr.create_issue_comment(pr_comment)

     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
+        if is_temporary and not settings.config.publish_output_progress:
+            logging.debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            return
         response = self.pr.create_issue_comment(pr_comment)
         if hasattr(response, "user") and hasattr(response.user, "login"):
             self.github_user_id = response.user.login

@@ -149,10 +161,8 @@ class GithubProvider(GitProvider):
     def publish_code_suggestions(self, code_suggestions: list):
         """
         Publishes code suggestions as comments on the PR.
-        In practice current APU enables to send only one code suggestion per comment. Might change in the future.
         """
         post_parameters_list = []
-        import github.PullRequestComment
         for suggestion in code_suggestions:
             body = suggestion['body']
             relevant_file = suggestion['relevant_file']

@@ -175,7 +185,6 @@ class GithubProvider(GitProvider):
             if relevant_lines_end > relevant_lines_start:
                 post_parameters = {
                     "body": body,
-                    "commit_id": self.last_commit_id._identity,
                     "path": relevant_file,
                     "line": relevant_lines_end,
                     "start_line": relevant_lines_start,

@@ -184,19 +193,14 @@ class GithubProvider(GitProvider):
             else:  # API is different for single line comments
                 post_parameters = {
                     "body": body,
-                    "commit_id": self.last_commit_id._identity,
                     "path": relevant_file,
                     "line": relevant_lines_start,
                     "side": "RIGHT",
                 }
+            post_parameters_list.append(post_parameters)

         try:
-            headers, data = self.pr._requester.requestJsonAndCheck(
-                "POST", f"{self.pr.url}/comments", input=post_parameters
-            )
-            github.PullRequestComment.PullRequestComment(
-                self.pr._requester, headers, data, completed=True
-            )
+            self.pr.create_review(commit=self.last_commit_id, comments=post_parameters_list)
             return True
         except Exception as e:
             if settings.config.verbosity_level >= 2:

@@ -205,7 +209,7 @@ class GithubProvider(GitProvider):

     def remove_initial_comment(self):
         try:
-            for comment in self.pr.comments_list:
+            for comment in getattr(self.pr, 'comments_list', []):
                 if comment.is_temporary:
                     comment.delete()
         except Exception as e:

@@ -327,5 +331,12 @@ class GithubProvider(GitProvider):
             headers, data = self.pr._requester.requestJsonAndCheck(
                 "PUT", f"{self.pr.issue_url}/labels", input=post_parameters
             )
-        except:
-            logging.exception("Failed to publish labels")
+        except Exception as e:
+            logging.exception(f"Failed to publish labels, error: {e}")
+
+    def get_labels(self):
+        try:
+            return [label.name for label in self.pr.labels]
+        except Exception as e:
+            logging.exception(f"Failed to get labels, error: {e}")
+            return []
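A hedged sketch of the new label round-trip added above (that get_git_provider() returns the configured provider class is an assumption based on the factory shown earlier in this compare; the label text is illustrative):

# Hedged sketch only: read existing labels and publish one more.
from pr_agent.git_providers import get_git_provider

provider = get_git_provider()("https://github.com/Codium-ai/pr-agent/pull/50")
if provider.is_supported("get_labels"):
    labels = provider.get_labels()  # [] on failure, per the hunk above
    provider.publish_labels(labels + ["Bug fix"])  # "Bug fix" is an illustrative label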
@@ -8,11 +8,12 @@ from gitlab import GitlabGetError

 from pr_agent.config_loader import settings

-from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
 from ..algo.language_handler import is_valid_file
+from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider


 class GitLabProvider(GitProvider):

     def __init__(self, merge_request_url: Optional[str] = None, incremental: Optional[bool] = False):
         gitlab_url = settings.get("GITLAB.URL", None)
         if not gitlab_url:

@@ -21,8 +22,8 @@ class GitLabProvider(GitProvider):
         if not gitlab_access_token:
             raise ValueError("GitLab personal access token is not set in the config file")
         self.gl = gitlab.Gitlab(
-            gitlab_url,
-            gitlab_access_token
+            url=gitlab_url,
+            oauth_token=gitlab_access_token
         )
         self.id_project = None
         self.id_mr = None

@@ -112,7 +113,7 @@ class GitLabProvider(GitProvider):
     def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
         raise NotImplementedError("Gitlab provider does not support creating inline comments yet")

-    def create_inline_comment(self, comments: list[dict]):
+    def create_inline_comments(self, comments: list[dict]):
         raise NotImplementedError("Gitlab provider does not support publishing inline comments yet")

     def send_inline_comment(self, body, edit_type, found, relevant_file, relevant_line_in_file, source_line_no,

@@ -236,20 +237,30 @@ class GitLabProvider(GitProvider):
     def get_issue_comments(self):
         raise NotImplementedError("GitLab provider does not support issue comments yet")

-    def _parse_merge_request_url(self, merge_request_url: str) -> Tuple[int, int]:
+    def _parse_merge_request_url(self, merge_request_url: str) -> Tuple[str, int]:
         parsed_url = urlparse(merge_request_url)

         path_parts = parsed_url.path.strip('/').split('/')
-        if path_parts[-2] != 'merge_requests':
+        if 'merge_requests' not in path_parts:
             raise ValueError("The provided URL does not appear to be a GitLab merge request URL")

+        mr_index = path_parts.index('merge_requests')
+        # Ensure there is an ID after 'merge_requests'
+        if len(path_parts) <= mr_index + 1:
+            raise ValueError("The provided URL does not contain a merge request ID")
+
         try:
-            mr_id = int(path_parts[-1])
+            mr_id = int(path_parts[mr_index + 1])
         except ValueError as e:
             raise ValueError("Unable to convert merge request ID to integer") from e

-        # Gitlab supports access by both project numeric ID as well as 'namespace/project_name'
-        return "/".join(path_parts[:2]), mr_id
+        # Handle special delimiter (-)
+        project_path = "/".join(path_parts[:mr_index])
+        if project_path.endswith('/-'):
+            project_path = project_path[:-2]
+
+        # Return the path before 'merge_requests' and the ID
+        return project_path, mr_id

     def _get_merge_request(self):
         mr = self.gl.projects.get(self.id_project).mergerequests.get(self.id_mr)
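A worked example of the URL handling above, as a standalone sketch using only the standard library (the sample URL is illustrative): for https://gitlab.com/group/subgroup/project/-/merge_requests/42 the path parts are ['group', 'subgroup', 'project', '-', 'merge_requests', '42'], mr_index is 4, the trailing '/-' is stripped, and the result is ('group/subgroup/project', 42).

# Standalone replica of the parsing rules above, for illustration only.
from urllib.parse import urlparse

def parse_mr_url(merge_request_url: str) -> tuple[str, int]:
    path_parts = urlparse(merge_request_url).path.strip('/').split('/')
    mr_index = path_parts.index('merge_requests')
    mr_id = int(path_parts[mr_index + 1])
    project_path = "/".join(path_parts[:mr_index])
    if project_path.endswith('/-'):
        project_path = project_path[:-2]
    return project_path, mr_id

print(parse_mr_url("https://gitlab.com/group/subgroup/project/-/merge_requests/42"))
# -> ('group/subgroup/project', 42)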
@@ -258,5 +269,15 @@
     def get_user_id(self):
         return None

-    def publish_labels(self, labels):
+    def publish_labels(self, pr_types):
+        try:
+            self.mr.labels = list(set(pr_types))
+            self.mr.save()
+        except Exception as e:
+            logging.exception(f"Failed to publish labels, error: {e}")
+
+    def publish_inline_comments(self, comments: list[dict]):
         pass
+
+    def get_labels(self):
+        return self.mr.labels
178  pr_agent/git_providers/local_git_provider.py  (new file)
@@ -0,0 +1,178 @@
import logging
from collections import Counter
from pathlib import Path
from typing import List

from git import Repo

from pr_agent.config_loader import _find_repository_root, settings
from pr_agent.git_providers.git_provider import EDIT_TYPE, FilePatchInfo, GitProvider


class PullRequestMimic:
    """
    This class mimics the PullRequest class from the PyGithub library for the LocalGitProvider.
    """

    def __init__(self, title: str, diff_files: List[FilePatchInfo]):
        self.title = title
        self.diff_files = diff_files


class LocalGitProvider(GitProvider):
    """
    This class implements the GitProvider interface for local git repositories.
    It mimics the PR functionality of the GitProvider interface,
    but does not require a hosted git repository.
    Instead of providing a PR url, the user provides a local branch path to generate a diff-patch.
    For the MVP it only supports the /review and /describe capabilities.
    """

    def __init__(self, target_branch_name, incremental=False):
        self.repo_path = _find_repository_root()
        if self.repo_path is None:
            raise ValueError('Could not find repository root')
        self.repo = Repo(self.repo_path)
        self.head_branch_name = self.repo.head.ref.name
        self.target_branch_name = target_branch_name
        self._prepare_repo()
        self.diff_files = None
        self.pr = PullRequestMimic(self.get_pr_title(), self.get_diff_files())
        self.description_path = settings.get('local.description_path') \
            if settings.get('local.description_path') is not None else self.repo_path / 'description.md'
        self.review_path = settings.get('local.review_path') \
            if settings.get('local.review_path') is not None else self.repo_path / 'review.md'
        # inline code comments are not supported for local git repositories
        settings.pr_reviewer.inline_code_comments = False

    def _prepare_repo(self):
        """
        Prepare the repository for PR-mimic generation.
        """
        logging.debug('Preparing repository for PR-mimic generation...')
        if self.repo.is_dirty():
            raise ValueError('The repository is not in a clean state. Please commit or stash pending changes.')
        if self.target_branch_name not in self.repo.heads:
            raise KeyError(f'Branch: {self.target_branch_name} does not exist')

    def is_supported(self, capability: str) -> bool:
        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels']:
            return False
        return True

    def get_diff_files(self) -> list[FilePatchInfo]:
        diffs = self.repo.head.commit.diff(
            self.repo.merge_base(self.repo.head, self.repo.branches[self.target_branch_name]),
            create_patch=True,
            R=True
        )
        diff_files = []
        for diff_item in diffs:
            if diff_item.a_blob is not None:
                original_file_content_str = diff_item.a_blob.data_stream.read().decode('utf-8')
            else:
                original_file_content_str = ""  # empty file
            if diff_item.b_blob is not None:
                new_file_content_str = diff_item.b_blob.data_stream.read().decode('utf-8')
            else:
                new_file_content_str = ""  # empty file
            edit_type = EDIT_TYPE.MODIFIED
            if diff_item.new_file:
                edit_type = EDIT_TYPE.ADDED
            elif diff_item.deleted_file:
                edit_type = EDIT_TYPE.DELETED
            elif diff_item.renamed_file:
                edit_type = EDIT_TYPE.RENAMED
            diff_files.append(
                FilePatchInfo(original_file_content_str,
                              new_file_content_str,
                              diff_item.diff.decode('utf-8'),
                              diff_item.b_path,
                              edit_type=edit_type,
                              old_filename=None if diff_item.a_path == diff_item.b_path else diff_item.a_path
                              )
            )
        self.diff_files = diff_files
        return diff_files

    def get_files(self) -> List[str]:
        """
        Returns a list of files with changes in the diff.
        """
        diff_index = self.repo.head.commit.diff(
            self.repo.merge_base(self.repo.head, self.repo.branches[self.target_branch_name]),
            R=True
        )
        # Get the list of changed files
        diff_files = [item.a_path for item in diff_index]
        return diff_files

    def publish_description(self, pr_title: str, pr_body: str):
        with open(self.description_path, "w") as file:
            # Write the string to the file
            file.write(pr_title + '\n' + pr_body)

    def publish_comment(self, pr_comment: str, is_temporary: bool = False):
        with open(self.review_path, "w") as file:
            # Write the string to the file
            file.write(pr_comment)

    def publish_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
        raise NotImplementedError('Publishing inline comments is not implemented for the local git provider')

    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
        raise NotImplementedError('Creating inline comments is not implemented for the local git provider')

    def publish_inline_comments(self, comments: list[dict]):
        raise NotImplementedError('Publishing inline comments is not implemented for the local git provider')

    def publish_code_suggestion(self, body: str, relevant_file: str,
                                relevant_lines_start: int, relevant_lines_end: int):
        raise NotImplementedError('Publishing code suggestions is not implemented for the local git provider')

    def publish_code_suggestions(self, code_suggestions: list):
        raise NotImplementedError('Publishing code suggestions is not implemented for the local git provider')

    def publish_labels(self, labels):
        pass  # Not applicable to the local git provider, but required by the interface

    def remove_initial_comment(self):
        pass  # Not applicable to the local git provider, but required by the interface

    def get_languages(self):
        """
        Calculate percentage of languages in repository. Used for hunk prioritisation.
        """
        # Get all files in repository
        filepaths = [Path(item.path) for item in self.repo.tree().traverse() if item.type == 'blob']
        # Identify language by file extension and count
        lang_count = Counter(ext.lstrip('.') for filepath in filepaths for ext in [filepath.suffix.lower()])
        # Convert counts to percentages
        total_files = len(filepaths)
        lang_percentage = {lang: count / total_files * 100 for lang, count in lang_count.items()}
        return lang_percentage

    def get_pr_branch(self):
        return self.repo.head

    def get_user_id(self):
        return -1  # Not used anywhere for the local provider, but required by the interface

    def get_pr_description(self):
        commits_diff = list(self.repo.iter_commits(self.target_branch_name + '..HEAD'))
        # Get the commit messages and concatenate
        commit_messages = " ".join([commit.message for commit in commits_diff])
        # TODO Handle the description better - maybe use gpt-3.5 summarisation here?
        return commit_messages[:200]  # Use max 200 characters

    def get_pr_title(self):
        """
        Substitutes the branch-name as the PR-mimic title.
        """
        return self.head_branch_name

    def get_issue_comments(self):
        raise NotImplementedError('Getting issue comments is not implemented for the local git provider')

    def get_labels(self):
        raise NotImplementedError('Getting labels is not implemented for the local git provider')
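A minimal usage sketch for the new provider (illustrative only, not part of the PR; it assumes `pr_agent` is importable, the working directory is inside a git repository, and a `main` branch exists to diff against; the `filename`-style attributes of `FilePatchInfo` are not shown in this excerpt, so only `edit_type` is used here):

```python
# Illustrative sketch only. Assumes a local clone with a 'main' branch; the provider
# diffs HEAD against the given target branch and writes output to local files.
from pr_agent.git_providers.local_git_provider import LocalGitProvider

provider = LocalGitProvider(target_branch_name='main')
print(provider.get_pr_title())          # current branch name, used as the PR-mimic title
for f in provider.get_diff_files():
    print(f.edit_type)                  # how each file changed (ADDED / MODIFIED / ...)
provider.publish_comment("Looks good")  # written to review.md rather than a hosted PR
```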
@@ -8,46 +8,57 @@ from pr_agent.tools.pr_reviewer import PRReviewer


 async def run_action():
-    GITHUB_EVENT_NAME = os.environ.get('GITHUB_EVENT_NAME', None)
+    # Get environment variables
+    GITHUB_EVENT_NAME = os.environ.get('GITHUB_EVENT_NAME')
+    GITHUB_EVENT_PATH = os.environ.get('GITHUB_EVENT_PATH')
+    OPENAI_KEY = os.environ.get('OPENAI_KEY')
+    OPENAI_ORG = os.environ.get('OPENAI_ORG')
+    GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
+
+    # Check if required environment variables are set
     if not GITHUB_EVENT_NAME:
         print("GITHUB_EVENT_NAME not set")
         return
-    GITHUB_EVENT_PATH = os.environ.get('GITHUB_EVENT_PATH', None)
     if not GITHUB_EVENT_PATH:
         print("GITHUB_EVENT_PATH not set")
         return
-    try:
-        event_payload = json.load(open(GITHUB_EVENT_PATH, 'r'))
-    except json.decoder.JSONDecodeError as e:
-        print(f"Failed to parse JSON: {e}")
-        return
-    OPENAI_KEY = os.environ.get('OPENAI_KEY', None)
     if not OPENAI_KEY:
         print("OPENAI_KEY not set")
         return
-    OPENAI_ORG = os.environ.get('OPENAI_ORG', None)
-    GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN', None)
     if not GITHUB_TOKEN:
         print("GITHUB_TOKEN not set")
         return
+
+    # Set the environment variables in the settings
     settings.set("OPENAI.KEY", OPENAI_KEY)
     if OPENAI_ORG:
         settings.set("OPENAI.ORG", OPENAI_ORG)
     settings.set("GITHUB.USER_TOKEN", GITHUB_TOKEN)
     settings.set("GITHUB.DEPLOYMENT_TYPE", "user")
+
+    # Load the event payload
+    try:
+        with open(GITHUB_EVENT_PATH, 'r') as f:
+            event_payload = json.load(f)
+    except json.decoder.JSONDecodeError as e:
+        print(f"Failed to parse JSON: {e}")
+        return
+
+    # Handle pull request event
     if GITHUB_EVENT_NAME == "pull_request":
-        action = event_payload.get("action", None)
+        action = event_payload.get("action")
         if action in ["opened", "reopened"]:
-            pr_url = event_payload.get("pull_request", {}).get("url", None)
+            pr_url = event_payload.get("pull_request", {}).get("url")
             if pr_url:
                 await PRReviewer(pr_url).review()
+
+    # Handle issue comment event
     elif GITHUB_EVENT_NAME == "issue_comment":
-        action = event_payload.get("action", None)
+        action = event_payload.get("action")
         if action in ["created", "edited"]:
-            comment_body = event_payload.get("comment", {}).get("body", None)
+            comment_body = event_payload.get("comment", {}).get("body")
             if comment_body:
-                pr_url = event_payload.get("issue", {}).get("pull_request", {}).get("url", None)
+                pr_url = event_payload.get("issue", {}).get("pull_request", {}).get("url")
                 if pr_url:
                     body = comment_body.strip().lower()
                     await PRAgent().handle_request(pr_url, body)
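For reference, a hedged sketch of exercising the refactored `run_action` outside of GitHub Actions; the module path, event file and secret values are placeholders, not taken from the PR:

```python
# Illustrative only: fake the GitHub Actions environment, then invoke run_action().
import asyncio
import json
import os

os.environ["GITHUB_EVENT_NAME"] = "pull_request"
os.environ["GITHUB_EVENT_PATH"] = "/tmp/event.json"   # placeholder path
os.environ["OPENAI_KEY"] = "<openai key>"             # placeholder secret
os.environ["GITHUB_TOKEN"] = "<github token>"         # placeholder secret

with open("/tmp/event.json", "w") as f:
    json.dump({"action": "opened",
               "pull_request": {"url": "https://api.github.com/repos/org/repo/pulls/1"}}, f)

from pr_agent.servers.github_action_runner import run_action  # module path assumed
asyncio.run(run_action())
```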
@@ -1,3 +1,4 @@
+from typing import Dict, Any
 import logging
 import sys

@@ -14,50 +15,65 @@ router = APIRouter()


 @router.post("/api/v1/github_webhooks")
 async def handle_github_webhooks(request: Request, response: Response):
-    logging.debug("Received a github webhook")
+    """
+    Receives and processes incoming GitHub webhook requests.
+    Verifies the request signature, parses the request body, and passes it to the handle_request function for further processing.
+    """
+    logging.debug("Received a GitHub webhook")
+
     try:
         body = await request.json()
     except Exception as e:
         logging.error("Error parsing request body", e)
         raise HTTPException(status_code=400, detail="Error parsing request body") from e
+
     body_bytes = await request.body()
     signature_header = request.headers.get('x-hub-signature-256', None)
-    try:
-        webhook_secret = settings.github.webhook_secret
-    except AttributeError:
-        webhook_secret = None
+    webhook_secret = getattr(settings.github, 'webhook_secret', None)
     if webhook_secret:
         verify_signature(body_bytes, webhook_secret, signature_header)
+
     logging.debug(f'Request body:\n{body}')
+
     return await handle_request(body)


-async def handle_request(body):
-    action = body.get("action", None)
-    installation_id = body.get("installation", {}).get("id", None)
+async def handle_request(body: Dict[str, Any]):
+    """
+    Handle incoming GitHub webhook requests.

+    Args:
+        body: The request body.
+    """
+    action = body.get("action")
+    installation_id = body.get("installation", {}).get("id")
     settings.set("GITHUB.INSTALLATION_ID", installation_id)
     agent = PRAgent()
+
     if action == 'created':
         if "comment" not in body:
             return {}
-        comment_body = body.get("comment", {}).get("body", None)
-        if 'sender' in body and 'login' in body['sender'] and 'bot' in body['sender']['login']:
+        comment_body = body.get("comment", {}).get("body")
+        sender = body.get("sender", {}).get("login")
+        if sender and 'bot' in sender:
             return {}
-        if "issue" not in body and "pull_request" not in body["issue"]:
+        if "issue" not in body or "pull_request" not in body["issue"]:
             return {}
         pull_request = body["issue"]["pull_request"]
-        api_url = pull_request.get("url", None)
+        api_url = pull_request.get("url")
         await agent.handle_request(api_url, comment_body)
+
     elif action in ["opened"] or 'reopened' in action:
-        pull_request = body.get("pull_request", None)
+        pull_request = body.get("pull_request")
         if not pull_request:
             return {}
-        api_url = pull_request.get("url", None)
-        if api_url is None:
+        api_url = pull_request.get("url")
+        if not api_url:
             return {}
         await agent.handle_request(api_url, "/review")
-    else:
-        return {}
+
+    return {}
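As a sanity check on the new guard clauses, a hedged example of a comment event that would reach `agent.handle_request` (structure mirrors the checks above; all values are invented):

```python
# Illustrative payload only - values are invented; keys follow the handler above.
payload = {
    "action": "created",
    "installation": {"id": 123},
    "sender": {"login": "some-user"},   # a login containing 'bot' is ignored
    "comment": {"body": "/review"},
    "issue": {"pull_request": {"url": "https://api.github.com/repos/org/repo/pulls/7"}},
}
# await handle_request(payload)  -> dispatches "/review" for the referenced PR
```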
@@ -15,28 +15,40 @@ NOTIFICATION_URL = "https://api.github.com/notifications"


 def now() -> str:
+    """
+    Get the current UTC time in ISO 8601 format.
+
+    Returns:
+        str: The current UTC time in ISO 8601 format.
+    """
     now_utc = datetime.now(timezone.utc).isoformat()
     now_utc = now_utc.replace("+00:00", "Z")
     return now_utc


 async def polling_loop():
+    """
+    Polls for notifications and handles them accordingly.
+    """
     handled_ids = set()
     since = [now()]
     last_modified = [None]
     git_provider = get_git_provider()()
     user_id = git_provider.get_user_id()
     agent = PRAgent()
+
     try:
         deployment_type = settings.github.deployment_type
         token = settings.github.user_token
     except AttributeError:
         deployment_type = 'none'
         token = None
+
     if deployment_type != 'user':
         raise ValueError("Deployment mode must be set to 'user' to get notifications")
     if not token:
         raise ValueError("User token must be set to get notifications")
+
     async with aiohttp.ClientSession() as session:
         while True:
             try:
@@ -52,6 +64,7 @@ async def polling_loop():
                     params["since"] = since[0]
                 if last_modified[0]:
                     headers["If-Modified-Since"] = last_modified[0]
+
                 async with session.get(NOTIFICATION_URL, headers=headers, params=params) as response:
                     if response.status == 200:
                         if 'Last-Modified' in response.headers:
@@ -1,64 +0,0 @@ (entire file removed)
-import asyncio
-import time
-
-import gitlab
-
-from pr_agent.agent.pr_agent import PRAgent
-from pr_agent.config_loader import settings
-
-gl = gitlab.Gitlab(
-    settings.get("GITLAB.URL"),
-    private_token=settings.get("GITLAB.PERSONAL_ACCESS_TOKEN")
-)
-
-# Set the list of projects to monitor
-projects_to_monitor = settings.get("GITLAB.PROJECTS_TO_MONITOR")
-magic_word = settings.get("GITLAB.MAGIC_WORD")
-
-# Hold the previous seen comments
-previous_comments = set()
-
-
-def check_comments():
-    print('Polling')
-    new_comments = {}
-    for project in projects_to_monitor:
-        project = gl.projects.get(project)
-        merge_requests = project.mergerequests.list(state='opened')
-        for mr in merge_requests:
-            notes = mr.notes.list(get_all=True)
-            for note in notes:
-                if note.id not in previous_comments and note.body.startswith(magic_word):
-                    new_comments[note.id] = dict(
-                        body=note.body[len(magic_word):],
-                        project=project.name,
-                        mr=mr
-                    )
-                    previous_comments.add(note.id)
-                    print(f"New comment in project {project.name}, merge request {mr.title}: {note.body}")
-
-    return new_comments
-
-
-def handle_new_comments(new_comments):
-    print('Handling new comments')
-    agent = PRAgent()
-    for _, comment in new_comments.items():
-        print(f"Handling comment: {comment['body']}")
-        asyncio.run(agent.handle_request(comment['mr'].web_url, comment['body']))
-
-
-def run():
-    assert settings.get('CONFIG.GIT_PROVIDER') == 'gitlab', 'This script is only for GitLab'
-    # Initial run to populate previous_comments
-    check_comments()
-
-    # Run the check every minute
-    while True:
-        time.sleep(settings.get("GITLAB.POLLING_INTERVAL_SECONDS"))
-        new_comments = check_comments()
-        if new_comments:
-            handle_new_comments(new_comments)
-
-
-if __name__ == '__main__':
-    run()
47  pr_agent/servers/gitlab_webhook.py  (new file)
@@ -0,0 +1,47 @@
import logging

import uvicorn
from fastapi import APIRouter, FastAPI, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from starlette.background import BackgroundTasks

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import settings

app = FastAPI()
router = APIRouter()


@router.post("/webhook")
async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
    data = await request.json()
    if data.get('object_kind') == 'merge_request' and data['object_attributes'].get('action') in ['open', 'reopen']:
        logging.info(f"A merge request has been opened: {data['object_attributes'].get('title')}")
        url = data['object_attributes'].get('url')
        background_tasks.add_task(PRAgent().handle_request, url, "/review")
    elif data.get('object_kind') == 'note' and data['event_type'] == 'note':
        if 'merge_request' in data:
            mr = data['merge_request']
            url = mr.get('url')
            body = data.get('object_attributes', {}).get('note')
            background_tasks.add_task(PRAgent().handle_request, url, body)
    return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))


def start():
    gitlab_url = settings.get("GITLAB.URL", None)
    if not gitlab_url:
        raise ValueError("GITLAB.URL is not set")
    gitlab_token = settings.get("GITLAB.PERSONAL_ACCESS_TOKEN", None)
    if not gitlab_token:
        raise ValueError("GITLAB.PERSONAL_ACCESS_TOKEN is not set")
    settings.config.git_provider = "gitlab"

    app = FastAPI()
    app.include_router(router)

    uvicorn.run(app, host="0.0.0.0", port=3000)


if __name__ == '__main__':
    start()
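A hedged sketch of the two payload shapes the new endpoint reacts to (keys mirror the handler above; values are invented):

```python
# Illustrative only: minimal bodies matching the two branches in gitlab_webhook().
mr_opened = {
    "object_kind": "merge_request",
    "object_attributes": {"action": "open", "title": "Add feature",
                          "url": "https://gitlab.example.com/group/repo/-/merge_requests/1"},
}
note_added = {
    "object_kind": "note",
    "event_type": "note",
    "merge_request": {"url": "https://gitlab.example.com/group/repo/-/merge_requests/1"},
    "object_attributes": {"note": "/ask how does this work?"},
}
# POSTing either JSON body to /webhook schedules PRAgent().handle_request(...) in the background.
```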
@@ -21,3 +21,7 @@ def verify_signature(payload_body, secret_token, signature_header):
     if not hmac.compare_digest(expected_signature, signature_header):
         raise HTTPException(status_code=403, detail="Request signatures didn't match!")

+
+class RateLimitExceeded(Exception):
+    """Raised when the git provider API rate limit has been exceeded."""
+    pass
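For context, a hedged sketch of producing a matching `x-hub-signature-256` header for `verify_signature`; the `sha256=` prefix follows GitHub's documented webhook convention and is an assumption here, since the `expected_signature` computation sits outside this hunk:

```python
# Illustrative only: the signature a test client would send alongside the raw body.
import hashlib
import hmac

def sign(payload_body: bytes, secret_token: str) -> str:
    digest = hmac.new(secret_token.encode("utf-8"), payload_body, hashlib.sha256).hexdigest()
    return "sha256=" + digest  # prefix assumed from GitHub's webhook docs
```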
@@ -1,8 +1,11 @@
 [config]
-model="gpt-4-0613"
+model="gpt-4"
+fallback_models=["gpt-3.5-turbo-16k"]
 git_provider="github"
 publish_output=true
+publish_output_progress=true
 verbosity_level=0 # 0,1,2
+use_extra_bad_extensions=false

 [pr_reviewer]
 require_focused_review=true
@@ -21,9 +24,13 @@ publish_description_as_comment=false
 [pr_code_suggestions]
 num_code_suggestions=4

+[pr_update_changelog]
+push_changelog_changes=false
+
 [github]
 # The type of deployment to create. Valid values are 'app' or 'user'.
 deployment_type = "user"
+ratelimit_retries = 5

 [gitlab]
 # URL to the gitlab service
@@ -37,3 +44,8 @@ magic_word = "AutoReview"

 # Polling interval
 polling_interval_seconds = 30
+
+[local]
+# LocalGitProvider settings - uncomment to use paths other than default
+# description_path= "path/to/description.md"
+# review_path= "path/to/review.md"
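The added keys are read through the same Dynaconf-style `settings` object used throughout the diff; a hedged sketch of how they surface at runtime (attribute paths follow the existing usage in this PR, and the `fallback_models` attribute name is presumed from the TOML key):

```python
# Illustrative only: reading the new configuration keys.
from pr_agent.config_loader import settings

print(settings.config.model)                   # "gpt-4"
print(settings.config.fallback_models)         # ["gpt-3.5-turbo-16k"] (attribute name presumed)
print(settings.github.ratelimit_retries)       # 5
print(settings.get("local.description_path"))  # None unless uncommented under [local]
```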
434  pr_agent/settings/language_extensions.toml  (new file)
@@ -0,0 +1,434 @@
[bad_extensions]
default = [
    'app',
    'bin',
    'bmp',
    'bz2',
    'class',
    'csv',
    'dat',
    'db',
    'dll',
    'dylib',
    'egg',
    'eot',
    'exe',
    'gif',
    'gitignore',
    'glif',
    'gradle',
    'gz',
    'ico',
    'jar',
    'jpeg',
    'jpg',
    'lo',
    'lock',
    'log',
    'mp3',
    'mp4',
    'nar',
    'o',
    'ogg',
    'otf',
    'p',
    'pdf',
    'png',
    'pickle',
    'pkl',
    'pyc',
    'pyd',
    'pyo',
    'rkt',
    'so',
    'ss',
    'svg',
    'tar',
    'tsv',
    'ttf',
    'war',
    'webm',
    'woff',
    'woff2',
    'xz',
    'zip',
    'zst',
    'snap'
]
extra = [
    'md',
    'txt'
]

[language_extension_map_org]
ABAP = [".abap", ]
"AGS Script" = [".ash", ]
AMPL = [".ampl", ]
ANTLR = [".g4", ]
"API Blueprint" = [".apib", ]
APL = [".apl", ".dyalog", ]
ASP = [".asp", ".asax", ".ascx", ".ashx", ".asmx", ".aspx", ".axd", ]
ATS = [".dats", ".hats", ".sats", ]
ActionScript = [".as", ]
Ada = [".adb", ".ada", ".ads", ]
Agda = [".agda", ]
Alloy = [".als", ]
ApacheConf = [".apacheconf", ".vhost", ]
AppleScript = [".applescript", ".scpt", ]
Arc = [".arc", ]
Arduino = [".ino", ]
AsciiDoc = [".asciidoc", ".adoc", ]
AspectJ = [".aj", ]
Assembly = [".asm", ".a51", ".nasm", ]
Augeas = [".aug", ]
AutoHotkey = [".ahk", ".ahkl", ]
AutoIt = [".au3", ]
Awk = [".awk", ".auk", ".gawk", ".mawk", ".nawk", ]
Batchfile = [".bat", ".cmd", ]
Befunge = [".befunge", ]
Bison = [".bison", ]
BitBake = [".bb", ]
BlitzBasic = [".decls", ]
BlitzMax = [".bmx", ]
Bluespec = [".bsv", ]
Boo = [".boo", ]
Brainfuck = [".bf", ]
Brightscript = [".brs", ]
Bro = [".bro", ]
C = [".c", ".cats", ".h", ".idc", ".w", ]
"C#" = [".cs", ".cake", ".cshtml", ".csx", ]
"C++" = [".cpp", ".c++", ".cc", ".cp", ".cxx", ".h++", ".hh", ".hpp", ".hxx", ".inl", ".ipp", ".tcc", ".tpp", ".C", ".H", ]
C-ObjDump = [".c-objdump", ]
"C2hs Haskell" = [".chs", ]
CLIPS = [".clp", ]
CMake = [".cmake", ".cmake.in", ]
COBOL = [".cob", ".cbl", ".ccp", ".cobol", ".cpy", ]
CSS = [".css", ]
CSV = [".csv", ]
"Cap'n Proto" = [".capnp", ]
CartoCSS = [".mss", ]
Ceylon = [".ceylon", ]
Chapel = [".chpl", ]
ChucK = [".ck", ]
Cirru = [".cirru", ]
Clarion = [".clw", ]
Clean = [".icl", ".dcl", ]
Click = [".click", ]
Clojure = [".clj", ".boot", ".cl2", ".cljc", ".cljs", ".cljs.hl", ".cljscm", ".cljx", ".hic", ]
CoffeeScript = [".coffee", "._coffee", ".cjsx", ".cson", ".iced", ]
ColdFusion = [".cfm", ".cfml", ]
"ColdFusion CFC" = [".cfc", ]
"Common Lisp" = [".lisp", ".asd", ".lsp", ".ny", ".podsl", ".sexp", ]
"Component Pascal" = [".cps", ]
Coq = [".coq", ]
Cpp-ObjDump = [".cppobjdump", ".c++-objdump", ".c++objdump", ".cpp-objdump", ".cxx-objdump", ]
Creole = [".creole", ]
Crystal = [".cr", ]
Csound = [".csd", ]
Cucumber = [".feature", ]
Cuda = [".cu", ".cuh", ]
Cycript = [".cy", ]
Cython = [".pyx", ".pxd", ".pxi", ]
D = [".di", ]
D-ObjDump = [".d-objdump", ]
"DIGITAL Command Language" = [".com", ]
DM = [".dm", ]
"DNS Zone" = [".zone", ".arpa", ]
"Darcs Patch" = [".darcspatch", ".dpatch", ]
Dart = [".dart", ]
Diff = [".diff", ".patch", ]
Dockerfile = [".dockerfile", "Dockerfile", ]
Dogescript = [".djs", ]
Dylan = [".dylan", ".dyl", ".intr", ".lid", ]
E = [".E", ]
ECL = [".ecl", ".eclxml", ]
Eagle = [".sch", ".brd", ]
"Ecere Projects" = [".epj", ]
Eiffel = [".e", ]
Elixir = [".ex", ".exs", ]
Elm = [".elm", ]
"Emacs Lisp" = [".el", ".emacs", ".emacs.desktop", ]
EmberScript = [".em", ".emberscript", ]
Erlang = [".erl", ".escript", ".hrl", ".xrl", ".yrl", ]
"F#" = [".fs", ".fsi", ".fsx", ]
FLUX = [".flux", ]
FORTRAN = [".f90", ".f", ".f03", ".f08", ".f77", ".f95", ".for", ".fpp", ]
Factor = [".factor", ]
Fancy = [".fy", ".fancypack", ]
Fantom = [".fan", ]
Formatted = [".eam.fs", ]
Forth = [".fth", ".4th", ".forth", ".frt", ]
FreeMarker = [".ftl", ]
G-code = [".g", ".gco", ".gcode", ]
GAMS = [".gms", ]
GAP = [".gap", ".gi", ]
GAS = [".s", ]
GDScript = [".gd", ]
GLSL = [".glsl", ".fp", ".frag", ".frg", ".fsh", ".fshader", ".geo", ".geom", ".glslv", ".gshader", ".shader", ".vert", ".vrx", ".vsh", ".vshader", ]
Genshi = [".kid", ]
"Gentoo Ebuild" = [".ebuild", ]
"Gentoo Eclass" = [".eclass", ]
"Gettext Catalog" = [".po", ".pot", ]
Glyph = [".glf", ]
Gnuplot = [".gp", ".gnu", ".gnuplot", ".plot", ".plt", ]
Go = [".go", ]
Golo = [".golo", ]
Gosu = [".gst", ".gsx", ".vark", ]
Grace = [".grace", ]
Gradle = [".gradle", ]
"Grammatical Framework" = [".gf", ]
GraphQL = [".graphql", ]
"Graphviz (DOT)" = [".dot", ".gv", ]
Groff = [".man", ".1", ".1in", ".1m", ".1x", ".2", ".3", ".3in", ".3m", ".3qt", ".3x", ".4", ".5", ".6", ".7", ".8", ".9", ".me", ".rno", ".roff", ]
Groovy = [".groovy", ".grt", ".gtpl", ".gvy", ]
"Groovy Server Pages" = [".gsp", ]
HCL = [".hcl", ".tf", ]
HLSL = [".hlsl", ".fxh", ".hlsli", ]
HTML = [".html", ".htm", ".html.hl", ".xht", ".xhtml", ]
"HTML+Django" = [".mustache", ".jinja", ]
"HTML+EEX" = [".eex", ]
"HTML+ERB" = [".erb", ".erb.deface", ]
"HTML+PHP" = [".phtml", ]
HTTP = [".http", ]
Haml = [".haml", ".haml.deface", ]
Handlebars = [".handlebars", ".hbs", ]
Harbour = [".hb", ]
Haskell = [".hs", ".hsc", ]
Haxe = [".hx", ".hxsl", ]
Hy = [".hy", ]
IDL = [".dlm", ]
"IGOR Pro" = [".ipf", ]
INI = [".ini", ".cfg", ".prefs", ".properties", ]
"IRC log" = [".irclog", ".weechatlog", ]
Idris = [".idr", ".lidr", ]
"Inform 7" = [".ni", ".i7x", ]
"Inno Setup" = [".iss", ]
Io = [".io", ]
Ioke = [".ik", ]
Isabelle = [".thy", ]
J = [".ijs", ]
JFlex = [".flex", ".jflex", ]
JSON = [".json", ".geojson", ".lock", ".topojson", ]
JSON5 = [".json5", ]
JSONLD = [".jsonld", ]
JSONiq = [".jq", ]
JSX = [".jsx", ]
Jade = [".jade", ]
Jasmin = [".j", ]
Java = [".java", ]
"Java Server Pages" = [".jsp", ]
JavaScript = [".js", "._js", ".bones", ".es6", ".jake", ".jsb", ".jscad", ".jsfl", ".jsm", ".jss", ".njs", ".pac", ".sjs", ".ssjs", ".xsjs", ".xsjslib", ]
Julia = [".jl", ]
"Jupyter Notebook" = [".ipynb", ]
KRL = [".krl", ]
KiCad = [".kicad_pcb", ]
Kit = [".kit", ]
Kotlin = [".kt", ".ktm", ".kts", ]
LFE = [".lfe", ]
LLVM = [".ll", ]
LOLCODE = [".lol", ]
LSL = [".lsl", ".lslp", ]
LabVIEW = [".lvproj", ]
Lasso = [".lasso", ".las", ".lasso8", ".lasso9", ".ldml", ]
Latte = [".latte", ]
Lean = [".lean", ".hlean", ]
Less = [".less", ]
Lex = [".lex", ]
LilyPond = [".ly", ".ily", ]
"Linker Script" = [".ld", ".lds", ]
Liquid = [".liquid", ]
"Literate Agda" = [".lagda", ]
"Literate CoffeeScript" = [".litcoffee", ]
"Literate Haskell" = [".lhs", ]
LiveScript = [".ls", "._ls", ]
Logos = [".xm", ".x", ".xi", ]
Logtalk = [".lgt", ".logtalk", ]
LookML = [".lookml", ]
Lua = [".lua", ".nse", ".pd_lua", ".rbxs", ".wlua", ]
M = [".mumps", ]
M4 = [".m4", ]
MAXScript = [".mcr", ]
MTML = [".mtml", ]
MUF = [".muf", ]
Makefile = [".mak", ".mk", ".mkfile", "Makefile", ]
Mako = [".mako", ".mao", ]
Maple = [".mpl", ]
Markdown = [".md", ".markdown", ".mkd", ".mkdn", ".mkdown", ".ron", ]
Mask = [".mask", ]
Mathematica = [".mathematica", ".cdf", ".ma", ".mt", ".nb", ".nbp", ".wl", ".wlt", ]
Matlab = [".matlab", ]
Max = [".maxpat", ".maxhelp", ".maxproj", ".mxt", ".pat", ]
MediaWiki = [".mediawiki", ".wiki", ]
Metal = [".metal", ]
MiniD = [".minid", ]
Mirah = [".druby", ".duby", ".mir", ".mirah", ]
Modelica = [".mo", ]
"Module Management System" = [".mms", ".mmk", ]
Monkey = [".monkey", ]
MoonScript = [".moon", ]
Myghty = [".myt", ]
NSIS = [".nsi", ".nsh", ]
NetLinx = [".axs", ".axi", ]
"NetLinx+ERB" = [".axs.erb", ".axi.erb", ]
NetLogo = [".nlogo", ]
Nginx = [".nginxconf", ]
Nimrod = [".nim", ".nimrod", ]
Ninja = [".ninja", ]
Nit = [".nit", ]
Nix = [".nix", ]
Nu = [".nu", ]
NumPy = [".numpy", ".numpyw", ".numsc", ]
OCaml = [".ml", ".eliom", ".eliomi", ".ml4", ".mli", ".mll", ".mly", ]
ObjDump = [".objdump", ]
"Objective-C++" = [".mm", ]
Objective-J = [".sj", ]
Octave = [".oct", ]
Omgrofl = [".omgrofl", ]
Opa = [".opa", ]
Opal = [".opal", ]
OpenCL = [".cl", ".opencl", ]
"OpenEdge ABL" = [".p", ]
OpenSCAD = [".scad", ]
Org = [".org", ]
Ox = [".ox", ".oxh", ".oxo", ]
Oxygene = [".oxygene", ]
Oz = [".oz", ]
PAWN = [".pwn", ]
PHP = [".php", ".aw", ".ctp", ".php3", ".php4", ".php5", ".phps", ".phpt", ]
"POV-Ray SDL" = [".pov", ]
Pan = [".pan", ]
Papyrus = [".psc", ]
Parrot = [".parrot", ]
"Parrot Assembly" = [".pasm", ]
"Parrot Internal Representation" = [".pir", ]
Pascal = [".pas", ".dfm", ".dpr", ".lpr", ]
Perl = [".pl", ".al", ".perl", ".ph", ".plx", ".pm", ".psgi", ".t", ]
Perl6 = [".6pl", ".6pm", ".nqp", ".p6", ".p6l", ".p6m", ".pl6", ".pm6", ]
Pickle = [".pkl", ]
PigLatin = [".pig", ]
Pike = [".pike", ".pmod", ]
Pod = [".pod", ]
PogoScript = [".pogo", ]
Pony = [".pony", ]
PostScript = [".ps", ".eps", ]
PowerShell = [".ps1", ".psd1", ".psm1", ]
Processing = [".pde", ]
Prolog = [".prolog", ".yap", ]
"Propeller Spin" = [".spin", ]
"Protocol Buffer" = [".proto", ]
"Public Key" = [".pub", ]
"Pure Data" = [".pd", ]
PureBasic = [".pb", ".pbi", ]
PureScript = [".purs", ]
Python = [".py", ".bzl", ".gyp", ".lmi", ".pyde", ".pyp", ".pyt", ".pyw", ".tac", ".wsgi", ".xpy", ]
"Python traceback" = [".pytb", ]
QML = [".qml", ".qbs", ]
QMake = [".pri", ]
R = [".r", ".rd", ".rsx", ]
RAML = [".raml", ]
RDoc = [".rdoc", ]
REALbasic = [".rbbas", ".rbfrm", ".rbmnu", ".rbres", ".rbtbar", ".rbuistate", ]
RHTML = [".rhtml", ]
RMarkdown = [".rmd", ]
Racket = [".rkt", ".rktd", ".rktl", ".scrbl", ]
"Ragel in Ruby Host" = [".rl", ]
"Raw token data" = [".raw", ]
Rebol = [".reb", ".r2", ".r3", ".rebol", ]
Red = [".red", ".reds", ]
Redcode = [".cw", ]
"Ren'Py" = [".rpy", ]
RenderScript = [".rsh", ]
RobotFramework = [".robot", ]
Rouge = [".rg", ]
Ruby = [".rb", ".builder", ".gemspec", ".god", ".irbrc", ".jbuilder", ".mspec", ".podspec", ".rabl", ".rake", ".rbuild", ".rbw", ".rbx", ".ru", ".ruby", ".thor", ".watchr", ]
Rust = [".rs", ".rs.in", ]
SAS = [".sas", ]
SCSS = [".scss", ]
SMT = [".smt2", ".smt", ]
SPARQL = [".sparql", ".rq", ]
SQF = [".sqf", ".hqf", ]
SQL = [".pls", ".pck", ".pkb", ".pks", ".plb", ".plsql", ".sql", ".cql", ".ddl", ".prc", ".tab", ".udf", ".viw", ".db2", ]
STON = [".ston", ]
SVG = [".svg", ]
Sage = [".sage", ".sagews", ]
SaltStack = [".sls", ]
Sass = [".sass", ]
Scala = [".scala", ".sbt", ]
Scaml = [".scaml", ]
Scheme = [".scm", ".sld", ".sps", ".ss", ]
Scilab = [".sci", ".sce", ]
Self = [".self", ]
Shell = [".sh", ".bash", ".bats", ".command", ".ksh", ".sh.in", ".tmux", ".tool", ".zsh", ]
ShellSession = [".sh-session", ]
Shen = [".shen", ]
Slash = [".sl", ]
Slim = [".slim", ]
Smali = [".smali", ]
Smalltalk = [".st", ]
Smarty = [".tpl", ]
Solidity = [".sol", ]
SourcePawn = [".sp", ".sma", ]
Squirrel = [".nut", ]
Stan = [".stan", ]
"Standard ML" = [".ML", ".fun", ".sig", ".sml", ]
Stata = [".do", ".ado", ".doh", ".ihlp", ".mata", ".matah", ".sthlp", ]
Stylus = [".styl", ]
SuperCollider = [".scd", ]
Swift = [".swift", ]
SystemVerilog = [".sv", ".svh", ".vh", ]
TOML = [".toml", ]
TXL = [".txl", ]
Tcl = [".tcl", ".adp", ".tm", ]
Tcsh = [".tcsh", ".csh", ]
TeX = [".tex", ".aux", ".bbx", ".bib", ".cbx", ".dtx", ".ins", ".lbx", ".ltx", ".mkii", ".mkiv", ".mkvi", ".sty", ".toc", ]
Tea = [".tea", ]
Text = [".txt", ".no", ]
Textile = [".textile", ]
Thrift = [".thrift", ]
Turing = [".tu", ]
Turtle = [".ttl", ]
Twig = [".twig", ]
TypeScript = [".ts", ".tsx", ]
"Unified Parallel C" = [".upc", ]
"Unity3D Asset" = [".anim", ".asset", ".mat", ".meta", ".prefab", ".unity", ]
Uno = [".uno", ]
UnrealScript = [".uc", ]
UrWeb = [".ur", ".urs", ]
VCL = [".vcl", ]
VHDL = [".vhdl", ".vhd", ".vhf", ".vhi", ".vho", ".vhs", ".vht", ".vhw", ]
Vala = [".vala", ".vapi", ]
Verilog = [".veo", ]
VimL = [".vim", ]
"Visual Basic" = [".vb", ".bas", ".frm", ".frx", ".vba", ".vbhtml", ".vbs", ]
Volt = [".volt", ]
Vue = [".vue", ]
"Web Ontology Language" = [".owl", ]
WebAssembly = [".wat", ]
WebIDL = [".webidl", ]
X10 = [".x10", ]
XC = [".xc", ]
XML = [".xml", ".ant", ".axml", ".ccxml", ".clixml", ".cproject", ".csl", ".csproj", ".ct", ".dita", ".ditamap", ".ditaval", ".dll.config", ".dotsettings", ".filters", ".fsproj", ".fxml", ".glade", ".grxml", ".iml", ".ivy", ".jelly", ".jsproj", ".kml", ".launch", ".mdpolicy", ".mxml", ".nproj", ".nuspec", ".odd", ".osm", ".plist", ".props", ".ps1xml", ".psc1", ".pt", ".rdf", ".rss", ".scxml", ".srdf", ".storyboard", ".stTheme", ".sublime-snippet", ".targets", ".tmCommand", ".tml", ".tmLanguage", ".tmPreferences", ".tmSnippet", ".tmTheme", ".ui", ".urdf", ".ux", ".vbproj", ".vcxproj", ".vssettings", ".vxml", ".wsdl", ".wsf", ".wxi", ".wxl", ".wxs", ".x3d", ".xacro", ".xaml", ".xib", ".xlf", ".xliff", ".xmi", ".xml.dist", ".xproj", ".xsd", ".xul", ".zcml", ]
XPages = [".xsp-config", ".xsp.metadata", ]
XProc = [".xpl", ".xproc", ]
XQuery = [".xquery", ".xq", ".xql", ".xqm", ".xqy", ]
XS = [".xs", ]
XSLT = [".xslt", ".xsl", ]
Xojo = [".xojo_code", ".xojo_menu", ".xojo_report", ".xojo_script", ".xojo_toolbar", ".xojo_window", ]
Xtend = [".xtend", ]
YAML = [".yml", ".reek", ".rviz", ".sublime-syntax", ".syntax", ".yaml", ".yaml-tmlanguage", ]
YANG = [".yang", ]
Yacc = [".y", ".yacc", ".yy", ]
Zephir = [".zep", ]
Zig = [".zig", ]
Zimpl = [".zimpl", ".zmpl", ".zpl", ]
desktop = [".desktop", ".desktop.in", ]
eC = [".ec", ".eh", ]
edn = [".edn", ]
fish = [".fish", ]
mupad = [".mu", ]
nesC = [".nc", ]
ooc = [".ooc", ]
reStructuredText = [".rst", ".rest", ".rest.txt", ".rst.txt", ]
wisp = [".wisp", ]
xBase = [".prg", ".prw", ]
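A hedged sketch of consuming such a map; the `settings.language_extension_map_org` attribute path is assumed to follow the TOML table name, and the inversion helper is illustrative rather than code from this PR:

```python
# Illustrative only: invert language -> extensions into extension -> language.
from pr_agent.config_loader import settings

extension_to_language = {}
for language, extensions in settings.language_extension_map_org.items():
    for ext in extensions:
        extension_to_language[ext] = language

print(extension_to_language.get(".py"))  # "Python"
print(extension_to_language.get(".rs"))  # "Rust"
```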
34  pr_agent/settings/pr_update_changelog.toml  (new file)
@@ -0,0 +1,34 @@
[pr_update_changelog_prompt]
system="""You are a language model called CodiumAI-PR-Changlog-summarizer.
Your task is to update the CHANGELOG.md file of the project, to shortly summarize important changes introduced in this PR (the '+' lines).
- The output should match the existing CHANGELOG.md format, style and conventions, so it will look like a natural part of the file. For example, if previous changes were summarized in a single line, you should do the same.
- Don't repeat previous changes. Generate only new content, that is not already in the CHANGELOG.md file.
- Be general, and avoid specific details, files, etc. The output should be minimal, no more than 3-4 short lines. Ignore non-relevant subsections.
"""

user="""PR Info:
Title: '{{title}}'
Branch: '{{branch}}'
Description: '{{description}}'
{%- if language %}
Main language: {{language}}
{%- endif %}


The PR Diff:
```
{{diff}}
```

Current date:
```
{{today}}
```

The current CHANGELOG.md:
```
{{ changelog_file_str }}
```

Response:
"""
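The other tools in this PR render their prompts through Jinja2's `StrictUndefined` environment, so the new changelog prompt is presumably filled the same way; a hedged sketch with invented variable values:

```python
# Illustrative only: render the new prompt template the way the other tools do.
from jinja2 import Environment, StrictUndefined

from pr_agent.config_loader import settings

variables = {
    "title": "Add changelog tool",
    "branch": "feature/changelog",
    "description": "Adds a /update_changelog command",
    "language": "Python",
    "diff": "<patch text>",
    "today": "2023-07-26",
    "changelog_file_str": "## 2023-07-20\n- Initial release",
}
environment = Environment(undefined=StrictUndefined)
user_prompt = environment.from_string(settings.pr_update_changelog_prompt.user).render(variables)
```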
@@ -6,7 +6,7 @@ import textwrap
 from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handler import AiHandler
-from pr_agent.algo.pr_processing import get_pr_diff
+from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.algo.utils import try_fix_json
 from pr_agent.config_loader import settings
@@ -44,16 +44,7 @@ class PRCodeSuggestions:
         logging.info('Generating code suggestions for PR...')
         if settings.config.publish_output:
             self.git_provider.publish_comment("Preparing review...", is_temporary=True)
-        logging.info('Getting PR diff...')
-
-        # we are using extended hunk with line numbers for code suggestions
-        self.patches_diff = get_pr_diff(self.git_provider,
-                                        self.token_handler,
-                                        add_line_numbers_to_hunks=True,
-                                        disable_extra_lines=True)
-
-        logging.info('Getting AI prediction...')
-        self.prediction = await self._get_prediction()
+        await retry_with_fallback_models(self._prepare_prediction)
         logging.info('Preparing PR review...')
         data = self._prepare_pr_code_suggestions()
         if settings.config.publish_output:
@@ -62,7 +53,18 @@ class PRCodeSuggestions:
             logging.info('Pushing inline code comments...')
             self.push_inline_code_suggestions(data)

-    async def _get_prediction(self):
+    async def _prepare_prediction(self, model: str):
+        logging.info('Getting PR diff...')
+        # we are using extended hunk with line numbers for code suggestions
+        self.patches_diff = get_pr_diff(self.git_provider,
+                                        self.token_handler,
+                                        model,
+                                        add_line_numbers_to_hunks=True,
+                                        disable_extra_lines=True)
+        logging.info('Getting AI prediction...')
+        self.prediction = await self._get_prediction(model)
+
+    async def _get_prediction(self, model: str):
         variables = copy.deepcopy(self.vars)
         variables["diff"] = self.patches_diff  # update diff
         environment = Environment(undefined=StrictUndefined)
@@ -71,7 +73,6 @@ class PRCodeSuggestions:
         if settings.config.verbosity_level >= 2:
             logging.info(f"\nSystem prompt:\n{system_prompt}")
             logging.info(f"\nUser prompt:\n{user_prompt}")
-        model = settings.config.model
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
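`retry_with_fallback_models` is defined in `pr_agent/algo/pr_processing.py`, outside this excerpt. Based on how it is called here and on the new `fallback_models` setting, it plausibly behaves like the sketch below; this is an assumption, not the actual implementation:

```python
# Illustrative sketch only: try the configured model, then each fallback, until one succeeds.
import logging

from pr_agent.config_loader import settings


async def retry_with_fallback_models(f):
    models = [settings.config.model] + list(settings.config.fallback_models)
    for i, model in enumerate(models):
        try:
            return await f(model)
        except Exception as e:
            logging.warning(f"Failed to generate prediction with {model}: {e}")
            if i == len(models) - 1:  # no fallbacks left
                raise
```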
@@ -1,11 +1,12 @@
 import copy
 import json
 import logging
+from typing import Tuple, List

 from jinja2 import Environment, StrictUndefined

 from pr_agent.algo.ai_handler import AiHandler
-from pr_agent.algo.pr_processing import get_pr_diff
+from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.config_loader import settings
 from pr_agent.git_providers import get_git_provider
@@ -14,11 +15,22 @@ from pr_agent.git_providers.git_provider import get_main_pr_language

 class PRDescription:
     def __init__(self, pr_url: str):
+        """
+        Initialize the PRDescription object with the necessary attributes and objects for generating a PR description using an AI model.
+        Args:
+            pr_url (str): The URL of the pull request.
+        """
+
+        # Initialize the git provider and main PR language
         self.git_provider = get_git_provider()(pr_url)
         self.main_pr_language = get_main_pr_language(
             self.git_provider.get_languages(), self.git_provider.get_files()
         )
+
+        # Initialize the AI handler
         self.ai_handler = AiHandler()
+
+        # Initialize the variables dictionary
         self.vars = {
             "title": self.git_provider.pr.title,
             "branch": self.git_provider.get_pr_branch(),
@@ -26,65 +38,135 @@ class PRDescription:
             "language": self.main_pr_language,
             "diff": "",  # empty diff for initial calculation
         }
-        self.token_handler = TokenHandler(self.git_provider.pr,
+
+        # Initialize the token handler
+        self.token_handler = TokenHandler(
+            self.git_provider.pr,
             self.vars,
             settings.pr_description_prompt.system,
-            settings.pr_description_prompt.user)
+            settings.pr_description_prompt.user,
+        )
+
+        # Initialize patches_diff and prediction attributes
         self.patches_diff = None
         self.prediction = None

     async def describe(self):
+        """
+        Generates a PR description using an AI model and publishes it to the PR.
+        """
         logging.info('Generating a PR description...')
         if settings.config.publish_output:
             self.git_provider.publish_comment("Preparing pr description...", is_temporary=True)
-        logging.info('Getting PR diff...')
-        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler)
-        logging.info('Getting AI prediction...')
-        self.prediction = await self._get_prediction()
+
+        await retry_with_fallback_models(self._prepare_prediction)
+
         logging.info('Preparing answer...')
         pr_title, pr_body, pr_types, markdown_text = self._prepare_pr_answer()
+
         if settings.config.publish_output:
             logging.info('Pushing answer...')
             if settings.pr_description.publish_description_as_comment:
                 self.git_provider.publish_comment(markdown_text)
             else:
                 self.git_provider.publish_description(pr_title, pr_body)
-                self.git_provider.publish_labels(pr_types)
+                if self.git_provider.is_supported("get_labels"):
+                    current_labels = self.git_provider.get_labels()
+                    if current_labels is None:
+                        current_labels = []
+                    self.git_provider.publish_labels(pr_types + current_labels)
             self.git_provider.remove_initial_comment()
+
         return ""

-    async def _get_prediction(self):
+    async def _prepare_prediction(self, model: str) -> None:
+        """
+        Prepare the AI prediction for the PR description based on the provided model.
+
+        Args:
+            model (str): The name of the model to be used for generating the prediction.
+
+        Returns:
+            None
+
+        Raises:
+            Any exceptions raised by the 'get_pr_diff' and '_get_prediction' functions.
+        """
+        logging.info('Getting PR diff...')
+        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
+        logging.info('Getting AI prediction...')
+        self.prediction = await self._get_prediction(model)
+
+    async def _get_prediction(self, model: str) -> str:
+        """
+        Generate an AI prediction for the PR description based on the provided model.
+
+        Args:
+            model (str): The name of the model to be used for generating the prediction.
+
+        Returns:
+            str: The generated AI prediction.
+        """
         variables = copy.deepcopy(self.vars)
         variables["diff"] = self.patches_diff  # update diff
+
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(settings.pr_description_prompt.system).render(variables)
         user_prompt = environment.from_string(settings.pr_description_prompt.user).render(variables)
+
         if settings.config.verbosity_level >= 2:
             logging.info(f"\nSystem prompt:\n{system_prompt}")
             logging.info(f"\nUser prompt:\n{user_prompt}")
-        model = settings.config.model
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model,
+            temperature=0.2,
+            system=system_prompt,
+            user=user_prompt
+        )
+
         return response

-    def _prepare_pr_answer(self):
+    def _prepare_pr_answer(self) -> Tuple[str, str, List[str], str]:
+        """
+        Prepare the PR description based on the AI prediction data.
+
+        Returns:
+        - title: a string containing the PR title.
+        - pr_body: a string containing the PR body in a markdown format.
+        - pr_types: a list of strings containing the PR types.
+        - markdown_text: a string containing the AI prediction data in a markdown format.
+        """
+        # Load the AI prediction data into a dictionary
         data = json.loads(self.prediction)
-        markdown_text = ""
+
+        # Initialization
+        markdown_text = pr_body = ""
+        pr_types = []
+
+        # Iterate over the dictionary items and append the key and value to 'markdown_text' in a markdown format
         for key, value in data.items():
             markdown_text += f"## {key}\n\n"
             markdown_text += f"{value}\n\n"
-        pr_body = ""
-        pr_types = []
+
+        # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
         if 'PR Type' in data:
             pr_types = data['PR Type'].split(',')
-        title = data['PR Title']
-        del data['PR Title']
+
+        # Assign the value of the 'PR Title' key to 'title' variable and remove it from the dictionary
+        title = data.pop('PR Title')
+
+        # Iterate over the remaining dictionary items and append the key and value to 'pr_body' in a markdown format,
+        # except for the items containing the word 'walkthrough'
         for key, value in data.items():
             pr_body += f"{key}:\n"
             if 'walkthrough' in key.lower():
                 pr_body += f"{value}\n"
             else:
                 pr_body += f"**{value}**\n\n___\n"
+
         if settings.config.verbosity_level >= 2:
             logging.info(f"title:\n{title}\n{pr_body}")
+
         return title, pr_body, pr_types, markdown_text
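To make the new `_prepare_pr_answer` flow concrete, a hedged example of the JSON the prediction is expected to carry; 'PR Title' and 'PR Type' come from the code above, while the other keys and all values are invented for illustration:

```python
# Illustrative only: a prediction payload with the keys _prepare_pr_answer() consumes.
prediction = """{
    "PR Title": "Add local git provider",
    "PR Type": "Enhancement, Tests",
    "PR Description": "Adds a LocalGitProvider that mimics a PR from two local branches.",
    "PR Main Files Walkthrough": "local_git_provider.py: new provider implementation"
}"""
# json.loads(prediction): the title is popped via data.pop('PR Title'), pr_types comes from
# 'PR Type'.split(','), walkthrough sections are kept verbatim, other values are bolded.
```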
@@ -4,13 +4,15 @@ import logging
 from jinja2 import Environment, StrictUndefined
 
 from pr_agent.algo.ai_handler import AiHandler
-from pr_agent.algo.pr_processing import get_pr_diff
+from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.config_loader import settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
 
 
 class PRInformationFromUser:
     def __init__(self, pr_url: str):
         self.git_provider = get_git_provider()(pr_url)
@@ -36,10 +38,7 @@ class PRInformationFromUser:
         logging.info('Generating question to the user...')
         if settings.config.publish_output:
             self.git_provider.publish_comment("Preparing questions...", is_temporary=True)
-        logging.info('Getting PR diff...')
-        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler)
-        logging.info('Getting AI prediction...')
-        self.prediction = await self._get_prediction()
+        await retry_with_fallback_models(self._prepare_prediction)
         logging.info('Preparing questions...')
         pr_comment = self._prepare_pr_answer()
         if settings.config.publish_output:
@@ -48,7 +47,13 @@ class PRInformationFromUser:
             self.git_provider.remove_initial_comment()
         return ""
 
-    async def _get_prediction(self):
+    async def _prepare_prediction(self, model):
+        logging.info('Getting PR diff...')
+        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
+        logging.info('Getting AI prediction...')
+        self.prediction = await self._get_prediction(model)
+
+    async def _get_prediction(self, model: str):
         variables = copy.deepcopy(self.vars)
         variables["diff"] = self.patches_diff  # update diff
         environment = Environment(undefined=StrictUndefined)
@@ -57,7 +62,6 @@ class PRInformationFromUser:
         if settings.config.verbosity_level >= 2:
             logging.info(f"\nSystem prompt:\n{system_prompt}")
             logging.info(f"\nUser prompt:\n{user_prompt}")
-        model = settings.config.model
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
         return response
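`retry_with_fallback_models` itself lives in `pr_agent.algo.pr_processing` and its body is not part of this diff; going only by the call sites above (it receives a coroutine function that takes a model name), a minimal sketch of the pattern might look like the following. The `fallback_models` setting name is an assumption, not taken from this diff.

```
import logging

from pr_agent.config_loader import settings


async def retry_with_fallback_models(f):
    # Assumed config layout: a primary model plus an optional list of fallbacks.
    models = [settings.config.model] + list(getattr(settings.config, "fallback_models", []))
    for i, model in enumerate(models):
        try:
            # e.g. self._prepare_prediction, which recomputes the diff and stores the prediction
            return await f(model)
        except Exception:
            if i + 1 == len(models):
                raise
            logging.warning(f"Model {model} failed, falling back to {models[i + 1]}")
```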
@@ -4,7 +4,7 @@ import logging
 from jinja2 import Environment, StrictUndefined
 
 from pr_agent.algo.ai_handler import AiHandler
-from pr_agent.algo.pr_processing import get_pr_diff
+from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.config_loader import settings
 from pr_agent.git_providers import get_git_provider
@@ -46,10 +46,7 @@ class PRQuestions:
         logging.info('Answering a PR question...')
         if settings.config.publish_output:
             self.git_provider.publish_comment("Preparing answer...", is_temporary=True)
-        logging.info('Getting PR diff...')
-        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler)
-        logging.info('Getting AI prediction...')
-        self.prediction = await self._get_prediction()
+        await retry_with_fallback_models(self._prepare_prediction)
         logging.info('Preparing answer...')
         pr_comment = self._prepare_pr_answer()
         if settings.config.publish_output:
@@ -58,7 +55,13 @@ class PRQuestions:
             self.git_provider.remove_initial_comment()
         return ""
 
-    async def _get_prediction(self):
+    async def _prepare_prediction(self, model: str):
+        logging.info('Getting PR diff...')
+        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
+        logging.info('Getting AI prediction...')
+        self.prediction = await self._get_prediction(model)
+
+    async def _get_prediction(self, model: str):
         variables = copy.deepcopy(self.vars)
         variables["diff"] = self.patches_diff  # update diff
         environment = Environment(undefined=StrictUndefined)
@@ -67,7 +70,6 @@ class PRQuestions:
         if settings.config.verbosity_level >= 2:
             logging.info(f"\nSystem prompt:\n{system_prompt}")
             logging.info(f"\nUser prompt:\n{user_prompt}")
-        model = settings.config.model
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
         return response
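One practical reason `get_pr_diff` now receives the model name, and why the diff is rebuilt inside `_prepare_prediction(model)` on every retry, is that the token budget left for the compressed diff depends on the model's context window. The model names and limits below are illustrative assumptions, not values from this diff:

```
# Illustration only: different context windows leave different room for the diff.
MAX_TOKENS = {"gpt-4": 8192, "gpt-3.5-turbo-16k": 16384}  # assumed limits

def diff_budget(model: str, prompt_tokens: int, output_buffer: int = 2000) -> int:
    """Tokens left for the compressed PR diff after the prompt and answer reserve."""
    return MAX_TOKENS[model] - prompt_tokens - output_buffer

print(diff_budget("gpt-4", 3000))              # 3192
print(diff_budget("gpt-3.5-turbo-16k", 3000))  # 11384
```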
@@ -2,11 +2,12 @@ import copy
 import json
 import logging
 from collections import OrderedDict
+from typing import Tuple, List
 
 from jinja2 import Environment, StrictUndefined
 
 from pr_agent.algo.ai_handler import AiHandler
-from pr_agent.algo.pr_processing import get_pr_diff
+from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.algo.utils import convert_to_markdown, try_fix_json
 from pr_agent.config_loader import settings
@@ -16,7 +17,19 @@ from pr_agent.servers.help import actions_help_text, bot_help_text
 
 
 class PRReviewer:
-    def __init__(self, pr_url: str, cli_mode=False, is_answer: bool = False, args=None):
+    """
+    The PRReviewer class is responsible for reviewing a pull request and generating feedback using an AI model.
+    """
+    def __init__(self, pr_url: str, cli_mode: bool = False, is_answer: bool = False, args: list = None):
+        """
+        Initialize the PRReviewer object with the necessary attributes and objects to review a pull request.
+
+        Args:
+            pr_url (str): The URL of the pull request to be reviewed.
+            cli_mode (bool, optional): Indicates whether the review is being done in command-line interface mode. Defaults to False.
+            is_answer (bool, optional): Indicates whether the review is being done in answer mode. Defaults to False.
+            args (list, optional): List of arguments passed to the PRReviewer class. Defaults to None.
+        """
         self.parse_args(args)
 
         self.git_provider = get_git_provider()(pr_url, incremental=self.incremental)
@@ -25,13 +38,15 @@ class PRReviewer:
         )
         self.pr_url = pr_url
         self.is_answer = is_answer
 
         if self.is_answer and not self.git_provider.is_supported("get_issue_comments"):
             raise Exception(f"Answer mode is not supported for {settings.config.git_provider} for now")
-        answer_str, question_str = self._get_user_answers()
         self.ai_handler = AiHandler()
         self.patches_diff = None
         self.prediction = None
         self.cli_mode = cli_mode
 
+        answer_str, question_str = self._get_user_answers()
         self.vars = {
             "title": self.git_provider.pr.title,
             "branch": self.git_provider.get_pr_branch(),
@@ -43,16 +58,27 @@ class PRReviewer:
             "require_security": settings.pr_reviewer.require_security_review,
             "require_focused": settings.pr_reviewer.require_focused_review,
             'num_code_suggestions': settings.pr_reviewer.num_code_suggestions,
-            #
             'question_str': question_str,
             'answer_str': answer_str,
         }
-        self.token_handler = TokenHandler(self.git_provider.pr,
+
+        self.token_handler = TokenHandler(
+            self.git_provider.pr,
             self.vars,
             settings.pr_review_prompt.system,
-            settings.pr_review_prompt.user)
+            settings.pr_review_prompt.user
+        )
 
-    def parse_args(self, args):
+    def parse_args(self, args: List[str]) -> None:
+        """
+        Parse the arguments passed to the PRReviewer class and set the 'incremental' attribute accordingly.
+
+        Args:
+            args: A list of arguments passed to the PRReviewer class.
+
+        Returns:
+            None
+        """
         is_incremental = False
         if args and len(args) >= 1:
             arg = args[0]
@@ -60,58 +86,93 @@ class PRReviewer:
                 is_incremental = True
         self.incremental = IncrementalPR(is_incremental)
 
-    async def review(self):
+    async def review(self) -> None:
+        """
+        Review the pull request and generate feedback.
+        """
         logging.info('Reviewing PR...')
 
         if settings.config.publish_output:
             self.git_provider.publish_comment("Preparing review...", is_temporary=True)
-        logging.info('Getting PR diff...')
-        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler)
-        logging.info('Getting AI prediction...')
-        self.prediction = await self._get_prediction()
+
+        await retry_with_fallback_models(self._prepare_prediction)
+
         logging.info('Preparing PR review...')
         pr_comment = self._prepare_pr_review()
 
         if settings.config.publish_output:
             logging.info('Pushing PR review...')
             self.git_provider.publish_comment(pr_comment)
             self.git_provider.remove_initial_comment()
 
             if settings.pr_reviewer.inline_code_comments:
                 logging.info('Pushing inline code comments...')
                 self._publish_inline_code_comments()
-        return ""
 
-    async def _get_prediction(self):
+    async def _prepare_prediction(self, model: str) -> None:
+        """
+        Prepare the AI prediction for the pull request review.
+
+        Args:
+            model: A string representing the AI model to be used for the prediction.
+
+        Returns:
+            None
+        """
+        logging.info('Getting PR diff...')
+        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
+        logging.info('Getting AI prediction...')
+        self.prediction = await self._get_prediction(model)
+
+    async def _get_prediction(self, model: str) -> str:
+        """
+        Generate an AI prediction for the pull request review.
+
+        Args:
+            model: A string representing the AI model to be used for the prediction.
+
+        Returns:
+            A string representing the AI prediction for the pull request review.
+        """
         variables = copy.deepcopy(self.vars)
         variables["diff"] = self.patches_diff  # update diff
+
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(settings.pr_review_prompt.system).render(variables)
         user_prompt = environment.from_string(settings.pr_review_prompt.user).render(variables)
 
         if settings.config.verbosity_level >= 2:
             logging.info(f"\nSystem prompt:\n{system_prompt}")
             logging.info(f"\nUser prompt:\n{user_prompt}")
-        model = settings.config.model
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model,
+            temperature=0.2,
+            system=system_prompt,
+            user=user_prompt
+        )
+
         return response
 
     def _prepare_pr_review(self) -> str:
+        """
+        Prepare the PR review by processing the AI prediction and generating a markdown-formatted text that summarizes the feedback.
+        """
         review = self.prediction.strip()
 
         try:
             data = json.loads(review)
         except json.decoder.JSONDecodeError:
             data = try_fix_json(review)
 
-        # reordering for nicer display
-        if 'PR Feedback' in data:
-            if 'Security concerns' in data['PR Feedback']:
+        # Move 'Security concerns' key to 'PR Analysis' section for better display
+        if 'PR Feedback' in data and 'Security concerns' in data['PR Feedback']:
             val = data['PR Feedback']['Security concerns']
             del data['PR Feedback']['Security concerns']
             data['PR Analysis']['Security concerns'] = val
 
-        if settings.config.git_provider != 'bitbucket' and \
-                settings.pr_reviewer.inline_code_comments and \
-                'Code suggestions' in data['PR Feedback']:
-            # keeping only code suggestions that can't be submitted as inline comments
+        # Filter out code suggestions that can be submitted as inline comments
+        if settings.config.git_provider != 'bitbucket' and settings.pr_reviewer.inline_code_comments and 'Code suggestions' in data['PR Feedback']:
             data['PR Feedback']['Code suggestions'] = [
                 d for d in data['PR Feedback']['Code suggestions']
                 if any(key not in d for key in ('relevant file', 'relevant line in file', 'suggestion content'))
@@ -119,8 +180,8 @@ class PRReviewer:
             if not data['PR Feedback']['Code suggestions']:
                 del data['PR Feedback']['Code suggestions']
 
+        # Add incremental review section
         if self.incremental.is_incremental:
-            # Rename title when incremental review - Add to the beginning of the dict
             last_commit_url = f"{self.git_provider.get_pr_url()}/commits/{self.git_provider.incremental.first_new_commit_sha}"
             data = OrderedDict(data)
             data.update({'Incremental PR Review': {
@@ -130,6 +191,7 @@ class PRReviewer:
         markdown_text = convert_to_markdown(data)
         user = self.git_provider.get_user_id()
 
+        # Add help text if not in CLI mode
         if not self.cli_mode:
             markdown_text += "\n### How to use\n"
             if user and '[bot]' not in user:
@@ -137,11 +199,16 @@ class PRReviewer:
             else:
                 markdown_text += actions_help_text
 
+        # Log markdown response if verbosity level is high
         if settings.config.verbosity_level >= 2:
             logging.info(f"Markdown response:\n{markdown_text}")
 
         return markdown_text
 
-    def _publish_inline_code_comments(self):
+    def _publish_inline_code_comments(self) -> None:
+        """
+        Publishes inline comments on a pull request with code suggestions generated by the AI model.
+        """
         if settings.pr_reviewer.num_code_suggestions == 0:
             return
 
@@ -151,11 +218,11 @@ class PRReviewer:
         except json.decoder.JSONDecodeError:
             data = try_fix_json(review)
 
-        comments = []
-        for d in data['PR Feedback']['Code suggestions']:
-            relevant_file = d.get('relevant file', '').strip()
-            relevant_line_in_file = d.get('relevant line in file', '').strip()
-            content = d.get('suggestion content', '')
+        comments: List[str] = []
+        for suggestion in data.get('PR Feedback', {}).get('Code suggestions', []):
+            relevant_file = suggestion.get('relevant file', '').strip()
+            relevant_line_in_file = suggestion.get('relevant line in file', '').strip()
+            content = suggestion.get('suggestion content', '')
             if not relevant_file or not relevant_line_in_file or not content:
                 logging.info("Skipping inline comment with missing file/line/content")
                 continue
@@ -170,15 +237,26 @@ class PRReviewer:
         if comments:
             self.git_provider.publish_inline_comments(comments)
 
-    def _get_user_answers(self):
-        answer_str = question_str = ""
+    def _get_user_answers(self) -> Tuple[str, str]:
+        """
+        Retrieves the question and answer strings from the discussion messages related to a pull request.
+
+        Returns:
+            A tuple containing the question and answer strings.
+        """
+        question_str = ""
+        answer_str = ""
+
         if self.is_answer:
             discussion_messages = self.git_provider.get_issue_comments()
-            for message in discussion_messages.reversed:
+
+            for message in reversed(discussion_messages):
                 if "Questions to better understand the PR:" in message.body:
                     question_str = message.body
                 elif '/answer' in message.body:
                     answer_str = message.body
 
                 if answer_str and question_str:
                     break
 
         return question_str, answer_str
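The loop in `_publish_inline_code_comments` now reads suggestions through chained `.get()` calls, so a prediction that lacks a 'PR Feedback' or 'Code suggestions' key simply yields nothing instead of raising `KeyError`. A small self-contained check (the sample dictionaries are made up):

```
def get_suggestions(data: dict) -> list:
    # Mirrors the new access pattern: any missing key falls back to an empty container.
    return data.get('PR Feedback', {}).get('Code suggestions', [])

print(get_suggestions({}))                    # []
print(get_suggestions({'PR Feedback': {}}))   # []
print(get_suggestions({'PR Feedback': {'Code suggestions': [
    {'relevant file': 'pr_agent/cli.py',
     'relevant line in file': 'def run():',
     'suggestion content': 'Add a docstring.'}
]}}))                                         # one suggestion dict
```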
171  pr_agent/tools/pr_update_changelog.py  Normal file
@@ -0,0 +1,171 @@
import copy
import logging
from datetime import date
from time import sleep
from typing import Tuple

from jinja2 import Environment, StrictUndefined

from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import settings
from pr_agent.git_providers import get_git_provider, GithubProvider
from pr_agent.git_providers.git_provider import get_main_pr_language

CHANGELOG_LINES = 50


class PRUpdateChangelog:
    def __init__(self, pr_url: str, cli_mode=False, args=None):

        self.git_provider = get_git_provider()(pr_url)
        self.main_language = get_main_pr_language(
            self.git_provider.get_languages(), self.git_provider.get_files()
        )
        self.commit_changelog = self._parse_args(args, settings)
        self._get_changlog_file()  # self.changelog_file_str
        self.ai_handler = AiHandler()
        self.patches_diff = None
        self.prediction = None
        self.cli_mode = cli_mode
        self.vars = {
            "title": self.git_provider.pr.title,
            "branch": self.git_provider.get_pr_branch(),
            "description": self.git_provider.get_pr_description(),
            "language": self.main_language,
            "diff": "",  # empty diff for initial calculation
            "changelog_file_str": self.changelog_file_str,
            "today": date.today(),
        }
        self.token_handler = TokenHandler(self.git_provider.pr,
                                          self.vars,
                                          settings.pr_update_changelog_prompt.system,
                                          settings.pr_update_changelog_prompt.user)

    async def update_changelog(self):
        assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"

        logging.info('Updating the changelog...')
        if settings.config.publish_output:
            self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)
        await retry_with_fallback_models(self._prepare_prediction)
        logging.info('Preparing PR changelog updates...')
        new_file_content, answer = self._prepare_changelog_update()
        if settings.config.publish_output:
            self.git_provider.remove_initial_comment()
            logging.info('Publishing changelog updates...')
            if self.commit_changelog:
                logging.info('Pushing PR changelog updates to repo...')
                self._push_changelog_update(new_file_content, answer)
            else:
                logging.info('Publishing PR changelog as comment...')
                self.git_provider.publish_comment(f"**Changelog updates:**\n\n{answer}")

    async def _prepare_prediction(self, model: str):
        logging.info('Getting PR diff...')
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
        logging.info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):
        variables = copy.deepcopy(self.vars)
        variables["diff"] = self.patches_diff  # update diff
        environment = Environment(undefined=StrictUndefined)
        system_prompt = environment.from_string(settings.pr_update_changelog_prompt.system).render(variables)
        user_prompt = environment.from_string(settings.pr_update_changelog_prompt.user).render(variables)
        if settings.config.verbosity_level >= 2:
            logging.info(f"\nSystem prompt:\n{system_prompt}")
            logging.info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)

        return response

    def _prepare_changelog_update(self) -> Tuple[str, str]:
        answer = self.prediction.strip().strip("```").strip()
        if hasattr(self, "changelog_file"):
            existing_content = self.changelog_file.decoded_content.decode()
        else:
            existing_content = ""
        if existing_content:
            new_file_content = answer + "\n\n" + self.changelog_file.decoded_content.decode()
        else:
            new_file_content = answer

        if not self.commit_changelog:
            answer += "\n\n\n>to commit the new content to the CHANGELOG.md file, please type:" \
                      "\n>'/update_changelog -commit'\n"

        if settings.config.verbosity_level >= 2:
            logging.info(f"answer:\n{answer}")

        return new_file_content, answer

    def _push_changelog_update(self, new_file_content, answer):
        self.git_provider.repo_obj.update_file(path=self.changelog_file.path,
                                               message="Update CHANGELOG.md",
                                               content=new_file_content,
                                               sha=self.changelog_file.sha,
                                               branch=self.git_provider.get_pr_branch())
        d = dict(body="CHANGELOG.md update",
                 path=self.changelog_file.path,
                 line=max(2, len(answer.splitlines())),
                 start_line=1)

        sleep(5)  # wait for the file to be updated
        last_commit_id = list(self.git_provider.pr.get_commits())[-1]
        try:
            self.git_provider.pr.create_review(commit=last_commit_id, comments=[d])
        except:
            # we can't create a review for some reason, let's just publish a comment
            self.git_provider.publish_comment(f"**Changelog updates:**\n\n{answer}")

    def _get_default_changelog(self):
        example_changelog = \
"""
Example:
## <current_date>

### Added
...
### Changed
...
### Fixed
...
"""
        return example_changelog

    def _parse_args(self, args, setting):
        commit_changelog = False
        if args and len(args) >= 1:
            try:
                if args[0] == "-commit":
                    commit_changelog = True
            except:
                pass
        else:
            commit_changelog = setting.pr_update_changelog.push_changelog_changes

        return commit_changelog

    def _get_changlog_file(self):
        try:
            self.changelog_file = self.git_provider.repo_obj.get_contents("CHANGELOG.md",
                                                                          ref=self.git_provider.get_pr_branch())
            changelog_file_lines = self.changelog_file.decoded_content.decode().splitlines()
            changelog_file_lines = changelog_file_lines[:CHANGELOG_LINES]
            self.changelog_file_str = "\n".join(changelog_file_lines)
        except:
            self.changelog_file_str = ""
            if self.commit_changelog:
                logging.info("No CHANGELOG.md file found in the repository. Creating one...")
                changelog_file = self.git_provider.repo_obj.create_file(path="CHANGELOG.md",
                                                                        message='add CHANGELOG.md',
                                                                        content="",
                                                                        branch=self.git_provider.get_pr_branch())
                self.changelog_file = changelog_file['content']

        if not self.changelog_file_str:
            self.changelog_file_str = self._get_default_changelog()
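A possible way to exercise the new tool directly, using only the constructor and `update_changelog()` shown above; the PR URL is a placeholder, and in normal use the `/update_changelog` PR comment (optionally with `-commit`) is the entry point:

```
import asyncio

from pr_agent.tools.pr_update_changelog import PRUpdateChangelog


async def main():
    # "-commit" pushes the updated CHANGELOG.md to the PR branch;
    # without it the proposed entries are only posted as a PR comment.
    tool = PRUpdateChangelog("https://github.com/owner/repo/pull/1", cli_mode=True, args=["-commit"])
    await tool.update_changelog()


if __name__ == "__main__":
    asyncio.run(main())
```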
|
@ -1,3 +1,63 @@
|
|||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "pr_agent"
|
||||||
|
version = "0.0.1"
|
||||||
|
|
||||||
|
authors = [
|
||||||
|
{name = "Itamar Friedman", email = "itamar.f@codium.ai"},
|
||||||
|
]
|
||||||
|
maintainers = [
|
||||||
|
{name = "Ori Kotek", email = "ori.k@codium.ai"},
|
||||||
|
{name = "Tal Ridnik", email = "tal.r@codium.ai"},
|
||||||
|
{name = "Hussam Lawen", email = "hussam.l@codium.ai"},
|
||||||
|
{name = "Sagi Medina", email = "sagi.m@codium.ai"}
|
||||||
|
]
|
||||||
|
description = "CodiumAI PR-Agent is an open-source tool to automatically analyze a pull request and provide several types of feedback"
|
||||||
|
readme = "README.md"
|
||||||
|
requires-python = ">=3.9"
|
||||||
|
keywords = ["ai", "tool", "developer", "review", "agent"]
|
||||||
|
license = {file = "LICENSE", name = "Apache 2.0 License"}
|
||||||
|
classifiers = [
|
||||||
|
"Development Status :: 3 - Alpha",
|
||||||
|
"Intended Audience :: Developers",
|
||||||
|
"Operating System :: Independent",
|
||||||
|
"Programming Language :: Python :: 3",
|
||||||
|
]
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
"dynaconf==3.1.12",
|
||||||
|
"fastapi==0.99.0",
|
||||||
|
"PyGithub==1.59.*",
|
||||||
|
"retry==0.9.2",
|
||||||
|
"openai==0.27.8",
|
||||||
|
"Jinja2==3.1.2",
|
||||||
|
"tiktoken==0.4.0",
|
||||||
|
"uvicorn==0.22.0",
|
||||||
|
"python-gitlab==3.15.0",
|
||||||
|
"pytest~=7.4.0",
|
||||||
|
"aiohttp~=3.8.4",
|
||||||
|
"atlassian-python-api==3.39.0",
|
||||||
|
"GitPython~=3.1.32",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.urls]
|
||||||
|
"Homepage" = "https://github.com/Codium-ai/pr-agent"
|
||||||
|
|
||||||
|
[tool.setuptools]
|
||||||
|
include-package-data = false
|
||||||
|
license-files = ["LICENSE"]
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["pr_agent"]
|
||||||
|
|
||||||
|
[project.scripts]
|
||||||
|
pr-agent = "pr_agent.cli:run"
|
||||||
|
|
||||||
|
|
||||||
[tool.ruff]
|
[tool.ruff]
|
||||||
|
|
||||||
line-length = 120
|
line-length = 120
|
||||||
|
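With the dependency list moved into this packaging metadata, `pip install .` (or the editable `-e .` install shown in the next hunk) resolves the same pinned packages and also generates a `pr-agent` executable from the `[project.scripts]` entry. The generated wrapper is roughly equivalent to the sketch below; the real one is produced by pip/setuptools, and only the `pr_agent.cli:run` target comes from the file above:

```
# Rough equivalent of the console script that "pr-agent = 'pr_agent.cli:run'" generates.
import sys

from pr_agent.cli import run

if __name__ == "__main__":
    sys.exit(run())
```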
@@ -1,12 +1 @@
-dynaconf==3.1.12
-fastapi==0.99.0
-PyGithub==1.59.*
-retry==0.9.2
-openai==0.27.8
-Jinja2==3.1.2
-tiktoken==0.4.0
-uvicorn==0.22.0
-python-gitlab==3.15.0
-pytest~=7.4.0
-aiohttp~=3.8.4
-atlassian-python-api==3.39.0
+-e .