Compare commits

..

100 Commits

SHA1 Message Date
497f84b3bd Update release notes 2023-09-20 14:23:55 +03:00
c2fe2fc657 Added a release notes file 2023-09-20 13:50:10 +03:00
d327245edf Merge pull request #312 from Codium-ai/tr/fixes_20_9
Enhancing error handling in PR review tools
2023-09-20 07:59:02 +03:00
632de3f186 protections 2023-09-20 07:39:56 +03:00
de14b0e4c0 Merge pull request #310 from never-known-soldier/update/README.md
Update README.md file
2023-09-19 13:29:35 +03:00
f010d1389b Merge branch 'main' into update/README.md 2023-09-19 13:28:39 +03:00
4411f6d88a Update README.md file 2023-09-19 12:54:33 +05:30
1f62520606 Merge pull request #306 from Codium-ai/tr/etr
Adding Estimated Review Effort Feature and Handling Cases with No Detected Language
2023-09-17 17:10:52 +03:00
c0511c954e icon 2023-09-17 17:08:02 +03:00
818ab5a9e8 fixed tests 2023-09-17 16:56:23 +03:00
291ffdd6ae gfm_markdown 2023-09-17 16:51:16 +03:00
4fbe7d14b5 protection for no language 2023-09-17 16:41:53 +03:00
ea91a38541 Estimated effort to review 2023-09-17 16:31:58 +03:00
caaee4e43d Estimated time to review 2023-09-15 17:09:58 +03:00
279d45996f Merge pull request #301 from Codium-ai/add-links-to-readme
Addition of Relevant Links to README.md
2023-09-14 12:38:57 +03:00
01aa038ad6 Update README.md - add links 2023-09-14 11:53:47 +03:00
084256b923 fixed config 2023-09-14 08:23:34 +03:00
dc42713217 Merge pull request #285 from Codium-ai/markers
Markers
2023-09-14 08:21:35 +03:00
99f17666c5 merge 2023-09-14 08:20:36 +03:00
bba22667f1 merge 2023-09-14 08:13:00 +03:00
1b8349b0ef merge 2023-09-14 07:47:04 +03:00
b94e3521d1 Merge remote-tracking branch 'origin/main' into markers
# Conflicts:
#	pr_agent/tools/pr_description.py
2023-09-14 07:46:30 +03:00
32931f0bc0 Update Usage.md 2023-09-13 10:38:31 +03:00
72ac8e8091 Merge pull request #299 from Codium-ai/tr/readme
Enhancement of Similar Issue Tool and Documentation Updates
2023-09-13 08:19:13 +03:00
33045e6898 graphic adjustments 2023-09-13 08:17:13 +03:00
069c3a8e5c Merge remote-tracking branch 'origin/tr/readme' into tr/readme 2023-09-13 08:16:36 +03:00
9c0656c296 graphic adjustments 2023-09-13 08:16:23 +03:00
228ee26541 graphic adjustments 2023-09-13 08:16:08 +03:00
f8d548367f graphic adjustments 2023-09-13 08:15:15 +03:00
d3f466f59b graphic adjustments 2023-09-13 08:04:36 +03:00
6b45940128 Merge remote-tracking branch 'origin/main' into tr/readme 2023-09-13 07:47:21 +03:00
a52e94fcbc similar issue 2023-09-13 07:46:43 +03:00
ee3874f0aa Merge pull request #297 from Codium-ai/tr/fix_tests
Enhancing Logging in pr_similar_issue.py
2023-09-13 07:27:50 +03:00
31ba7acf49 Support issue comments in GitHub Actions 2023-09-12 16:53:54 +03:00
b7a2551cab Support issue comments in GitHub Actions 2023-09-12 16:46:02 +03:00
d4eb100cbc Support issue comments in GitHub Actions 2023-09-12 16:44:20 +03:00
21feb92b75 Support issue comments in GitHub Actions 2023-09-12 16:41:12 +03:00
2f6178306f Fix a bug in GitHub Actions 2023-09-12 13:28:35 +03:00
36e7c1c22f Merge remote-tracking branch 'origin/main' 2023-09-12 13:24:55 +03:00
c31baa5aea Fix a bug in GitHub Actions 2023-09-12 13:24:47 +03:00
901eda2f10 logs 2023-09-12 07:57:21 +03:00
8cf7d2d0b1 Merge pull request #296 from Codium-ai/tr/fix_tests
gfm_supported
2023-09-12 07:49:19 +03:00
d7f43d6ee0 gfm_supported 2023-09-12 07:43:15 +03:00
8e42162b5e Merge pull request #278 from sarbjitsinghgrewal/fix_bitbucket_publish_description
Fix bitbucket publish description
2023-09-10 14:13:26 +03:00
98d0835c48 Merge remote-tracking branch 'origin/main' into fix_bitbucket_publish_description 2023-09-10 14:08:17 +03:00
2aef9dfe55 Merge remote-tracking branch 'origin/main' into fix_bitbucket_publish_description 2023-09-10 14:06:54 +03:00
115b513c9b Remove 'bitbucket' explicit dependency anywhere that's not in bitbucket_provider.py 2023-09-10 14:06:13 +03:00
fd63fe4c95 Merge pull request #293 from Codium-ai/tr/litellm_debugger
Integration of Litellm Client with AI Handler
2023-09-10 13:56:06 +03:00
d40285e4d3 Merge branch 'main' into tr/litellm_debugger 2023-09-10 13:40:35 +03:00
517658fb37 Merge pull request #282 from Codium-ai/tr/issue_tool
Adding Similar Issue Tool and Pinecone Integration
2023-09-10 13:39:34 +03:00
f9f0f220c2 pinecone-datasets 2023-09-10 13:31:36 +03:00
6382b8a68b LITELLM_TOKEN 2023-09-10 13:28:56 +03:00
e371b217ec Merge remote-tracking branch 'origin/main' into tr/litellm_debugger 2023-09-10 13:27:19 +03:00
7dec7b0583 Merge pull request #291 from krrishdholakia/main
adding documentation on how to call local hf models
2023-09-10 13:25:44 +03:00
bf6a235add pinecone-datasets==0.6.1 2023-09-10 13:16:05 +03:00
1d9489c734 Merge remote-tracking branch 'origin/tr/issue_tool' into tr/issue_tool 2023-09-10 08:39:20 +03:00
bd588b4509 solved dependencies 2023-09-10 08:39:03 +03:00
245f29e58a solved dependencies 2023-09-10 08:22:42 +03:00
7f5f2d2d1a solved dependencies 2023-09-10 08:07:39 +03:00
fe500845b7 upgrade pip 2023-09-10 07:46:51 +03:00
b42b2536b5 upgrade pip 2023-09-10 07:39:01 +03:00
498ad3d19c upgrade pip 2023-09-10 07:36:25 +03:00
892dbe458e litellm client 2023-09-09 17:35:45 +03:00
1b098aea13 adding documentation on how to call local hf models 2023-09-08 09:59:44 -07:00
e90c9e5853 Merge pull request #287 from cloudlinux/gerrit
[gerrit] Added support project's config file: `.pr_agent.toml`
2023-09-07 19:06:32 +03:00
e4f28b157f Added support project's config file: .pr_agent.toml
+ removed markdown/html formatting from the review due to gerrit does not support it
2023-09-07 13:13:07 +01:00
6fb8a882af ordering requirements.txt 2023-09-07 12:41:31 +03:00
9889d26d3e merged main 2023-09-07 12:31:22 +03:00
b23a4c0535 Merge remote-tracking branch 'origin/main' into tr/issue_tool
# Conflicts:
#	requirements.txt
2023-09-07 12:30:16 +03:00
0f7a481eaa Merge pull request #277 from krrishdholakia/hf-usage
Showing how to use huggingface models with PR-Agent
2023-09-07 12:28:47 +03:00
3fc88b2bc4 Merge pull request #276 from krrishdholakia/main
Add docs on using Azure
2023-09-07 12:20:26 +03:00
ed5aaaab45 Merge branch 'main' into main 2023-09-07 12:19:59 +03:00
145b5db458 added 'publish_description_as_comment' support 2023-09-07 12:10:33 +03:00
8321792a8d == 2023-09-06 18:12:16 +03:00
8af8fd8e5d github action 2023-09-06 17:43:43 +03:00
753ea3e44c Update INSTALL.md 2023-09-06 11:35:41 +03:00
660601f7c5 Merge pull request #280 from Codium-ai/coditamar/install
Update INSTALL.md
2023-09-06 11:32:18 +03:00
4e7f67f596 Merge pull request #279 from Codium-ai/coditamar/readme-minor-enhancement
Enhancements to README.md for Improved Clarity and Detail
2023-09-06 11:31:55 +03:00
e486addb8f Update INSTALL.md
minor clarification to Method 8 GitLab
2023-09-06 11:28:19 +03:00
4a5310e2a1 Update README.md 2023-09-06 10:46:25 +03:00
8962c9cf8a stable 2023-09-06 09:43:23 +03:00
bc95cf5b8e stable 2023-09-06 09:12:25 +03:00
dcd8196b94 Merge remote-tracking branch 'origin/main' into tr/issue_tool
# Conflicts:
#	pr_agent/settings/configuration.toml
2023-09-06 08:43:41 +03:00
901c1dc3f0 issue tool 2023-09-06 08:43:01 +03:00
adb9964823 Merge branch 'main' of https://github.com/Codium-ai/pr-agent into fix_bitbucket_publish_description 2023-09-06 09:32:43 +05:30
335877c4a7 fix publish description for bitbucket 2023-09-06 09:26:23 +05:30
5da6a0147c showing how to use huggingface models 2023-09-05 16:23:22 -07:00
cd1ae55f4f bump litellm version to fix azure deployment id error 2023-09-05 15:26:45 -07:00
ca50724952 adding details on calling azure 2023-09-05 15:19:56 -07:00
c0b23e1091 Merge remote-tracking branch 'origin/main' into tr/issue_tool
# Conflicts:
#	pr_agent/algo/utils.py
2023-09-05 08:05:33 +03:00
704c169181 Merge branch 'main' of https://github.com/Codium-ai/pr-agent into fix_bitbucket_improve_issue 2023-09-05 10:00:07 +05:30
746140b26e Add support for markers in description 2023-09-04 12:11:39 -04:00
970a7896e9 Merge branch 'main' of https://github.com/Codium-ai/pr-agent into fix_bitbucket_improve_issue 2023-08-31 13:35:32 +05:30
2aaa722102 Merge branch 'main' of https://github.com/Codium-ai/pr-agent into fix_bitbucket_improve_issue 2023-08-29 09:49:19 +05:30
39522abc03 fix conflicts 2023-08-28 11:21:47 +05:30
0e42634da4 add publish_labels and get_labels functions 2023-08-25 10:15:30 +05:30
f0dc485305 Merge branch 'main' of https://github.com/Codium-ai/pr-agent into fix_bitbucket_improve_issue 2023-08-24 16:14:29 +05:30
db6bf41051 update readme 2023-08-24 15:56:20 +05:30
67ff50583a fix improve, update_changelog and review inline comment 2023-08-24 11:52:20 +05:30
6693aa3cbc semi stable 2023-08-20 15:01:06 +03:00
35 changed files with 817 additions and 194 deletions

GitHub Actions workflow file

@@ -24,4 +24,7 @@ jobs:
       OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
       OPENAI_ORG: ${{ secrets.OPENAI_ORG }} # optional
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      PINECONE.API_KEY: ${{ secrets.PINECONE_API_KEY }}
+      PINECONE.ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}

GitHub Action Dockerfile

@@ -1 +1 @@
-FROM codiumai/pr-agent:github_action
+FROM codiumai/pr-agent:0.7-github_action

INSTALL.md

@@ -24,9 +24,15 @@ To request a review for a PR, or ask a question about a PR, you can run directly
 1. To request a review for a PR, run the following command:
+For GitHub:
 ```
 docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent --pr_url <pr_url> review
 ```
+For GitLab:
+```
+docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent --pr_url <pr_url> review
+```
+For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the environment variables expected names and values.
 2. To ask a question about a PR, run the following command:

@@ -354,7 +360,7 @@ PYTHONPATH="/PATH/TO/PROJECTS/pr-agent" python pr_agent/cli.py \
 ```
 WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
 ```
-3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [Method 5](#method-5-run-as-a-github-app).
+3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [Method 5](#method-5-run-as-a-github-app) steps 4-7.
 4. In the secrets file, fill in the following:
  - Your OpenAI key.
  - In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.

@@ -363,11 +369,5 @@ WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
 In the "Trigger" section, check the comments and merge request events boxes.
 6. Test your installation by opening a merge request or commenting or a merge request using one of CodiumAI's commands.
----
-### Appendix - **Debugging LLM API Calls**
-If you're testing your codium/pr-agent server, and need to see if calls were made successfully + the exact call logs, you can use the [LiteLLM Debugger tool](https://docs.litellm.ai/docs/debugging/hosted_debugging).
-You can do this by setting `litellm_debugger=true` in configuration.toml. Your Logs will be viewable in real-time @ `admin.litellm.ai/<your_email>`. Set your email in the `.secrets.toml` under 'user_email'.
-<img src="./pics/debugger.png" width="800"/>

README.md

@@ -15,20 +15,22 @@ Making pull requests less painful with an AI agent
 </div>
 <div style="text-align:left;">

-CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull requests faster and more efficiently. It automatically analyzes the pull request and can provide several types of PR feedback:
+CodiumAI `PR-Agent` is an open-source tool aiming to help developers review pull requests faster and more efficiently. It automatically analyzes the pull request and can provide several types of commands:

-**Auto Description (/describe)**: Automatically generating [PR description](https://github.com/Codium-ai/pr-agent/pull/229#issue-1860711415) - title, type, summary, code walkthrough and labels.
+**Auto Description (`/describe`)**: Automatically generating [PR description](https://github.com/Codium-ai/pr-agent/pull/229#issue-1860711415) - title, type, summary, code walkthrough and labels.
 \
-**Auto Review (/review)**: [Adjustable feedback](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695022908) about the PR main theme, type, relevant tests, security issues, score, and various suggestions for the PR content.
+**Auto Review (`/review`)**: [Adjustable feedback](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695022908) about the PR main theme, type, relevant tests, security issues, score, and various suggestions for the PR content.
 \
-**Question Answering (/ask ...)**: Answering [free-text questions](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021332) about the PR.
+**Question Answering (`/ask ...`)**: Answering [free-text questions](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021332) about the PR.
 \
-**Code Suggestions (/improve)**: [Committable code suggestions](https://github.com/Codium-ai/pr-agent/pull/229#discussion_r1306919276) for improving the PR.
+**Code Suggestions (`/improve`)**: [Committable code suggestions](https://github.com/Codium-ai/pr-agent/pull/229#discussion_r1306919276) for improving the PR.
 \
-**Update Changelog (/update_changelog)**: Automatically updating the CHANGELOG.md file with the [PR changes](https://github.com/Codium-ai/pr-agent/pull/168#discussion_r1282077645).
+**Update Changelog (`/update_changelog`)**: Automatically updating the CHANGELOG.md file with the [PR changes](https://github.com/Codium-ai/pr-agent/pull/168#discussion_r1282077645).
+\
+**Find similar issue (`/similar_issue`)**: Automatically retrieves and presents [similar issues](https://github.com/Alibaba-MIIL/ASL/issues/107).

-See the [usage guide](./Usage.md) for instructions how to run the different tools from [CLI](./Usage.md#working-from-a-local-repo-cli), or by [online usage](./Usage.md#online-usage).
+See the [usage guide](./Usage.md) for instructions how to run the different tools from [CLI](./Usage.md#working-from-a-local-repo-cli), or by [online usage](./Usage.md#online-usage), as well as additional details on optional commands and configurations.

 <h3>Example results:</h3>
 </div>

@@ -87,9 +89,8 @@ See the [usage guide](./Usage.md) for instructions how to run the different tools
 - [Overview](#overview)
 - [Try it now](#try-it-now)
 - [Installation](#installation)
-- [Usage guide](./Usage.md)
 - [How it works](#how-it-works)
-- [Why use PR-Agent](#why-use-pr-agent)
+- [Why use PR-Agent?](#why-use-pr-agent)
 - [Roadmap](#roadmap)
 </div>

@@ -100,11 +101,12 @@ See the [usage guide](./Usage.md) for instructions how to run the different tools
 |-------|---------------------------------------------|:------:|:------:|:---------:|:----------:|:----------:|:----------:|
 | TOOLS | Review | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
 |       | Ask | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-|       | Auto-Description | :white_check_mark: | :white_check_mark: | | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+|       | Auto-Description | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-|       | Improve Code | :white_check_mark: | :white_check_mark: | | :white_check_mark: | | :white_check_mark: |
+|       | Improve Code | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | :white_check_mark: |
-|       | ⮑ Extended | :white_check_mark: | :white_check_mark: | | :white_check_mark: | | :white_check_mark: |
+|       | ⮑ Extended | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | :white_check_mark: |
-|       | Reflect and Review | :white_check_mark: | | | | :white_check_mark: | :white_check_mark: |
+|       | Reflect and Review | :white_check_mark: | :white_check_mark: | :white_check_mark: | | :white_check_mark: | :white_check_mark: |
-|       | Update CHANGELOG.md | :white_check_mark: | | | | | |
+|       | Update CHANGELOG.md | :white_check_mark: | :white_check_mark: | :white_check_mark: | | | |
+|       | Find similar issue | :white_check_mark: | | | | | |
 |       | | | | | | | | |
 | USAGE | CLI | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
 |       | App / webhook | :white_check_mark: | :white_check_mark: | | | |

@@ -182,7 +184,7 @@ Here are some advantages of PR-Agent:
 - [x] Support additional models, as a replacement for OpenAI (see [here](https://github.com/Codium-ai/pr-agent/pull/172))
 - [x] Develop additional logic for handling large PRs (see [here](https://github.com/Codium-ai/pr-agent/pull/229))
 - [ ] Add additional context to the prompt. For example, repo (or relevant files) summarization, with tools such a [ctags](https://github.com/universal-ctags/ctags)
-- [ ] PR-Agent for issues, and just for pull requests
+- [x] PR-Agent for issues
 - [ ] Adding more tools. Possible directions:
   - [x] PR description
   - [x] Inline code suggestions

@@ -199,4 +201,14 @@ Here are some advantages of PR-Agent:
 - [Aider - GPT powered coding in your terminal](https://github.com/paul-gauthier/aider)
 - [openai-pr-reviewer](https://github.com/coderabbitai/openai-pr-reviewer)
 - [CodeReview BOT](https://github.com/anc95/ChatGPT-CodeReview)
 - [AI-Maintainer](https://github.com/merwanehamadi/AI-Maintainer)
+
+## Links
+
+[![Join our Discord community](https://raw.githubusercontent.com/Codium-ai/codiumai-vscode-release/main/media/docs/Joincommunity.png)](https://discord.gg/kG35uSHDBc)
+
+- Discord community: https://discord.gg/kG35uSHDBc
+- CodiumAI site: https://codium.ai
+- Blog: https://www.codium.ai/blog/
+- Troubleshooting: https://www.codium.ai/blog/technical-faq-and-troubleshooting/
+- Support: support@codium.ai

RELEASE_NOTES.md (new file)

@@ -0,0 +1,25 @@
+## [Version 0.7] - 2023-09-20
+### Docker Tags
+- codiumai/pr-agent:0.7
+- codiumai/pr-agent:0.7-github_app
+- codiumai/pr-agent:0.7-bitbucket-app
+- codiumai/pr-agent:0.7-gitlab_webhook
+- codiumai/pr-agent:0.7-github_polling
+- codiumai/pr-agent:0.7-github_action
+### Added::Algo
+- New tool /similar_issue - Currently on GitHub app and CLI: indexes the issues in the repo, find the most similar issues to the target issue.
+- Describe markers: Empower the /describe tool with a templating capability (see more details in https://github.com/Codium-ai/pr-agent/pull/273).
+- New feature in the /review tool - added an estimated effort estimation to the review (https://github.com/Codium-ai/pr-agent/pull/306).
+### Added::Infrastructure
+- Implementation of a GitLab webhook.
+- Implementation of a BitBucket app.
+### Fixed
+- Protection against no code suggestions generated.
+- Resilience to repositories where the languages cannot be automatically detected.

Usage.md

@@ -50,12 +50,12 @@ When running from your local repo (CLI), your local configuration file will be used
 Examples for invoking the different tools via the CLI:

-- **Review**: `python cli.py --pr_url=<pr_url> /review`
+- **Review**: `python cli.py --pr_url=<pr_url> review`
-- **Describe**: `python cli.py --pr_url=<pr_url> /describe`
+- **Describe**: `python cli.py --pr_url=<pr_url> describe`
-- **Improve**: `python cli.py --pr_url=<pr_url> /improve`
+- **Improve**: `python cli.py --pr_url=<pr_url> improve`
-- **Ask**: `python cli.py --pr_url=<pr_url> /ask "Write me a poem about this PR"`
+- **Ask**: `python cli.py --pr_url=<pr_url> ask "Write me a poem about this PR"`
-- **Reflect**: `python cli.py --pr_url=<pr_url> /reflect`
+- **Reflect**: `python cli.py --pr_url=<pr_url> reflect`
-- **Update Changelog**: `python cli.py --pr_url=<pr_url> /update_changelog`
+- **Update Changelog**: `python cli.py --pr_url=<pr_url> update_changelog`

 `<pr_url>` is the url of the relevant PR (for example: https://github.com/Codium-ai/pr-agent/pull/50).

@@ -149,15 +149,83 @@ TBD

 #### Changing a model
 See [here](pr_agent/algo/__init__.py) for the list of available models.

-To use Llama2 model, for example, set:
-```
-[config]
-model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
-[replicate]
-key = ...
-```
-(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
+#### Azure
+To use Azure, set:
+```
+api_key = "" # your azure api key
+api_type = "azure"
+api_version = '2023-05-15' # Check Azure documentation for the current API version
+api_base = "" # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
+deployment_id = "" # The deployment name you chose when you deployed the engine
+```
+in your .secrets.toml
+and
+```
+[config]
+model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+```
+in the configuration.toml
+
+#### Huggingface
+**Local**
+You can run Huggingface models locally through either [VLLM](https://docs.litellm.ai/docs/providers/vllm) or [Ollama](https://docs.litellm.ai/docs/providers/ollama)
+E.g. to use a new Huggingface model locally via Ollama, set:
+```
+[__init__.py]
+MAX_TOKENS = {
+    "model-name-on-ollama": <max_tokens>
+}
+e.g.
+MAX_TOKENS={
+    ...,
+    "llama2": 4096
+}
+
+[config] # in configuration.toml
+model = "ollama/llama2"
+
+[ollama] # in .secrets.toml
+api_base = ... # the base url for your huggingface inference endpoint
+```
+
+**Inference Endpoints**
+To use a new model with Huggingface Inference Endpoints, for example, set:
+```
+[__init__.py]
+MAX_TOKENS = {
+    "model-name-on-huggingface": <max_tokens>
+}
+e.g.
+MAX_TOKENS={
+    ...,
+    "meta-llama/Llama-2-7b-chat-hf": 4096
+}
+[config] # in configuration.toml
+model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+
+[huggingface] # in .secrets.toml
+key = ... # your huggingface api key
+api_base = ... # the base url for your huggingface inference endpoint
+```
+(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
+
+#### Replicate
+To use Llama2 model with Replicate, for example, set:
+```
+[config] # in configuration.toml
+model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+[replicate] # in .secrets.toml
+key = ...
+```
+(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))

 Also review the [AiHandler](pr_agent/algo/ai_handler.py) file for instruction how to set keys for other models.

 #### Extra instructions

@@ -179,4 +247,26 @@ And use the following settings (you have to replace the values) in .secrets.toml
 [azure_devops]
 org = "https://dev.azure.com/YOUR_ORGANIZATION/"
 pat = "YOUR_PAT_TOKEN"
 ```

+#### Similar issue tool
+
+[Example usage](https://github.com/Alibaba-MIIL/ASL/issues/107)
+
+<img src=./pics/similar_issue_tool.png width="768">
+
+To enable usage of the '**similar issue**' tool, you need to set the following keys in `.secrets.toml` (or in the relevant environment variables):
+```
+[pinecone]
+api_key = "..."
+environment = "..."
+```
+These parameters can be obtained by registering to [Pinecone](https://app.pinecone.io/?sessionType=signup/).
+
+- To invoke the 'similar issue' tool from **CLI**, run:
+`python3 cli.py --issue_url=... similar_issue`
+- To invoke the 'similar' issue tool via online usage, [comment](https://github.com/Codium-ai/pr-agent/issues/178#issuecomment-1716934893) on a PR:
+`/similar_issue`
+- You can also enable the 'similar issue' tool to run automatically when a new issue is opened, by adding it to the [pr_commands list in the github_app section](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L66)

pics/debugger.png (binary file, not shown; removed, previously 534 KiB)

pics/similar_issue_tool.png (new binary file, not shown; 210 KiB)

pr_agent/agent/pr_agent.py

@@ -9,6 +9,7 @@ from pr_agent.git_providers import get_git_provider
 from pr_agent.tools.pr_code_suggestions import PRCodeSuggestions
 from pr_agent.tools.pr_description import PRDescription
 from pr_agent.tools.pr_information_from_user import PRInformationFromUser
+from pr_agent.tools.pr_similar_issue import PRSimilarIssue
 from pr_agent.tools.pr_questions import PRQuestions
 from pr_agent.tools.pr_reviewer import PRReviewer
 from pr_agent.tools.pr_update_changelog import PRUpdateChangelog

@@ -30,6 +31,7 @@ command2class = {
     "update_changelog": PRUpdateChangelog,
     "config": PRConfig,
     "settings": PRConfig,
+    "similar_issue": PRSimilarIssue,
 }
 commands = list(command2class.keys())

pr_agent/algo/__init__.py

@@ -1,4 +1,5 @@
 MAX_TOKENS = {
+    'text-embedding-ada-002': 8000,
     'gpt-3.5-turbo': 4000,
     'gpt-3.5-turbo-0613': 4000,
     'gpt-3.5-turbo-0301': 4000,

@@ -11,4 +12,5 @@ MAX_TOKENS = {
     'claude-2': 100000,
     'command-nightly': 4096,
     'replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1': 4096,
+    'meta-llama/Llama-2-7b-chat-hf': 4096
 }
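As the Usage.md changes above explain, pointing PR-Agent at a new model starts with registering the model's context window in this dictionary. A minimal sketch (the key below is a hypothetical placeholder; it must match the model name the tokenizer lookup and `configuration.toml` will use):

```python
# Hypothetical registration of a context-window size for a new model:
MAX_TOKENS["my-new-model"] = 4096
```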

pr_agent/algo/ai_handler.py

@@ -1,13 +1,12 @@
 import logging
+import os

 import litellm
 import openai
 from litellm import acompletion
 from openai.error import APIError, RateLimitError, Timeout, TryAgain
 from retry import retry

 from pr_agent.config_loader import get_settings

 OPENAI_RETRIES = 5

@@ -26,7 +25,11 @@ class AiHandler:
         try:
             openai.api_key = get_settings().openai.key
             litellm.openai_key = get_settings().openai.key
-            litellm.debugger = get_settings().litellm.debugger
+            if get_settings().get("litellm.use_client"):
+                litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
+                assert litellm_token, "LITELLM_TOKEN is required"
+                os.environ["LITELLM_TOKEN"] = litellm_token
+                litellm.use_client = True
             self.azure = False
             if get_settings().get("OPENAI.ORG", None):
                 litellm.organization = get_settings().openai.org

@@ -48,8 +51,8 @@ class AiHandler:
                 litellm.replicate_key = get_settings().replicate.key
             if get_settings().get("HUGGINGFACE.KEY", None):
                 litellm.huggingface_key = get_settings().huggingface.key
-            if get_settings().get("LITELLM.DEBUGGER") and get_settings().get("LITELLM.EMAIL"):
-                litellm.email = get_settings().get("LITELLM.EMAIL", None)
+            if get_settings().get("HUGGINGFACE.API_BASE", None):
+                litellm.api_base = get_settings().huggingface.api_base
         except AttributeError as e:
             raise ValueError("OpenAI key is required") from e

pr_agent/algo/language_handler.py

@@ -42,6 +42,11 @@ def sort_files_by_main_languages(languages: Dict, files: list):
     files_sorted = []
     rest_files = {}

+    # if no languages detected, put all files in the "Other" category
+    if not languages:
+        files_sorted = [({"language": "Other", "files": list(files_filtered)})]
+        return files_sorted
+
     main_extensions_flat = []
     for ext in main_extensions:
         main_extensions_flat.extend(ext)
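The guard above returns before any extension matching, so repositories whose languages cannot be detected no longer break the tools. Roughly, under the assumption that the input files survive the extension filtering applied earlier in the function:

```python
# With an empty languages dict, every filtered file lands in one bucket:
result = sort_files_by_main_languages({}, files)
# result == [{"language": "Other", "files": [...]}]
```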

pr_agent/algo/token_handler.py

@@ -21,7 +21,7 @@ class TokenHandler:
     method.
     """

-    def __init__(self, pr, vars: dict, system, user):
+    def __init__(self, pr=None, vars: dict = {}, system="", user=""):
         """
         Initializes the TokenHandler object.

@@ -32,7 +32,8 @@ class TokenHandler:
         - user: The user string.
         """
         self.encoder = get_token_encoder()
-        self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)
+        if pr is not None:
+            self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)

     def _get_system_user_tokens(self, pr, encoder, vars: dict, system, user):
         """

pr_agent/algo/utils.py

@@ -20,7 +20,7 @@ def get_setting(key: str) -> Any:
     except Exception:
         return global_settings.get(key, None)

-def convert_to_markdown(output_data: dict) -> str:
+def convert_to_markdown(output_data: dict, gfm_supported: bool=True) -> str:
     """
     Convert a dictionary of data into markdown format.
     Args:

@@ -42,6 +42,7 @@ def convert_to_markdown(output_data: dict) -> str:
         "General suggestions": "💡",
         "Insights from user's answers": "📝",
         "Code feedback": "🤖",
+        "Estimated effort to review [1-5]": "⏱️",
     }

     for key, value in output_data.items():

@@ -49,11 +50,14 @@ def convert_to_markdown(output_data: dict) -> str:
             continue
         if isinstance(value, dict):
             markdown_text += f"## {key}\n\n"
-            markdown_text += convert_to_markdown(value)
+            markdown_text += convert_to_markdown(value, gfm_supported)
         elif isinstance(value, list):
             emoji = emojis.get(key, "")
             if key.lower() == 'code feedback':
-                markdown_text += f"\n\n- **<details><summary> { emoji } Code feedback:**</summary>\n\n"
+                if gfm_supported:
+                    markdown_text += f"\n\n- **<details><summary> { emoji } Code feedback:**</summary>\n\n"
+                else:
+                    markdown_text += f"\n\n- **{emoji} Code feedback:**\n\n"
             else:
                 markdown_text += f"- {emoji} **{key}:**\n\n"
             for item in value:

@@ -62,7 +66,10 @@ def convert_to_markdown(output_data: dict) -> str:
                 elif item:
                     markdown_text += f" - {item}\n"
             if key.lower() == 'code feedback':
-                markdown_text += "</details>\n\n"
+                if gfm_supported:
+                    markdown_text += "</details>\n\n"
+                else:
+                    markdown_text += "\n\n"
         elif value != 'n/a':
             emoji = emojis.get(key, "")
             markdown_text += f"- {emoji} **{key}:** {value}\n"

@@ -168,7 +175,7 @@ def fix_json_escape_char(json_message=None): (whitespace-only change)
     Raises:
         None
     """
     try:
         result = json.loads(json_message)
     except Exception as e:

@@ -195,7 +202,7 @@ def convert_str_to_datetime(date_str): (whitespace-only change)
     Example:
         >>> convert_str_to_datetime('Mon, 01 Jan 2022 12:00:00 UTC')
         datetime.datetime(2022, 1, 1, 12, 0, 0)
     """
     datetime_format = '%a, %d %b %Y %H:%M:%S %Z'
     return datetime.strptime(date_str, datetime_format)
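The new `gfm_supported` flag pairs with the `gfm_markdown` capability added to the providers' `is_supported()` methods later in this diff: providers that cannot render GitHub-flavored markdown get plain lists instead of collapsible sections. A sketch of the intended call pattern (the `data` dict is hypothetical):

```python
data = {"General suggestions": "Add tests", "Code feedback": ["use f-strings"]}
if git_provider.is_supported("gfm_markdown"):
    body = convert_to_markdown(data)  # collapsible <details> sections
else:
    body = convert_to_markdown(data, gfm_supported=False)  # plain bullets
```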

pr_agent/cli.py

@@ -17,6 +17,7 @@ For example:
 - cli.py --pr_url=... improve
 - cli.py --pr_url=... ask "write me a poem about this PR"
 - cli.py --pr_url=... reflect
+- cli.py --issue_url=... similar_issue

 Supported commands:
 -review / review_pr - Add a review that includes a summary of the PR and specific suggestions for improvement.

@@ -37,14 +38,22 @@ Configuration:
 To edit any configuration parameter from 'configuration.toml', just add -config_path=<value>.
 For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions="focus on the file: ..."'
 """)
-    parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', required=True)
+    parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', default=None)
+    parser.add_argument('--issue_url', type=str, help='The URL of the Issue to review', default=None)
     parser.add_argument('command', type=str, help='The', choices=commands, default='review')
     parser.add_argument('rest', nargs=argparse.REMAINDER, default=[])
     args = parser.parse_args(inargs)
+    if not args.pr_url and not args.issue_url:
+        parser.print_help()
+        return
+
     logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
     command = args.command.lower()
     get_settings().set("CONFIG.CLI_MODE", True)
-    result = asyncio.run(PRAgent().handle_request(args.pr_url, command + " " + " ".join(args.rest)))
+    if args.issue_url:
+        result = asyncio.run(PRAgent().handle_request(args.issue_url, command + " " + " ".join(args.rest)))
+    else:
+        result = asyncio.run(PRAgent().handle_request(args.pr_url, command + " " + " ".join(args.rest)))
     if not result:
         parser.print_help()
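The new `--issue_url` flag routes issues through the same `handle_request` entry point as PRs, so `python cli.py --issue_url=... similar_issue` is roughly equivalent to (URL illustrative):

```python
import asyncio

from pr_agent.agent.pr_agent import PRAgent

asyncio.run(PRAgent().handle_request(
    "https://github.com/Codium-ai/pr-agent/issues/178", "similar_issue"))
```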

pr_agent/git_providers/azuredevops_provider.py

@@ -38,7 +38,8 @@ class AzureDevopsProvider:
             self.set_pr(pr_url)

     def is_supported(self, capability: str) -> bool:
-        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels', 'remove_initial_comment']:
+        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels',
+                          'remove_initial_comment', 'gfm_markdown']:
             return False
         return True

pr_agent/git_providers/bitbucket_provider.py

@@ -7,6 +7,7 @@ import requests
 from atlassian.bitbucket import Cloud
 from starlette_context import context

+from ..algo.pr_processing import clip_tokens, find_line_number_of_relevant_line_in_file
 from ..config_loader import get_settings
 from .git_provider import FilePatchInfo, GitProvider

@@ -35,9 +36,8 @@ class BitbucketProvider(GitProvider):
         self.incremental = incremental
         if pr_url:
             self.set_pr(pr_url)
-        self.bitbucket_comment_api_url = self.pr._BitbucketBase__data["links"][
-            "comments"
-        ]["href"]
+        self.bitbucket_comment_api_url = self.pr._BitbucketBase__data["links"]["comments"]["href"]
+        self.bitbucket_pull_request_api_url = self.pr._BitbucketBase__data["links"]['self']['href']

     def get_repo_settings(self):
         try:

@@ -101,12 +101,7 @@ class BitbucketProvider(GitProvider):
         return False

     def is_supported(self, capability: str) -> bool:
-        if capability in [
-            "get_issue_comments",
-            "create_inline_comment",
-            "publish_inline_comments",
-            "get_labels",
-        ]:
+        if capability in ['get_issue_comments', 'publish_inline_comments', 'get_labels', 'gfm_markdown']:
             return False
         return True

@@ -151,17 +146,30 @@ class BitbucketProvider(GitProvider):
         except Exception as e:
             logging.exception(f"Failed to remove temp comments, error: {e}")

-    def publish_inline_comment(
-        self, comment: str, from_line: int, to_line: int, file: str
-    ):
-        payload = json.dumps(
-            {
-                "content": {
-                    "raw": comment,
-                },
-                "inline": {"to": from_line, "path": file},
-            }
-        )
+    # funtion to create_inline_comment
+    def create_inline_comment(self, body: str, relevant_file: str, relevant_line_in_file: str):
+        position, absolute_position = find_line_number_of_relevant_line_in_file(self.get_diff_files(), relevant_file.strip('`'), relevant_line_in_file)
+        if position == -1:
+            if get_settings().config.verbosity_level >= 2:
+                logging.info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
+            subject_type = "FILE"
+        else:
+            subject_type = "LINE"
+        path = relevant_file.strip()
+        return dict(body=body, path=path, position=absolute_position) if subject_type == "LINE" else {}
+
+    def publish_inline_comment(self, comment: str, from_line: int, file: str):
+        payload = json.dumps({
+            "content": {
+                "raw": comment,
+            },
+            "inline": {
+                "to": from_line,
+                "path": file
+            },
+        })
         response = requests.request(
             "POST", self.bitbucket_comment_api_url, data=payload, headers=self.headers
         )

@@ -169,9 +177,7 @@ class BitbucketProvider(GitProvider):
     def publish_inline_comments(self, comments: list[dict]):
         for comment in comments:
-            self.publish_inline_comment(
-                comment["body"], comment["start_line"], comment["line"], comment["path"]
-            )
+            self.publish_inline_comment(comment['body'], comment['start_line'], comment['path'])

     def get_title(self):
         return self.pr.title

@@ -238,16 +244,22 @@ class BitbucketProvider(GitProvider):
     def get_commit_messages(self):
         return ""  # not implemented yet

-    # bitbucket does not support labels
-    def publish_description(self, pr_title: str, pr_body: str):
-        pass
-
-    def create_inline_comment(
-        self, body: str, relevant_file: str, relevant_line_in_file: str
-    ):
-        pass
-
-    def publish_labels(self, labels):
-        pass
+    def publish_description(self, pr_title: str, description: str):
+        payload = json.dumps({
+            "description": description,
+            "title": pr_title
+        })
+
+        response = requests.request("PUT", self.bitbucket_pull_request_api_url, headers=self.headers, data=payload)
+        return response
+
+    # bitbucket does not support labels
+    def publish_labels(self, pr_types: list):
+        pass
+
+    # bitbucket does not support labels
     def get_labels(self):
         pass

pr_agent/git_providers/codecommit_client.py

@@ -54,11 +54,16 @@ class CodeCommitClient:
     def __init__(self):
         self.boto_client = None

+    def is_supported(self, capability: str) -> bool:
+        if capability in ["gfm_markdown"]:
+            return False
+        return True
+
     def _connect_boto_client(self):
         try:
             self.boto_client = boto3.client("codecommit")
         except Exception as e:
-            raise ValueError(f"Failed to connect to AWS CodeCommit: {e}")
+            raise ValueError(f"Failed to connect to AWS CodeCommit: {e}") from e

     def get_differences(self, repo_name: int, destination_commit: str, source_commit: str):
         """

pr_agent/git_providers/codecommit_provider.py

@@ -74,6 +74,7 @@ class CodeCommitProvider(GitProvider):
             "create_inline_comment",
             "publish_inline_comments",
             "get_labels",
+            "gfm_markdown"
         ]:
             return False
         return True

pr_agent/git_providers/gerrit_provider.py

@@ -115,7 +115,14 @@ def adopt_to_gerrit_message(message):
     lines = message.splitlines()
     buf = []
     for line in lines:
-        line = line.replace("*", "").replace("``", "`")
+        # remove markdown formatting
+        line = (line.replace("*", "")
+                .replace("``", "`")
+                .replace("<details>", "")
+                .replace("</details>", "")
+                .replace("<summary>", "")
+                .replace("</summary>", ""))
+
         line = line.strip()
         if line.startswith('#'):
             buf.append("\n" +

@@ -219,10 +226,12 @@ class GerritProvider(GitProvider):
         return [self.repo.head.commit.message]

     def get_repo_settings(self):
-        """
-        TODO: Implement support of .pr_agent.toml
-        """
-        return ""
+        try:
+            with open(self.repo_path / ".pr_agent.toml", 'rb') as f:
+                contents = f.read()
+            return contents
+        except OSError:
+            return b""

     def get_diff_files(self) -> list[FilePatchInfo]:
         diffs = self.repo.head.commit.diff(

@@ -304,7 +313,8 @@ class GerritProvider(GitProvider):
             # 'get_issue_comments',
             'create_inline_comment',
             'publish_inline_comments',
-            'get_labels'
+            'get_labels',
+            'gfm_markdown'
         ]:
             return False
         return True

pr_agent/git_providers/git_provider.py

@@ -132,6 +132,10 @@ def get_main_pr_language(languages, files) -> str:
     Get the main language of the commit. Return an empty string if cannot determine.
     """
     main_language_str = ""
+    if not languages:
+        logging.info("No languages detected")
+        return main_language_str
+
     try:
         top_language = max(languages, key=languages.get).lower()

pr_agent/git_providers/github_provider.py

@@ -32,7 +32,7 @@ class GithubProvider(GitProvider):
         self.diff_files = None
         self.git_files = None
         self.incremental = incremental
-        if pr_url:
+        if pr_url and 'pull' in pr_url:
             self.set_pr(pr_url)
             self.last_commit_id = list(self.pr.get_commits())[-1]

@@ -309,6 +309,35 @@ class GithubProvider(GitProvider):
         return repo_name, pr_number

+    @staticmethod
+    def _parse_issue_url(issue_url: str) -> Tuple[str, int]:
+        parsed_url = urlparse(issue_url)
+
+        if 'github.com' not in parsed_url.netloc:
+            raise ValueError("The provided URL is not a valid GitHub URL")
+
+        path_parts = parsed_url.path.strip('/').split('/')
+        if 'api.github.com' in parsed_url.netloc:
+            if len(path_parts) < 5 or path_parts[3] != 'issues':
+                raise ValueError("The provided URL does not appear to be a GitHub ISSUE URL")
+            repo_name = '/'.join(path_parts[1:3])
+            try:
+                issue_number = int(path_parts[4])
+            except ValueError as e:
+                raise ValueError("Unable to convert issue number to integer") from e
+            return repo_name, issue_number
+
+        if len(path_parts) < 4 or path_parts[2] != 'issues':
+            raise ValueError("The provided URL does not appear to be a GitHub PR issue")
+
+        repo_name = '/'.join(path_parts[:2])
+        try:
+            issue_number = int(path_parts[3])
+        except ValueError as e:
+            raise ValueError("Unable to convert issue number to integer") from e
+
+        return repo_name, issue_number
+
     def _get_github_client(self):
         deployment_type = get_settings().get("GITHUB.DEPLOYMENT_TYPE", "user")
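The new `_parse_issue_url` mirrors the existing `_parse_pr_url`, accepting both web and REST API URL forms. Per the parsing logic above, both calls below (illustrative URLs) resolve to the same result:

```python
repo, num = GithubProvider._parse_issue_url(
    "https://github.com/Codium-ai/pr-agent/issues/178")
# repo == "Codium-ai/pr-agent", num == 178

repo, num = GithubProvider._parse_issue_url(
    "https://api.github.com/repos/Codium-ai/pr-agent/issues/178")
# repo == "Codium-ai/pr-agent", num == 178
```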

pr_agent/git_providers/gitlab_provider.py

@@ -43,7 +43,7 @@ class GitLabProvider(GitProvider):
         self.incremental = incremental

     def is_supported(self, capability: str) -> bool:
-        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments']:
+        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'gfm_markdown']:
             return False
         return True

pr_agent/git_providers/local_git_provider.py

@@ -56,7 +56,8 @@ class LocalGitProvider(GitProvider):
             raise KeyError(f'Branch: {self.target_branch_name} does not exist')

     def is_supported(self, capability: str) -> bool:
-        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels']:
+        if capability in ['get_issue_comments', 'create_inline_comment', 'publish_inline_comments', 'get_labels',
+                          'gfm_markdown']:
             return False
         return True

pr_agent/servers/github_action_runner.py

@@ -12,8 +12,8 @@ async def run_action():
     # Get environment variables
     GITHUB_EVENT_NAME = os.environ.get('GITHUB_EVENT_NAME')
     GITHUB_EVENT_PATH = os.environ.get('GITHUB_EVENT_PATH')
-    OPENAI_KEY = os.environ.get('OPENAI_KEY')
-    OPENAI_ORG = os.environ.get('OPENAI_ORG')
+    OPENAI_KEY = os.environ.get('OPENAI_KEY') or os.environ.get('OPENAI.KEY')
+    OPENAI_ORG = os.environ.get('OPENAI_ORG') or os.environ.get('OPENAI.ORG')
     GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
     get_settings().set("CONFIG.PUBLISH_OUTPUT_PROGRESS", False)

@@ -61,12 +61,21 @@ async def run_action():
         if action in ["created", "edited"]:
             comment_body = event_payload.get("comment", {}).get("body")
             if comment_body:
-                pr_url = event_payload.get("issue", {}).get("pull_request", {}).get("url")
-                if pr_url:
+                is_pr = False
+                # check if issue is pull request
+                if event_payload.get("issue", {}).get("pull_request"):
+                    url = event_payload.get("issue", {}).get("pull_request", {}).get("url")
+                    is_pr = True
+                else:
+                    url = event_payload.get("issue", {}).get("url")
+                if url:
                     body = comment_body.strip().lower()
                     comment_id = event_payload.get("comment", {}).get("id")
-                    provider = get_git_provider()(pr_url=pr_url)
-                    await PRAgent().handle_request(pr_url, body, notify=lambda: provider.add_eyes_reaction(comment_id))
+                    provider = get_git_provider()(pr_url=url)
+                    if is_pr:
+                        await PRAgent().handle_request(url, body, notify=lambda: provider.add_eyes_reaction(comment_id))
+                    else:
+                        await PRAgent().handle_request(url, body)

 if __name__ == '__main__':

pr_agent/settings/.secrets_template.toml

@@ -16,6 +16,10 @@ key = "" # Acquire through https://platform.openai.com
 #deployment_id = "" # The deployment name you chose when you deployed the engine
 #fallback_deployments = [] # For each fallback model specified in configuration.toml in the [config] section, specify the appropriate deployment_id

+[pinecone]
+api_key = "..."
+environment = "gcp-starter"
+
 [anthropic]
 key = "" # Optional, uncomment if you want to use Anthropic. Acquire through https://www.anthropic.com/

@@ -24,6 +28,14 @@ key = "" # Optional, uncomment if you want to use Cohere. Acquire through https:
 [replicate]
 key = "" # Optional, uncomment if you want to use Replicate. Acquire through https://replicate.com/

+[huggingface]
+key = "" # Optional, uncomment if you want to use Huggingface Inference API. Acquire through https://huggingface.co/docs/api-inference/quicktour
+api_base = "" # the base url for your huggingface inference endpoint
+
+[ollama]
+api_base = "" # the base url for your huggingface inference endpoint
+
 [github]
 # ---- Set the following only for deployment type == "user"
 user_token = "" # A GitHub personal access token with 'repo' scope.

@@ -43,5 +55,12 @@ webhook_secret = "<WEBHOOK SECRET>" # Optional, may be commented out.
 personal_access_token = ""

 [bitbucket]
-# Bitbucket personal bearer token
+# For Bitbucket personal/repository bearer token
 bearer_token = ""
+
+# For Bitbucket app
+app_key = ""
+base_url = ""
+
+[litellm]
+LITELLM_TOKEN = "" # see https://docs.litellm.ai/docs/debugging/hosted_debugging for details and instructions on how to get a token

pr_agent/settings/configuration.toml

@@ -11,12 +11,14 @@ ai_timeout=180
 max_description_tokens = 500
 max_commits_tokens = 500
 secret_provider="google_cloud_storage"
+cli_mode=false

 [pr_reviewer] # /review #
 require_focused_review=false
 require_score_review=false
 require_tests_review=true
 require_security_review=true
+require_estimate_effort_to_review=true
 num_code_suggestions=4
 inline_code_comments = false
 ask_and_reflect=false

@@ -24,10 +26,14 @@ automatic_review=true
 extra_instructions = ""

 [pr_description] # /describe #
+publish_labels=true
 publish_description_as_comment=false
 add_original_user_description=false
 keep_original_user_title=false
 extra_instructions = ""
+# markers
+use_description_markers=false
+include_generated_by_header=true

 [pr_questions] # /ask #

@@ -96,5 +102,14 @@ polling_interval_seconds = 30
 # patch_server_token = ""

 [litellm]
-debugger=false
-#email="youremail@example.com"
+#use_client = false
+
+[pr_similar_issue]
+skip_comments = false
+force_update_dataset = false
+max_issues_to_scan = 500
+
+[pinecone]
+# fill and place in .secrets.toml
+#api_key = ...
+# environment = "gcp-starter"
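The new `[pr_similar_issue]` keys are read through the same settings object used across the codebase, so the tool can query them directly. A sketch:

```python
from pr_agent.config_loader import get_settings

settings = get_settings()
max_issues = settings.pr_similar_issue.max_issues_to_scan  # 500 by default
skip_comments = settings.pr_similar_issue.skip_comments    # false by default
```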

pr_agent/settings/pr_reviewer_prompts.toml

@@ -85,6 +85,14 @@ PR Analysis:
       code diff changes are too scattered, then the PR is not focused. Explain
       your answer shortly.
 {%- endif %}
+{%- if require_estimate_effort_to_review %}
+    Estimated effort to review [1-5]:
+        type: string
+        description: >-
+            Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review , 5 means long and hard review.
+            Take into account the size, complexity, quality, and the needed changes of the PR code diff.
+            Explain your answer shortly (1-2 sentences).
+{%- endif %}
   PR Feedback:
     General suggestions:
       type: string

pr_agent/tools/pr_code_suggestions.py

@@ -48,27 +48,33 @@ class PRCodeSuggestions:
                                                          get_settings().pr_code_suggestions_prompt.user)

     async def run(self):
-        logging.info('Generating code suggestions for PR...')
-        if get_settings().config.publish_output:
-            self.git_provider.publish_comment("Preparing review...", is_temporary=True)
+        try:
+            logging.info('Generating code suggestions for PR...')
+            if get_settings().config.publish_output:
+                self.git_provider.publish_comment("Preparing review...", is_temporary=True)

             logging.info('Preparing PR review...')
             if not self.is_extended:
                 await retry_with_fallback_models(self._prepare_prediction)
                 data = self._prepare_pr_code_suggestions()
             else:
                 data = await retry_with_fallback_models(self._prepare_prediction_extended)
+            if (not data) or (not 'Code suggestions' in data):
+                logging.info('No code suggestions found for PR.')
+                return

             if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
                     (self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions):
                 logging.info('Ranking Suggestions...')
                 data['Code suggestions'] = await self.rank_suggestions(data['Code suggestions'])

             if get_settings().config.publish_output:
                 logging.info('Pushing PR review...')
                 self.git_provider.remove_initial_comment()
                 logging.info('Pushing inline code suggestions...')
                 self.push_inline_code_suggestions(data)
+        except Exception as e:
+            logging.error(f"Failed to generate code suggestions for PR, error: {e}")

     async def _prepare_prediction(self, model: str):
         logging.info('Getting PR diff...')

View File

@ -1,5 +1,6 @@
import copy import copy
import json import json
import re
import logging import logging
from typing import List, Tuple from typing import List, Tuple
@@ -28,6 +29,7 @@ class PRDescription:
         self.main_pr_language = get_main_pr_language(
             self.git_provider.get_languages(), self.git_provider.get_files()
         )
+        self.pr_id = f"{self.git_provider.repo}/{self.git_provider.pr_num}"

         # Initialize the AI handler
         self.ai_handler = AiHandler()
@ -61,27 +63,44 @@ class PRDescription:
""" """
Generates a PR description using an AI model and publishes it to the PR. Generates a PR description using an AI model and publishes it to the PR.
""" """
logging.info('Generating a PR description...')
if get_settings().config.publish_output: try:
self.git_provider.publish_comment("Preparing pr description...", is_temporary=True) logging.info(f"Generating a PR description {self.pr_id}")
if get_settings().config.publish_output:
await retry_with_fallback_models(self._prepare_prediction) self.git_provider.publish_comment("Preparing pr description...", is_temporary=True)
logging.info('Preparing answer...') await retry_with_fallback_models(self._prepare_prediction)
pr_title, pr_body, pr_types, markdown_text = self._prepare_pr_answer()
logging.info(f"Preparing answer {self.pr_id}")
if get_settings().config.publish_output: if self.prediction:
logging.info('Pushing answer...') self._prepare_data()
if get_settings().pr_description.publish_description_as_comment:
self.git_provider.publish_comment(markdown_text)
else: else:
self.git_provider.publish_description(pr_title, pr_body) return None
if self.git_provider.is_supported("get_labels"):
current_labels = self.git_provider.get_labels() pr_labels = []
if current_labels is None: if get_settings().pr_description.publish_labels:
current_labels = [] pr_labels = self._prepare_labels()
self.git_provider.publish_labels(pr_types + current_labels)
self.git_provider.remove_initial_comment() if get_settings().pr_description.use_description_markers:
pr_title, pr_body = self._prepare_pr_answer_with_markers()
else:
pr_title, pr_body, = self._prepare_pr_answer()
full_markdown_description = f"## Title\n\n{pr_title}\n\n___\n{pr_body}"
if get_settings().config.publish_output:
logging.info(f"Pushing answer {self.pr_id}")
if get_settings().pr_description.publish_description_as_comment:
self.git_provider.publish_comment(full_markdown_description)
else:
self.git_provider.publish_description(pr_title, pr_body)
if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
current_labels = self.git_provider.get_labels()
if current_labels is None:
current_labels = []
self.git_provider.publish_labels(pr_labels + current_labels)
self.git_provider.remove_initial_comment()
except Exception as e:
logging.error(f"Error generating PR description {self.pr_id}: {e}")
return "" return ""
@@ -99,9 +118,12 @@ class PRDescription:
             Any exceptions raised by the 'get_pr_diff' and '_get_prediction' functions.
         """
-        logging.info('Getting PR diff...')
+        if get_settings().pr_description.use_description_markers and 'pr_agent:' not in self.user_description:
+            return None
+
+        logging.info(f"Getting PR diff {self.pr_id}")
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-        logging.info('Getting AI prediction...')
+        logging.info(f"Getting AI prediction {self.pr_id}")
         self.prediction = await self._get_prediction(model)

     async def _get_prediction(self, model: str) -> str:
@@ -134,34 +156,71 @@ class PRDescription:
         return response

-    def _prepare_pr_answer(self) -> Tuple[str, str, List[str], str]:
+    def _prepare_data(self):
+        # Load the AI prediction data into a dictionary
+        self.data = load_yaml(self.prediction.strip())
+
+        if get_settings().pr_description.add_original_user_description and self.user_description:
+            self.data["User Description"] = self.user_description
+
+    def _prepare_labels(self) -> List[str]:
+        pr_types = []
+
+        # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
+        if 'PR Type' in self.data:
+            if type(self.data['PR Type']) == list:
+                pr_types = self.data['PR Type']
+            elif type(self.data['PR Type']) == str:
+                pr_types = self.data['PR Type'].split(',')
+
+        return pr_types
+
+    def _prepare_pr_answer_with_markers(self) -> Tuple[str, str]:
+        logging.info(f"Using description marker replacements {self.pr_id}")
+        title = self.vars["title"]
+        body = self.user_description
+        if get_settings().pr_description.include_generated_by_header:
+            ai_header = f"### 🤖 Generated by PR Agent at {self.git_provider.last_commit_id.sha}\n\n"
+        else:
+            ai_header = ""
+
+        ai_summary = self.data.get('PR Description')
+        if ai_summary and not re.search(r'<!--\s*pr_agent:summary\s*-->', body):
+            summary = f"{ai_header}{ai_summary}"
+            body = body.replace('pr_agent:summary', summary)
+
+        if not re.search(r'<!--\s*pr_agent:walkthrough\s*-->', body):
+            ai_walkthrough = self.data.get('PR Main Files Walkthrough')
+            if ai_walkthrough:
+                walkthrough = str(ai_header)
+                for file in ai_walkthrough:
+                    filename = file['filename'].replace("'", "`")
+                    description = file['changes in file'].replace("'", "`")
+                    walkthrough += f'- `{filename}`: {description}\n'
+                body = body.replace('pr_agent:walkthrough', walkthrough)
+
+        return title, body
+
+    def _prepare_pr_answer(self) -> Tuple[str, str]:
         """
         Prepare the PR description based on the AI prediction data.

         Returns:
         - title: a string containing the PR title.
-        - pr_body: a string containing the PR body in a markdown format.
-        - pr_types: a list of strings containing the PR types.
-        - markdown_text: a string containing the AI prediction data in a markdown format. used for publishing a comment
+        - pr_body: a string containing the PR description body in a markdown format.
         """
-        # Load the AI prediction data into a dictionary
-        data = load_yaml(self.prediction.strip())
-
-        if get_settings().pr_description.add_original_user_description and self.user_description:
-            data["User Description"] = self.user_description
-
-        # Initialization
-        pr_types = []
-
-        # If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
-        if 'PR Type' in data:
-            if type(data['PR Type']) == list:
-                pr_types = data['PR Type']
-            elif type(data['PR Type']) == str:
-                pr_types = data['PR Type'].split(',')
+        # Iterate over the dictionary items and append the key and value to 'markdown_text' in a markdown format
+        markdown_text = ""
+        for key, value in self.data.items():
+            markdown_text += f"## {key}\n\n"
+            markdown_text += f"{value}\n\n"

         # Remove the 'PR Title' key from the dictionary
-        ai_title = data.pop('PR Title')
+        ai_title = self.data.pop('PR Title', self.vars["title"])
         if get_settings().pr_description.keep_original_user_title:
             # Assign the original PR title to the 'title' variable
             title = self.vars["title"]
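A hedged, self-contained sketch of the marker replacement that `_prepare_pr_answer_with_markers` performs (issue data and filenames are made up); markers wrapped in an HTML comment are deliberately left untouched:

```python
# Standalone sketch of the pr_agent:summary / pr_agent:walkthrough markers.
import re

body = "Author's intro.\n\npr_agent:summary\n\npr_agent:walkthrough\n"
data = {
    "PR Description": "Adds retry logic to the HTTP client.",
    "PR Main Files Walkthrough": [
        {"filename": "client.py", "changes in file": "added retries"},
    ],
}

if data.get("PR Description") and not re.search(r'<!--\s*pr_agent:summary\s*-->', body):
    body = body.replace('pr_agent:summary', data["PR Description"])

if not re.search(r'<!--\s*pr_agent:walkthrough\s*-->', body):
    walkthrough = "".join(f"- `{f['filename']}`: {f['changes in file']}\n"
                          for f in data["PR Main Files Walkthrough"])
    body = body.replace('pr_agent:walkthrough', walkthrough)

print(body)
```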
@ -172,25 +231,27 @@ class PRDescription:
# Iterate over the remaining dictionary items and append the key and value to 'pr_body' in a markdown format, # Iterate over the remaining dictionary items and append the key and value to 'pr_body' in a markdown format,
# except for the items containing the word 'walkthrough' # except for the items containing the word 'walkthrough'
pr_body = "" pr_body = ""
for idx, (key, value) in enumerate(data.items()): for idx, (key, value) in enumerate(self.data.items()):
pr_body += f"## {key}:\n" pr_body += f"## {key}:\n"
if 'walkthrough' in key.lower(): if 'walkthrough' in key.lower():
# for filename, description in value.items(): # for filename, description in value.items():
if self.git_provider.is_supported("gfm_markdown"):
pr_body += "<details> <summary>files:</summary>\n\n"
for file in value: for file in value:
filename = file['filename'].replace("'", "`") filename = file['filename'].replace("'", "`")
description = file['changes in file'] description = file['changes in file']
pr_body += f'`{filename}`: {description}\n' pr_body += f'`{filename}`: {description}\n'
if self.git_provider.is_supported("gfm_markdown"):
pr_body +="</details>\n"
else: else:
# if the value is a list, join its items by comma # if the value is a list, join its items by comma
if type(value) == list: if type(value) == list:
value = ', '.join(v for v in value) value = ', '.join(v for v in value)
pr_body += f"{value}\n" pr_body += f"{value}\n"
if idx < len(data) - 1: if idx < len(self.data) - 1:
pr_body += "\n___\n" pr_body += "\n___\n"
markdown_text = f"## Title\n\n{title}\n\n___\n{pr_body}"
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"title:\n{title}\n{pr_body}") logging.info(f"title:\n{title}\n{pr_body}")
return title, pr_body, pr_types, markdown_text return title, pr_body
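For providers where `is_supported("gfm_markdown")` is true, the walkthrough list is now wrapped in a collapsible `<details>` block; a small sketch of the fragment this emits (file data is made up):

```python
# Sketch of the gfm wrapping added above (hypothetical file entries).
files = [{"filename": "app.py", "changes in file": "added retry logic"}]
gfm_supported = True

pr_body = "## PR Main Files Walkthrough:\n"
if gfm_supported:
    pr_body += "<details> <summary>files:</summary>\n\n"
for f in files:
    pr_body += f"`{f['filename']}`: {f['changes in file']}\n"
if gfm_supported:
    pr_body += "</details>\n"
print(pr_body)
```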

View File

@ -59,6 +59,7 @@ class PRReviewer:
"require_tests": get_settings().pr_reviewer.require_tests_review, "require_tests": get_settings().pr_reviewer.require_tests_review,
"require_security": get_settings().pr_reviewer.require_security_review, "require_security": get_settings().pr_reviewer.require_security_review,
"require_focused": get_settings().pr_reviewer.require_focused_review, "require_focused": get_settings().pr_reviewer.require_focused_review,
"require_estimate_effort_to_review": get_settings().pr_reviewer.require_estimate_effort_to_review,
'num_code_suggestions': get_settings().pr_reviewer.num_code_suggestions, 'num_code_suggestions': get_settings().pr_reviewer.num_code_suggestions,
'question_str': question_str, 'question_str': question_str,
'answer_str': answer_str, 'answer_str': answer_str,
@@ -94,28 +95,32 @@ class PRReviewer:
         """
         Review the pull request and generate feedback.
         """
-        if self.is_auto and not get_settings().pr_reviewer.automatic_review:
-            logging.info(f'Automatic review is disabled {self.pr_url}')
-            return None
-
-        logging.info(f'Reviewing PR: {self.pr_url} ...')
-
-        if get_settings().config.publish_output:
-            self.git_provider.publish_comment("Preparing review...", is_temporary=True)
-
-        await retry_with_fallback_models(self._prepare_prediction)
-
-        logging.info('Preparing PR review...')
-        pr_comment = self._prepare_pr_review()
-
-        if get_settings().config.publish_output:
-            logging.info('Pushing PR review...')
-            self.git_provider.publish_comment(pr_comment)
-            self.git_provider.remove_initial_comment()
-
-            if get_settings().pr_reviewer.inline_code_comments:
-                logging.info('Pushing inline code comments...')
-                self._publish_inline_code_comments()
+        try:
+            if self.is_auto and not get_settings().pr_reviewer.automatic_review:
+                logging.info(f'Automatic review is disabled {self.pr_url}')
+                return None
+
+            logging.info(f'Reviewing PR: {self.pr_url} ...')
+
+            if get_settings().config.publish_output:
+                self.git_provider.publish_comment("Preparing review...", is_temporary=True)
+
+            await retry_with_fallback_models(self._prepare_prediction)
+
+            logging.info('Preparing PR review...')
+            pr_comment = self._prepare_pr_review()
+
+            if get_settings().config.publish_output:
+                logging.info('Pushing PR review...')
+                self.git_provider.publish_comment(pr_comment)
+                self.git_provider.remove_initial_comment()
+
+                if get_settings().pr_reviewer.inline_code_comments:
+                    logging.info('Pushing inline code comments...')
+                    self._publish_inline_code_comments()
+        except Exception as e:
+            logging.error(f"Failed to review PR: {e}")

     async def _prepare_prediction(self, model: str) -> None:
         """
@@ -214,7 +219,7 @@ class PRReviewer:
                 "⏮️ Review for commits since previous PR-Agent review": f"Starting from commit {last_commit_url}"}})
             data.move_to_end('Incremental PR Review', last=False)

-        markdown_text = convert_to_markdown(data)
+        markdown_text = convert_to_markdown(data, self.git_provider.is_supported("gfm_markdown"))
         user = self.git_provider.get_user_id()

         # Add help text if not in CLI mode
@@ -266,7 +271,7 @@ class PRReviewer:
             self.git_provider.publish_inline_comment(content, relevant_file, relevant_line_in_file)

         if comments:
             self.git_provider.publish_inline_comments(comments)

     def _get_user_answers(self) -> Tuple[str, str]:
         """

View File

@@ -0,0 +1,276 @@
import copy
import json
import logging
from enum import Enum
from typing import List, Tuple
import pinecone
import openai
import pandas as pd
from pydantic import BaseModel, Field
from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pinecone_datasets import Dataset, DatasetMetadata
MODEL = "text-embedding-ada-002"
class PRSimilarIssue:
def __init__(self, issue_url: str, args: list = None):
if get_settings().config.git_provider != "github":
raise Exception("Only github is supported for similar issue tool")
self.cli_mode = get_settings().CONFIG.CLI_MODE
self.max_issues_to_scan = get_settings().pr_similar_issue.max_issues_to_scan
self.issue_url = issue_url
self.git_provider = get_git_provider()()
repo_name, issue_number = self.git_provider._parse_issue_url(issue_url.split('=')[-1])
self.git_provider.repo = repo_name
self.git_provider.repo_obj = self.git_provider.github_client.get_repo(repo_name)
self.token_handler = TokenHandler()
repo_obj = self.git_provider.repo_obj
repo_name_for_index = self.repo_name_for_index = repo_obj.full_name.lower().replace('/', '-').replace('_/', '-')
index_name = self.index_name = "codium-ai-pr-agent-issues"
# assuming pinecone api key and environment are set in secrets file
try:
api_key = get_settings().pinecone.api_key
environment = get_settings().pinecone.environment
except Exception:
if not self.cli_mode:
repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
issue_main.create_comment("Please set pinecone api key and environment in secrets file")
raise Exception("Please set pinecone api key and environment in secrets file")
# check if index exists, and if repo is already indexed
run_from_scratch = False
upsert = True
pinecone.init(api_key=api_key, environment=environment)
if not index_name in pinecone.list_indexes():
run_from_scratch = True
upsert = False
else:
if get_settings().pr_similar_issue.force_update_dataset:
upsert = True
else:
pinecone_index = pinecone.Index(index_name=index_name)
res = pinecone_index.fetch([f"example_issue_{repo_name_for_index}"]).to_dict()
if res["vectors"]:
upsert = False
if run_from_scratch or upsert: # index the entire repo
logging.info('Indexing the entire repo...')
logging.info('Getting issues...')
issues = list(repo_obj.get_issues(state='all'))
logging.info('Done')
self._update_index_with_issues(issues, repo_name_for_index, upsert=upsert)
else: # update index if needed
pinecone_index = pinecone.Index(index_name=index_name)
issues_to_update = []
issues_paginated_list = repo_obj.get_issues(state='all')
counter = 1
for issue in issues_paginated_list:
if issue.pull_request:
continue
issue_str, comments, number = self._process_issue(issue)
issue_key = f"issue_{number}"
id = issue_key + "." + "issue"
res = pinecone_index.fetch([id]).to_dict()
is_new_issue = True
for vector in res["vectors"].values():
if vector['metadata']['repo'] == repo_name_for_index:
is_new_issue = False
break
if is_new_issue:
counter += 1
issues_to_update.append(issue)
else:
break
if issues_to_update:
logging.info(f'Updating index with {counter} new issues...')
self._update_index_with_issues(issues_to_update, repo_name_for_index, upsert=True)
else:
logging.info('No new issues to update')
async def run(self):
logging.info('Getting issue...')
repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
issue_str, comments, number = self._process_issue(issue_main)
openai.api_key = get_settings().openai.key
logging.info('Done')
logging.info('Querying...')
res = openai.Embedding.create(input=[issue_str], engine=MODEL)
embeds = [record['embedding'] for record in res['data']]
pinecone_index = pinecone.Index(index_name=self.index_name)
res = pinecone_index.query(embeds[0],
top_k=5,
filter={"repo": self.repo_name_for_index},
include_metadata=True).to_dict()
relevant_issues_number_list = []
relevant_comment_number_list = []
score_list = []
for r in res['matches']:
issue_number = int(r["id"].split('.')[0].split('_')[-1])
if original_issue_number == issue_number:
continue
if issue_number not in relevant_issues_number_list:
relevant_issues_number_list.append(issue_number)
if 'comment' in r["id"]:
relevant_comment_number_list.append(int(r["id"].split('.')[1].split('_')[-1]))
else:
relevant_comment_number_list.append(-1)
score_list.append(str("{:.2f}".format(r['score'])))
logging.info('Done')
logging.info('Publishing response...')
similar_issues_str = "### Similar Issues\n___\n\n"
for i, issue_number_similar in enumerate(relevant_issues_number_list):
issue = self.git_provider.repo_obj.get_issue(issue_number_similar)
title = issue.title
url = issue.html_url
if relevant_comment_number_list[i] != -1:
url = list(issue.get_comments())[relevant_comment_number_list[i]].html_url
similar_issues_str += f"{i + 1}. **[{title}]({url})** (score={score_list[i]})\n\n"
if get_settings().config.publish_output:
response = issue_main.create_comment(similar_issues_str)
logging.info(similar_issues_str)
logging.info('Done')
def _process_issue(self, issue):
header = issue.title
body = issue.body
number = issue.number
if get_settings().pr_similar_issue.skip_comments:
comments = []
else:
comments = list(issue.get_comments())
issue_str = f"Issue Header: \"{header}\"\n\nIssue Body:\n{body}"
return issue_str, comments, number
def _update_index_with_issues(self, issues_list, repo_name_for_index, upsert=False):
logging.info('Processing issues...')
corpus = Corpus()
example_issue_record = Record(
id=f"example_issue_{repo_name_for_index}",
text="example_issue",
metadata=Metadata(repo=repo_name_for_index)
)
corpus.append(example_issue_record)
counter = 0
for issue in issues_list:
if issue.pull_request:
continue
counter += 1
if counter % 100 == 0:
logging.info(f"Scanned {counter} issues")
if counter >= self.max_issues_to_scan:
logging.info(f"Scanned {self.max_issues_to_scan} issues, stopping")
break
issue_str, comments, number = self._process_issue(issue)
issue_key = f"issue_{number}"
username = issue.user.login
created_at = str(issue.created_at)
if len(issue_str) < 8000 or \
self.token_handler.count_tokens(issue_str) < MAX_TOKENS[MODEL]: # fast reject first
issue_record = Record(
id=issue_key + "." + "issue",
text=issue_str,
metadata=Metadata(repo=repo_name_for_index,
username=username,
created_at=created_at,
level=IssueLevel.ISSUE)
)
corpus.append(issue_record)
if comments:
for j, comment in enumerate(comments):
comment_body = comment.body
num_words_comment = len(comment_body.split())
if num_words_comment < 10 or not isinstance(comment_body, str):
continue
if len(comment_body) < 8000 or \
self.token_handler.count_tokens(comment_body) < MAX_TOKENS[MODEL]:
comment_record = Record(
id=issue_key + ".comment_" + str(j + 1),
text=comment_body,
metadata=Metadata(repo=repo_name_for_index,
username=username, # use issue username for all comments
created_at=created_at,
level=IssueLevel.COMMENT)
)
corpus.append(comment_record)
df = pd.DataFrame(corpus.dict()["documents"])
logging.info('Done')
logging.info('Embedding...')
openai.api_key = get_settings().openai.key
list_to_encode = list(df["text"].values)
try:
res = openai.Embedding.create(input=list_to_encode, engine=MODEL)
embeds = [record['embedding'] for record in res['data']]
except:
embeds = []
logging.error('Failed to embed entire list, embedding one by one...')
for i, text in enumerate(list_to_encode):
try:
res = openai.Embedding.create(input=[text], engine=MODEL)
embeds.append(res['data'][0]['embedding'])
except:
embeds.append([0] * 1536)
df["values"] = embeds
meta = DatasetMetadata.empty()
meta.dense_model.dimension = len(embeds[0])
ds = Dataset.from_pandas(df, meta)
logging.info('Done')
api_key = get_settings().pinecone.api_key
environment = get_settings().pinecone.environment
if not upsert:
logging.info('Creating index from scratch...')
ds.to_pinecone_index(self.index_name, api_key=api_key, environment=environment)
else:
logging.info('Upserting index...')
namespace = ""
batch_size: int = 100
concurrency: int = 10
pinecone.init(api_key=api_key, environment=environment)
ds._upsert_to_index(self.index_name, namespace, batch_size, concurrency)
logging.info('Done')
class IssueLevel(str, Enum):
ISSUE = "issue"
COMMENT = "comment"
class Metadata(BaseModel):
repo: str
username: str = Field(default="@codium")
created_at: str = Field(default="01-01-1970 00:00:00.00000")
level: IssueLevel = Field(default=IssueLevel.ISSUE)
class Config:
use_enum_values = True
class Record(BaseModel):
id: str
text: str
metadata: Metadata
class Corpus(BaseModel):
documents: List[Record] = Field(default=[])
def append(self, r: Record):
self.documents.append(r)
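For orientation, a hedged standalone sketch of the query path that `run()` implements above (assumes pinecone-client v2, the pre-1.0 openai SDK used in this file, and an already-populated index; the function name is made up):

```python
# Hypothetical condensed version of the similar-issue query above.
import openai
import pinecone

MODEL = "text-embedding-ada-002"

def find_similar(issue_text: str, repo_key: str, api_key: str, env: str) -> dict:
    pinecone.init(api_key=api_key, environment=env)
    index = pinecone.Index(index_name="codium-ai-pr-agent-issues")
    res = openai.Embedding.create(input=[issue_text], engine=MODEL)
    embed = res["data"][0]["embedding"]
    # Metadata filter keeps matches inside the requesting repo only.
    return index.query(embed, top_k=5, filter={"repo": repo_key},
                       include_metadata=True).to_dict()
```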

View File

@@ -46,7 +46,7 @@ class PRUpdateChangelog:
                                                   get_settings().pr_update_changelog_prompt.user)

     async def run(self):
-        assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"
+        # assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"
         logging.info('Updating the changelog...')
         if get_settings().config.publish_output:

View File

@@ -7,15 +7,17 @@ Jinja2==3.1.2
 tiktoken==0.4.0
 uvicorn==0.22.0
 python-gitlab==3.15.0
-pytest~=7.4.0
-aiohttp~=3.8.4
+pytest==7.4.0
+aiohttp==3.8.4
 atlassian-python-api==3.39.0
-GitPython~=3.1.32
+GitPython==3.1.32
 PyYAML==6.0
 starlette-context==0.3.6
-litellm~=0.1.504
-boto3~=1.28.25
+litellm~=0.1.574
+boto3==1.28.25
 google-cloud-storage==2.10.0
 ujson==5.8.0
 azure-devops==7.1.0b3
 msrest==0.7.1
+pinecone-client
+pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
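Note the pinning change: `~=` (compatible release) accepts any later patch version, so `aiohttp~=3.8.4` could pull in 3.8.5, while `==3.8.4` locks the build to one exact version; `litellm` stays on `~=` but moves to a newer floor, and the two pinecone packages are new dependencies for the similar-issue tool.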

View File

@@ -61,7 +61,7 @@ class TestSortFilesByMainLanguages:
             type('', (object,), {'filename': 'file1.py'})(),
             type('', (object,), {'filename': 'file2.java'})()
         ]
-        expected_output = [{'language': 'Other', 'files': []}]
+        expected_output = [{'language': 'Other', 'files': files}]
         assert sort_files_by_main_languages(languages, files) == expected_output

     # Tests that function handles empty files list
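The updated expectation matches the new no-language protection: files whose language cannot be detected are now grouped under 'Other' rather than silently dropped.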