Compare commits

...

182 Commits
1331 ... v0.26

Author SHA1 Message Date
Tal
8218fa6e13 Merge pull request #1421 from dceoy/main
Update Groq models and tokens
2024-12-30 19:25:23 +02:00
8463c4f549 fix: sanitize Ask tool answers to prevent markdown formatting issues with leading slashes 2024-12-30 16:54:03 +02:00
Tal
014b1f20c5 Merge pull request #1426 from Codium-ai/tr/ask_fix
fix: sanitize Ask tool answers to prevent markdown formatting issues
2024-12-30 15:12:28 +02:00
2f73ab6eab fix: sanitize Ask tool answers to prevent markdown formatting issues with leading slashes 2024-12-30 15:06:27 +02:00
16dc29a23a fix: sanitize Ask tool answers to prevent markdown formatting issues 2024-12-30 14:58:53 +02:00
bd9522057f fix: add OpenAI configuration parameters to restricted CLI arguments 2024-12-30 14:33:26 +02:00
Tal
b3d4af6cbf Merge pull request #1425 from Codium-ai/tr/limit_online_commenting
fix: restrict sensitive configuration parameters in CLI arguments
2024-12-30 14:11:28 +02:00
5df9698bae fix: restrict sensitive configuration parameters in CLI arguments 2024-12-30 13:57:55 +02:00
e89b65ed38 Increase the max token of groq/llama-3.3-70b-versatile 2024-12-30 11:35:02 +09:00
6a145af159 fix: make semantic file types extension optional in PR description 2024-12-29 21:43:46 +02:00
Tal
39a375b3e4 Merge pull request #1423 from Codium-ai/tr/describe_v2
Tr/describe v2
2024-12-29 16:02:03 +02:00
dbd76ecde5 refactor: improve file changes title description length guidance 2024-12-29 11:42:05 +02:00
e95920c58c refactor: improve file handling and description generation in PR description tool 2024-12-29 11:37:05 +02:00
59899f0c62 fix: improve patch generation error handling and logging 2024-12-29 11:27:53 +02:00
5e46955d52 fix: improve file path formatting in patch output 2024-12-29 11:26:13 +02:00
95d0fafa75 refactor: optimize file content loading and improve rate limit handling 2024-12-29 11:25:33 +02:00
71c558d306 Update Groq models and tokens 2024-12-28 01:51:33 +09:00
Tal
7b2c41e0d2 Merge pull request #1420 from Codium-ai/tr/review_fix
fix: improve line extraction from files with missing content
2024-12-27 09:02:41 +02:00
4aad67b563 fix: improve line extraction from files with missing content 2024-12-27 09:00:20 +02:00
Tal
12d603fdb4 Update README.md 2024-12-25 08:42:24 +02:00
Tal
6540e2c674 Merge pull request #1416 from Codium-ai/tr/remove_review_suggestions
refactor: remove legacy code suggestions feature from review tool
2024-12-25 08:26:03 +02:00
83e68f168a log verbosity 2024-12-25 08:22:53 +02:00
5e1b04980e refactor: remove reflection and incremental review features from docs and code 2024-12-25 08:21:33 +02:00
495c1ebe5f refactor: remove legacy code suggestions feature from review tool 2024-12-25 08:18:28 +02:00
Tal
ad71de82a9 Merge pull request #1413 from addianto/docs/1398-environment-variables
Document an example on how to configure PR Agent using environment variables
2024-12-24 21:25:54 +02:00
11676943b6 docs: Remind user to avoid commiting .env file (#1398) 2024-12-24 15:20:15 +07:00
b88507aa23 chore: Ignore .env file 2024-12-24 15:14:30 +07:00
a5c5e6f4ae docs: Add example how to define environment variables (#1398) 2024-12-24 15:01:26 +07:00
47cd361663 docs: Mention about the use of Dynaconf (#1398) 2024-12-24 15:01:13 +07:00
Tal
c84b3d04b9 Merge pull request #1412 from Codium-ai/tr/dedent_review
feat: add dedent option to code snippet formatting
2024-12-24 07:54:36 +02:00
7d9288bb1a feat: add dedent option to code snippet formatting 2024-12-24 07:49:27 +02:00
Tal
93e64367d2 Merge pull request #1410 from Codium-ai/tr/update_changelog_fix
Tr/update changelog fix
2024-12-23 19:37:28 +02:00
6c131b8406 feat: add PR link support in changelog updates 2024-12-23 19:35:52 +02:00
dd89e1f2dc feat: add PR link support in changelog updates 2024-12-23 19:34:21 +02:00
e8e4fb0afa feat: add PR link support in changelog updates 2024-12-23 17:20:29 +02:00
3360a28b3e fix: improve changelog update prompt and response handling 2024-12-23 17:06:21 +02:00
Tal
20c506d2e0 Merge pull request #1402 from KennyDizi/main
Add support for OpenAI `o1` model and snapshot version `o1-2024-12-17`
2024-12-22 09:36:04 +02:00
Tal
a1921d931c Merge pull request #1407 from Codium-ai/tr/publish_output_no_suggestions_fix
fix: only publish empty code suggestions when configured
2024-12-22 09:34:34 +02:00
31aa460f5f fix: only publish empty code suggestions when configured 2024-12-22 09:32:11 +02:00
23678c1d4d Update O1_MODEL_PREFIX to o1 based on new models released 2024-12-22 10:36:59 +07:00
8d7825233a Supported model gpt-o1 2024-12-22 10:33:26 +07:00
Tal
c9f02e63e1 Merge pull request #1403 from Codium-ai/tr/re_review
feat: enhance code review output with collapsible code snippets
2024-12-20 16:38:56 +02:00
c2f1f2dba0 fix: improve markdown rendering when git provider is unavailable 2024-12-19 21:08:27 +02:00
3ab2cac089 fix: improve markdown rendering when git provider is unavailable 2024-12-19 20:59:17 +02:00
989670b159 fix: improve markdown rendering when git provider is unavailable 2024-12-19 20:49:40 +02:00
7e8361b5fd feat: enhance code review output with collapsible code snippets and variable links 2024-12-19 20:30:56 +02:00
eaaaf6a6a2 Fix context windows token for model o1-2024-12-17 2024-12-19 23:11:45 +07:00
07f3933f6d Add support OpenAI model o1 snapshot version o1-2024-12-17 2024-12-19 23:00:47 +07:00
Tal
84786495ed Merge pull request #1401 from Codium-ai/tr/docs3
docs: simplify default tool configurations and update documentation
2024-12-19 16:43:25 +02:00
d09aa1b13e docs: remove unused automatic_review configuration option 2024-12-19 16:41:18 +02:00
Tal
e9615c6994 Merge pull request #1384 from MarkRx/feature/version-metadata
Add --version command and version metadata
2024-12-19 09:34:24 +02:00
f3ee4a75b5 docs: simplify default tool configurations and update documentation 2024-12-19 09:33:20 +02:00
452abe2e18 Move get_version to algo/util.py; fix version to 0.25 2024-12-17 08:44:53 -07:00
Tal
a768969d37 Merge pull request #1397 from KennyDizi/main
Add support for `gemini/gemini-2.0-flash-exp` model
2024-12-16 20:37:55 +02:00
Tal
9ef9198468 Update index.md 2024-12-16 20:37:09 +02:00
Tal
d0ea901bca Update fetching_ticket_context.md 2024-12-16 20:29:17 +02:00
Tal
57089c931b Merge pull request #1396 from ofir-frd/add-pr-body-license-documentation
Similar Code: Add PR Body License Documentation
2024-12-16 20:24:42 +02:00
03d2bea50b Add support model gemini-2.0-flash-exp 2024-12-16 23:37:19 +07:00
721d38d4ed docs: add license information to similar code documentation 2024-12-16 17:11:17 +02:00
Tal
bbec5d9cc9 Merge pull request #1395 from Codium-ai/mrT23-patch-6
Update review.md
2024-12-14 11:54:56 +02:00
Tal
0be2750dfa Update review.md 2024-12-14 11:53:47 +02:00
Tal
5e5c251cd0 Merge pull request #1394 from Codium-ai/mrT23-patch-6
Update README.md
2024-12-13 16:11:12 +02:00
Tal
048ae8ee9e Update README.md
Co-authored-by: qodo-merge-pro-for-open-source[bot] <189517486+qodo-merge-pro-for-open-source[bot]@users.noreply.github.com>
2024-12-13 16:11:04 +02:00
Tal
1a77e9afaf Update README.md 2024-12-13 16:09:18 +02:00
Tal
184a52d325 Update README.md 2024-12-12 21:40:04 +02:00
Tal
3d38060dff Update README.md 2024-12-12 21:35:56 +02:00
c4dc263f2c docs: update repository reference to be platform-agnostic 2024-12-11 18:31:49 +02:00
Tal
f67cc0dd18 Merge pull request #1392 from Codium-ai/tr/model_weak
docs: remove model_weak configuration and simplify model selection
2024-12-11 18:19:05 +02:00
872b27bfd8 docs: remove model_weak configuration and simplify model selection 2024-12-11 18:10:34 +02:00
Tal
cb88489dbe Merge pull request #1387 from KennyDizi/main
Introduce to weak model
2024-12-11 17:36:18 +02:00
Tal
da786b8020 Merge pull request #1391 from Codium-ai/tr/thumbs_up_down
feat: add thumbs up/down support and refactor code suggestions handling
2024-12-11 13:21:07 +02:00
6a51b8501d docs: add allow_thumbs_up_down configuration option and remove rank_extended_suggestions 2024-12-11 13:16:21 +02:00
d34edb83ff feat: add thumbs up/down support and refactor code suggestions handling 2024-12-11 13:03:43 +02:00
Tal
c6eb253ed1 Merge pull request #1390 from glebzhidkov/docs/escape-list-in-yaml-file
Fix doc for Github Actions
2024-12-10 22:07:55 +02:00
a61f1889d1 fix doc 2024-12-10 13:44:56 +01:00
75a120952c Add version metadata and --version command 2024-12-09 09:27:54 -07:00
f9a7b18073 Improve condition to pick up weak model 2024-12-09 22:36:07 +07:00
6352e6e3bf Change default model to regular model 2024-12-09 22:24:44 +07:00
f49217e058 docs: fix typos in improve.md documentation 2024-12-09 09:09:09 +02:00
Tal
e11cec7d9e Merge pull request #1388 from Codium-ai/tr/readme_enhancments
docs: reorganize and enhance best practices documentation in improve.md
2024-12-09 09:05:25 +02:00
c45cde93ef docs: fix typos in improve.md documentation 2024-12-09 09:04:53 +02:00
9e25667d97 docs: reorganize and enhance best practices documentation in improve.md 2024-12-09 09:02:40 +02:00
7dc9e73423 fix: move user_tag variable declaration outside conditional block 2024-12-09 08:27:30 +02:00
e3d779c30d Fix typo model_weak 2024-12-08 22:09:48 +07:00
88a93bdcd7 Update weak model document 2024-12-08 22:01:00 +07:00
3c31048afc Update model in git provider 2024-12-08 22:00:37 +07:00
fc5dda0957 Use weak model for the rest flows 2024-12-08 21:51:29 +07:00
936894e4d1 Use regular model for pr review and code suggestion flows 2024-12-08 21:51:09 +07:00
dec2859fc4 Set default model to weak model 2024-12-08 21:10:26 +07:00
a4d9a65fc6 Add model_week 2024-12-08 20:23:36 +07:00
683108d3a5 Removed model_turbo 2024-12-08 20:10:38 +07:00
e68e100117 typo 2024-12-08 11:46:10 +02:00
Tal
158892047b Merge pull request #1386 from Codium-ai/tr/self_check
Tr/self check
2024-12-08 11:42:53 +02:00
Tal
7d5e59cd40 Update tests/health_test/main.py
Co-authored-by: qodo-merge-pro-for-open-source[bot] <189517486+qodo-merge-pro-for-open-source[bot]@users.noreply.github.com>
2024-12-08 11:41:05 +02:00
e8fc351ce9 docs: add CLI health check section and reorganize automations documentation 2024-12-08 11:39:15 +02:00
43e91b0df7 feat: add health test for PR agent commands and improve output handling 2024-12-08 11:27:43 +02:00
39a461b3b2 docs: update badges and clarify Qodo Merge Pro description 2024-12-05 21:50:39 +02:00
19ade4acf0 fixed link 2024-12-04 14:29:12 +02:00
Tal
10f8b522db Merge pull request #1381 from Codium-ai/tr/qodo_installation
Tr/qodo installation
2024-12-03 18:17:37 +02:00
d26ca4f71c docs: update Qodo Merge Pro installation documentation with Bitbucket support 2024-12-03 18:16:02 +02:00
9160f756db docs: update Qodo Merge Pro installation documentation with Bitbucket support 2024-12-03 18:14:36 +02:00
84c3a7b969 docs: update Qodo Merge Pro installation documentation with Bitbucket support 2024-12-03 18:12:21 +02:00
d9f9cc65b3 Merge pull request #1380 from Codium-ai/hl/ticket_docs_update
Update fetching_ticket_context.md
2024-12-03 18:04:06 +02:00
7d99c0db8e Merge remote-tracking branch 'origin/main' 2024-12-03 17:48:14 +02:00
fe20a8c5e7 docs: update Qodo Merge Pro installation documentation with rebranding changes 2024-12-03 17:48:05 +02:00
43c95106d4 Update fetching_ticket_context.md 2024-12-03 17:18:40 +02:00
Tal
1dd5f0b848 Merge pull request #1379 from Codium-ai/tr/disable_auto_commands
Add disable_auto_feedback configuration option to control automatic feedback
2024-12-02 21:34:15 +02:00
8610aa27a4 Add disable_auto_feedback configuration option to control automatic PR feedback 2024-12-02 21:28:48 +02:00
91bf3c0749 openai version 2024-12-02 09:23:51 +02:00
Tal
159155785e Update README.md 2024-12-02 08:46:36 +02:00
Tal
eabc296246 Merge pull request #1376 from pdecat/enhancement/generalize_publish_output_progress
Add publish_output_progress config support to AzureDevOps, BitBucket and Gitlab providers
2024-12-02 08:27:06 +02:00
Tal
b44030114e Merge pull request #1374 from KennyDizi/main
Add Support for GPT-4o November 2024 Model and Update Configurations
2024-12-02 08:23:26 +02:00
Tal
1d6f87be3b Merge pull request #1375 from Codium-ai/update-google-tag-manager
Update Google Tag Manager ID in custom analytics integration
2024-12-02 07:53:16 +02:00
Tal
a7c6fa7bd2 Merge pull request #1364 from ryanzll/main
Check git_provider and reference_link before using them in utils.py
2024-12-02 07:52:59 +02:00
a825aec5f3 Add publish_output_progress config support to AzureDevOps, BitBucket and Gitlab providers 2024-11-28 17:15:24 +01:00
4df097c228 Update Google Tag Manager ID in custom analytics integration 2024-11-25 15:07:28 +02:00
6871e1b27a docs: add section on customizing best practices label in improve.md 2024-11-24 17:37:35 +02:00
4afe05761d docs: add section on best practices for multiple languages in improve.md 2024-11-24 17:22:18 +02:00
7d1b6c2f0a Upgrade litellm to v1.52.12 to support model gpt-4o-2024-11-20 2024-11-21 22:12:01 +07:00
3547cf2057 Update model_turbo and fallback_models 2024-11-21 22:10:55 +07:00
f2043d639c Add support model gpt-4o-2024-11-20 2024-11-21 22:10:27 +07:00
Tal
6240de3898 Merge pull request #1373 from Codium-ai/tr/ado
Improve logging and error handling in Azure DevOps provider for code …
2024-11-21 13:41:22 +02:00
f08b20c667 Improve logging and error handling in Azure DevOps provider for code suggestions 2024-11-21 13:37:48 +02:00
Tal
e64b468556 Update azure.md 2024-11-21 09:24:45 +02:00
Tal
d48d14dac7 Merge pull request #1369 from Codium-ai/tr/committable_comments
Tr/committable comments
2024-11-20 17:49:08 +02:00
eb0c959ca9 Add validation for committable comments within PR hunks in GitHub provider 2024-11-20 17:28:13 +02:00
741a70ad9d Add detailed diff code generation for GitLab suggestions and improve comment formatting 2024-11-20 17:26:36 +02:00
22ee03981e Add diff code generation for Bitbucket code suggestions and improve logging 2024-11-20 17:25:10 +02:00
Tal
b1336e7d08 Merge pull request #1355 from Codium-ai/tr/3-way-prs
use a more modern package
2024-11-18 17:02:26 +02:00
Tal
751caca141 Merge pull request #1367 from Codium-ai/tr/focus_only_on_problems_enabled
Enable focus_only_on_problems mode by default in configuration and up…
2024-11-18 16:49:57 +02:00
612004727c true 2024-11-18 16:47:55 +02:00
577ee0241d Enable focus_only_on_problems mode by default in configuration and update README.md 2024-11-18 16:35:23 +02:00
a141ca133c Update utils.py
1. add missed emoji for "PR contains tests"
2. check git_provider and reference_link before using them
2024-11-16 09:32:05 +08:00
a14b6a580d Enable pre-commit workflow with manual dispatch trigger 2024-11-14 15:40:29 +02:00
Tal
cc5005c490 Merge pull request #1362 from samuele-ruffino96/docs/update-ci-pipeline-variable-note
docs: add note about $CI_SERVER_FQDN variable in GitLab CI/CD pipeline
2024-11-14 15:36:33 +02:00
3a5d0f54ce docs: add note about $CI_SERVER_FQDN variable in GitLab CI/CD pipeline
Clarified that the $CI_SERVER_FQDN variable was introduced in GitLab 16.10 and explained how to combine $CI_SERVER_HOST:$CI_SERVER_PORT to achieve the same result in earlier GitLab versions.
2024-11-14 12:57:44 +01:00
Tal
cd8ba4f59f Merge pull request #1359 from Codium-ai/tr/is_bot_user
Refactor `is_bot_user` function to improve actor type handling
2024-11-14 08:29:05 +02:00
fe27f96bf1 Improve robustness of sender_id extraction in Bitbucket app server by using safe dictionary access 2024-11-14 08:26:04 +02:00
2c3aa7b2dc Improve actor data extraction logic in Bitbucket app server 2024-11-14 08:23:38 +02:00
c934523f2d Refactor is_bot_user function to improve actor type handling and logging 2024-11-14 08:19:49 +02:00
2f4545dc15 Refactor byte decoding in Bitbucket server provider using decode_if_bytes function 2024-11-12 08:26:33 +02:00
cbd490b3d7 use a more modern version 2024-11-12 08:23:11 +02:00
Tal
b07f96d26a Merge pull request #1354 from Codium-ai/tr/3-way-prs
Tr/3 way prs
2024-11-12 08:17:41 +02:00
065777040f Improve PR file content retrieval and logging verbosity handling 2024-11-12 08:06:02 +02:00
9c82047dc3 Add validation for hunk lines matching original file content in git patch processing 2024-11-12 07:50:37 +02:00
Tal
e0c15409bb Merge pull request #1351 from Codium-ai/tr/fix_docs
Fixed mkdocs emoji configuration after pre-commit error
2024-11-08 10:49:41 +02:00
d956c72cb6 Disable pre-commit workflow and update mkdocs emoji configuration 2024-11-08 10:46:35 +02:00
Tal
dfb3d801cf Merge pull request #1316 from yu-iskw/introduce-pre-commit
Introduce pre-commit hooks and GitHub Actions
2024-11-08 09:58:59 +02:00
Tal
5c5a3e267c Merge branch 'main' into introduce-pre-commit 2024-11-08 09:54:21 +02:00
Tal
f9380c2440 Merge pull request #1350 from NxPKG/patch-1
Remove unused deployment_id variable.
2024-11-08 09:49:07 +02:00
Tal
e6a1f14c0e Merge pull request #1345 from KennyDizi/main
Add Claude 3.5 Haiku Model Support and Update Dependencies
2024-11-07 17:23:28 +02:00
6339845eb4 Remove unused deployment_id variable.
The deployment_id variable is assigned but never used in the function.
2024-11-07 14:24:35 +06:00
Tal
732cc18fd6 Merge pull request #1348 from Codium-ai/tr/focus_only_on_problems
Add focus_only_on_problems setting for targeted code suggestions
2024-11-07 09:09:03 +02:00
84d0f80c81 Add documentation for focus_only_on_problems setting in improve.md and README.md 2024-11-07 09:07:16 +02:00
ee26bf35c1 Add documentation for focus_only_on_problems setting in improve.md and README.md 2024-11-07 09:06:30 +02:00
7a5e9102fd Add documentation for focus_only_on_problems setting in improve.md and README.md 2024-11-07 08:59:10 +02:00
a8c97bfa73 Add documentation for focus_only_on_problems setting in improve.md and README.md 2024-11-07 08:30:18 +02:00
af653a048f Add support model bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 2024-11-07 09:12:52 +07:00
d2663f959a Add focus_only_on_problems setting for targeted code suggestions 2024-11-06 21:22:58 +02:00
e650fe9ce9 Merge remote-tracking branch 'origin/main' 2024-11-06 12:20:41 +02:00
daeca42ae8 Update ticket analysis review setting key in compliance check function 2024-11-06 12:20:31 +02:00
04496f9b0e Update tiktoken to v0.8.0 2024-11-06 08:07:44 +07:00
0eacb3e35e Update openai to v1.54.1 2024-11-06 08:07:19 +07:00
c5ed2f040a Update litellm to v1..52.0 2024-11-06 07:56:30 +07:00
c394fc2767 Upgrade anthropic version to 0.39.0 2024-11-06 07:55:19 +07:00
157251493a Add support claude-3-5-haiku-20241022 model 2024-11-06 07:52:58 +07:00
Tal
4a982a849d Merge pull request #1343 from Codium-ai/mrT23-patch-5
Update automations_and_usage.md
2024-11-05 11:30:16 +02:00
Tal
6e3544f523 Update automations_and_usage.md 2024-11-05 11:27:38 +02:00
Tal
bf3ebbb95f Merge pull request #1342 from Codium-ai/hl/docs_tickets
update docs
2024-11-04 17:20:00 +02:00
eb44ecb1be update docs 2024-11-04 17:18:18 +02:00
Tal
45bae48701 Merge pull request #1341 from Codium-ai/hl/docs_tickets
Hl/docs tickets
2024-11-04 16:36:01 +02:00
b2181e4c79 typos 2024-11-04 16:35:21 +02:00
5939d3b17b readme 2024-11-04 16:32:53 +02:00
c1f4964a55 update review 2024-11-04 16:19:17 +02:00
022e407d84 add documentation for ticket integrations 2024-11-04 15:52:39 +02:00
Tal
93ba2d239a Merge pull request #1331 from miyagi-do/nocode_suggestions_config
Add configuration option to control publish of no code suggestions message
2024-11-04 07:50:57 +02:00
Tal
fa49dd5167 Merge branch 'main' into nocode_suggestions_config 2024-11-04 07:50:22 +02:00
443d06df06 Add configuration option to control publish of no code suggestions message 2024-11-01 15:38:16 -04:00
852bb371af Add pre-commit.yml
Signed-off-by: Yu Ishikawa <yu-iskw@users.noreply.github.com>
2024-10-30 10:00:59 +09:00
7c90e44656 Add pre-commit
Signed-off-by: Yu Ishikawa <yu-iskw@users.noreply.github.com>
2024-10-30 10:00:42 +09:00
81dea65856 Format files by pre-commit run -a
Signed-off-by: Yu Ishikawa <yu-iskw@users.noreply.github.com>
2024-10-30 10:00:36 +09:00
a3d572fb69 Add .pre-commit-config.yaml
Signed-off-by: Yu Ishikawa <yu-iskw@users.noreply.github.com>
2024-10-30 09:55:15 +09:00
139 changed files with 2034 additions and 1444 deletions

View File

@@ -37,5 +37,3 @@ jobs:
 name: Test dev docker
 run: |
 docker run --rm codiumai/pr-agent:test pytest -v tests/unittest

View File

@@ -30,6 +30,3 @@ jobs:
 GITHUB_ACTION_CONFIG.AUTO_DESCRIBE: true
 GITHUB_ACTION_CONFIG.AUTO_REVIEW: true
 GITHUB_ACTION_CONFIG.AUTO_IMPROVE: true

.github/workflows/pre-commit.yml vendored Normal file (+17)
View File

@@ -0,0 +1,17 @@
# disabled. We might run it manually if needed.
name: pre-commit
on:
  workflow_dispatch:
  # pull_request:
  # push:
  #   branches: [main]
jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v5
      # SEE https://github.com/pre-commit/action
      - uses: pre-commit/action@v3.0.1
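Since the workflow above declares only a `workflow_dispatch` trigger (the push and pull_request triggers are commented out), it never runs automatically. A minimal sketch of dispatching it by hand, assuming the GitHub CLI (`gh`) is installed and authenticated against the repository:

```shell
# Manually trigger the pre-commit workflow on the default branch
gh workflow run pre-commit
```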

.gitignore vendored (+1)
View File

@@ -1,6 +1,7 @@
 .idea/
 .lsp/
 .vscode/
+.env
 venv/
 pr_agent/settings/.secrets.toml
 __pycache__

.pre-commit-config.yaml Normal file (+46)
View File

@@ -0,0 +1,46 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
default_language_version:
  python: python3
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-added-large-files
      - id: check-toml
      - id: check-yaml
      - id: end-of-file-fixer
      - id: trailing-whitespace
  # - repo: https://github.com/rhysd/actionlint
  #   rev: v1.7.3
  #   hooks:
  #     - id: actionlint
  - repo: https://github.com/pycqa/isort
    # rev must match what's in dev-requirements.txt
    rev: 5.13.2
    hooks:
      - id: isort
  # - repo: https://github.com/PyCQA/bandit
  #   rev: 1.7.10
  #   hooks:
  #     - id: bandit
  #       args: [
  #         "-c", "pyproject.toml",
  #       ]
  # - repo: https://github.com/astral-sh/ruff-pre-commit
  #   rev: v0.7.1
  #   hooks:
  #     - id: ruff
  #       args:
  #         - --fix
  #     - id: ruff-format
  # - repo: https://github.com/PyCQA/autoflake
  #   rev: v2.3.1
  #   hooks:
  #     - id: autoflake
  #       args:
  #         - --in-place
  #         - --remove-all-unused-imports
  #         - --remove-unused-variables
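The commit "Format files by pre-commit run -a" in the log above is the result of applying this configuration locally. A minimal sketch of that workflow, assuming the `pre-commit` CLI is installed from PyPI:

```shell
# Install the pre-commit CLI (assumed to come from PyPI)
pip install pre-commit
# Run every configured hook against all files, as in the
# "Format files by pre-commit run -a" commit above
pre-commit run -a
```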

README.md (164 changes)
View File

@@ -13,15 +13,13 @@
 Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
 </div>
-[![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
 [![Static Badge](https://img.shields.io/badge/Chrome-Extension-violet)](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl)
-[![Static Badge](https://img.shields.io/badge/Code-Benchmark-blue)](https://pr-agent-docs.codium.ai/finetuning_benchmark/)
+[![Static Badge](https://img.shields.io/badge/Pro-App-blue)](https://github.com/apps/qodo-merge-pro/)
+[![Static Badge](https://img.shields.io/badge/OpenSource-App-red)](https://github.com/apps/qodo-merge-pro-for-open-source/)
 [![Discord](https://badgen.net/badge/icon/discord?icon=discord&label&color=purple)](https://discord.com/channels/1057273017547378788/1126104260430528613)
-[![Twitter](https://img.shields.io/twitter/follow/codiumai)](https://twitter.com/codiumai)
-[![Cheat Sheet](https://img.shields.io/badge/Cheat-Sheet-red)](https://www.codium.ai/images/pr_agent/cheat_sheet.pdf)
 <a href="https://github.com/Codium-ai/pr-agent/commits/main">
 <img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
 </a>
 </div>
 ### [Documentation](https://pr-agent-docs.codium.ai/)

@@ -43,50 +41,44 @@ Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by
 ## News and Updates
-### November 3, 2024
-Meaningful improvement to the quality of code suggestions by separating the code suggestion generation from [line number detection](https://github.com/Codium-ai/pr-agent/pull/1338)
-<kbd>![image](https://github.com/user-attachments/assets/093c185c-31ca-47a1-a4fe-be7d9335ea66)</kbd>
+### December 25, 2024
+The `review` tool previously included a legacy feature for providing code suggestions (controlled by '--pr_reviewer.num_code_suggestion'). This functionality has been deprecated. Use instead the [`improve`](https://qodo-merge-docs.qodo.ai/tools/improve/) tool, which offers higher quality and more actionable code suggestions.
+### December 2, 2024
+Open-source repositories can now freely use Qodo Merge Pro, and enjoy easy one-click installation using a marketplace [app](https://github.com/apps/qodo-merge-pro-for-open-source).
+<kbd><img src="https://github.com/user-attachments/assets/b0838724-87b9-43b0-ab62-73739a3a855c" width="512"></kbd>
+See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details about installing Qodo Merge Pro for private repositories.
-### October 27, 2024
-Qodo Merge PR Agent will now automatically document accepted code suggestions in a dedicated wiki page (`.pr_agent_accepted_suggestions`), enabling users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.
-This dedicated wiki page will also serve as a foundation for future AI model improvements, allowing it to learn from historically implemented suggestions and generate more targeted, contextually relevant recommendations.
-Read more about this novel feature [here](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking).
-<kbd><img href="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" src="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" width="768"></kbd>
+### November 18, 2024
+A new mode was enabled by default for code suggestions - `--pr_code_suggestions.focus_only_on_problems=true`:
+- This option reduces the number of code suggestions received
+- The suggestions will focus more on identifying and fixing code problems, rather than style considerations like best practices, maintainability, or readability.
+- The suggestions will be categorized into just two groups: "Possible Issues" and "General".
+Still, if you prefer the previous mode, you can set `--pr_code_suggestions.focus_only_on_problems=false` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
+**Example results:**
+Original mode
+<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_original_mode.png" width="512"></kbd>
+Focused mode
+<kbd><img src="https://qodo.ai/images/pr_agent/code_suggestions_focused_mode.png" width="512"></kbd>
-### October 21, 2024
-**Disable publishing labels by default:**
-The default setting for `pr_description.publish_labels` has been updated to `false`. This means that labels generated by the `/describe` tool will no longer be published, unless this configuration is explicitly set to `true`.
-We constantly strive to balance informative AI analysis with reducing unnecessary noise. User feedback indicated that in many cases, the original PR title alone provides sufficient information, making the generated labels (`enhancement`, `documentation`, `bug fix`, ...) redundant.
-The [`review_effort`](https://qodo-merge-docs.qodo.ai/tools/review/#configuration-options) label, generated by the `review` tool, will still be published by default, as it provides valuable information enabling reviewers to prioritize small PRs first.
-However, every user has different preferences. To still publish the `describe` labels, set `pr_description.publish_labels=true` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
-For more tailored and relevant labeling, we recommend using the [`custom_labels 💎`](https://qodo-merge-docs.qodo.ai/tools/custom_labels/) tool, that allows generating labels specific to your project's needs.
-<kbd>![image](https://github.com/user-attachments/assets/8f38d222-53b1-4742-b2ec-7ea0a30c9076)</kbd>
-<kbd>![image](https://github.com/user-attachments/assets/8285bd90-0dda-4c7e-9237-bbfde5e21880)</kbd>
+### November 4, 2024
+Qodo Merge PR Agent will now leverage context from Jira or GitHub tickets to enhance the PR Feedback. Read more about this feature
+[here](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/)
-### October 14, 2024
-Improved support for GitHub enterprise server with [GitHub Actions](https://qodo-merge-docs.qodo.ai/installation/github/#action-for-github-enterprise-server)
-### October 10, 2024
-New ability for the `review` tool - **ticket compliance feedback**. If the PR contains a ticket number, PR-Agent will check if the PR code actually [complies](https://github.com/Codium-ai/pr-agent/pull/1279#issuecomment-2404042130) with the ticket requirements.
-<kbd><img src="https://github.com/user-attachments/assets/4a2a728b-5f47-40fa-80cc-16efd296938c" width="768"></kbd>
 ## Overview

@@ -94,39 +86,41 @@ New ability for the `review` tool - **ticket compliance feedback**. If the PR co
 Supported commands per platform:
-| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
+| | | GitHub | GitLab | Bitbucket | Azure DevOps |
 |-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
-| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
-| | ⮑ Incremental | ✅ | | | |
-| | Describe | ✅ | ✅ | ✅ | ✅ |
-| | ⮑ [Inline File Summary](https://pr-agent-docs.codium.ai/tools/describe#inline-file-summary) 💎 | ✅ | | | |
-| | Improve | ✅ | ✅ | ✅ | ✅ |
-| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
-| | Ask | ✅ | ✅ | ✅ | ✅ |
+| TOOLS | [Review](https://qodo-merge-docs.qodo.ai/tools/review/) | ✅ | ✅ | ✅ | ✅ |
+| | [Describe](https://qodo-merge-docs.qodo.ai/tools/describe/) | ✅ | ✅ | ✅ | |
+| | [Improve](https://qodo-merge-docs.qodo.ai/tools/improve/) | ✅ | ✅ | ✅ | ✅ |
+| | [Ask](https://qodo-merge-docs.qodo.ai/tools/ask/) | ✅ | ✅ | ✅ | ✅ |
 | | ⮑ [Ask on code lines](https://pr-agent-docs.codium.ai/tools/ask#ask-lines) | ✅ | ✅ | | |
-| | [Custom Prompt](https://pr-agent-docs.codium.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | ✅ | |
-| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | ✅ | | |
-| | Reflect and Review | ✅ | ✅ | ✅ | ✅ |
-| | Update CHANGELOG.md | ✅ | ✅ | ✅ | ✅ |
-| | Find Similar Issue | ✅ | | | |
-| | [Add PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | | | |
+| | [Update CHANGELOG](https://qodo-merge-docs.qodo.ai/tools/update_changelog/) | ✅ | ✅ | ✅ | |
+| | [Ticket Context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/) 💎 | ✅ | ✅ | | |
+| | [Utilizing Best Practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) 💎 | ✅ | ✅ | | |
+| | [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) 💎 | ✅ | | | |
+| | [Suggestion Tracking](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking) 💎 | ✅ | ✅ | | |
+| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
+| | [PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
 | | [Custom Labels](https://pr-agent-docs.codium.ai/tools/custom_labels/) 💎 | ✅ | ✅ | | |
 | | [Analyze](https://pr-agent-docs.codium.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
-| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
 | | [Similar Code](https://pr-agent-docs.codium.ai/tools/similar_code/) 💎 | ✅ | | | |
+| | [Custom Prompt](https://pr-agent-docs.codium.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | ✅ | |
+| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | ✅ | | |
 | | | | | | |
-| USAGE | CLI | ✅ | ✅ | ✅ | ✅ |
-| | App / webhook | ✅ | ✅ | ✅ | ✅ |
-| | Tagging bot | ✅ | | | |
-| | Actions | ✅ |✅| ✅ |✅|
+| USAGE | [CLI](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#local-repo-cli) | ✅ | ✅ | ✅ | ✅ |
+| | [App / webhook](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-app) | ✅ | ✅ | ✅ | ✅ |
+| | [Tagging bot](https://github.com/Codium-ai/pr-agent#try-it-now) | ✅ | | | |
+| | [Actions](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) | ✅ |✅| ✅ |✅|
 | | | | | | |
-| CORE | PR compression | ✅ | ✅ | ✅ | ✅ |
-| | Repo language prioritization | ✅ | ✅ | ✅ | ✅ |
+| CORE | [PR compression](https://qodo-merge-docs.qodo.ai/core-abilities/compression_strategy/) | ✅ | ✅ | ✅ | ✅ |
 | | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
-| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
-| | [Static code analysis](https://pr-agent-docs.codium.ai/core-abilities/#static-code-analysis) 💎 | ✅ | ✅ | ✅ | |
+| | [Multiple models support](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/) | ✅ | ✅ | ✅ | ✅ |
+| | [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/) | ✅ | ✅ | ✅ | |
+| | [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/) | ✅ | ✅ | ✅ | ✅ |
+| | [Self reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/) | ✅ | ✅ | ✅ | ✅ |
+| | [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/) 💎 | ✅ | ✅ | ✅ | |
 | | [Global and wiki configurations](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) 💎 | ✅ | ✅ | ✅ | |
 | | [PR interactive actions](https://www.codium.ai/images/pr_agent/pr-actions.mp4) 💎 | ✅ | ✅ | | |
+| | [Impact Evaluation](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) 💎 | ✅ | ✅ | | |
 - 💎 means this feature is available only in [PR-Agent Pro](https://www.codium.ai/pricing/)
 [//]: # (- Support for additional git providers is described in [here]&#40;./docs/Full_environments.md&#41;)

@@ -187,50 +181,8 @@ ___
 </kbd>
 </p>
 </div>
-<hr>
-<h4><a href="https://github.com/Codium-ai/pr-agent/pull/530">/generate_labels</a></h4>
-<div align="center">
-<p float="center">
-<kbd><img src="https://www.codium.ai/images/pr_agent/geneare_custom_labels_main_short.png" width="300"></kbd>
-</p>
-</div>
-[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/78#issuecomment-1639739496">/reflect_and_review:</a></h4>)
-[//]: # (<div align="center">)
-[//]: # (<p float="center">)
-[//]: # (<img src="https://www.codium.ai/images/reflect_and_review.gif" width="800">)
-[//]: # (</p>)
-[//]: # (</div>)
-[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695020538">/ask:</a></h4>)
-[//]: # (<div align="center">)
-[//]: # (<p float="center">)
-[//]: # (<img src="https://www.codium.ai/images/ask-2.gif" width="800">)
-[//]: # (</p>)
-[//]: # (</div>)
-[//]: # (<h4><a href="https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695024952">/improve:</a></h4>)
-[//]: # (<div align="center">)
-[//]: # (<p float="center">)
-[//]: # (<img src="https://www.codium.ai/images/improve-2.gif" width="800">)
-[//]: # (</p>)
-[//]: # (</div>)
 <div align="left">

View File

@@ -2,4 +2,3 @@ We take your code's security and privacy seriously:
 - The Chrome extension will not send your code to any external servers.
 - For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge Pro integration.

View File

@@ -4,7 +4,7 @@ With a single-click installation you will gain access to a context-aware chat on
 The extension is powered by top code models like Claude 3.5 Sonnet and GPT4. All the extension's features are free to use on public repositories.
-For private repositories, you will need to install [Qodo Merge Pro](https://github.com/apps/codiumai-pr-agent-pro) in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
+For private repositories, you will need to install [Qodo Merge Pro](https://github.com/apps/qodo-merge-pro) in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
 For a demonstration of how to install Qodo Merge Pro and use it with the Chrome extension, please refer to the tutorial video at the provided [link](https://codium.ai/images/pr_agent/private_repos.mp4).
 <img src="https://codium.ai/images/pr_agent/PR-AgentChat.gif" width="768">

View File

@@ -0,0 +1,134 @@
# Fetching Ticket Context for PRs
`Supported Git Platforms : GitHub, GitLab, Bitbucket`
## Overview
Qodo Merge PR Agent streamlines code review workflows by seamlessly connecting with multiple ticket management systems.
This integration enriches the review process by automatically surfacing relevant ticket information and context alongside code changes.
## Ticket systems supported
- GitHub
- Jira (💎)
Ticket data fetched:
1. Ticket Title
2. Ticket Description
3. Custom Fields (Acceptance criteria)
4. Subtasks (linked tasks)
5. Labels
6. Attached Images/Screenshots
## Affected Tools
Ticket Recognition Requirements:
- The PR description should contain a link to the ticket, or the branch name should start with the ticket ID/number.
- For Jira tickets, you should follow the instructions in [Jira Integration](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/#jira-integration) in order to authenticate with Jira.
### Describe tool
Qodo Merge PR Agent will recognize the ticket and use the ticket content (title, description, labels) to provide additional context for the code changes.
By understanding the reasoning and intent behind modifications, the LLM can offer more insightful and relevant code analysis.
### Review tool
Similarly to the `describe` tool, the `review` tool will use the ticket content to provide additional context for the code changes.
In addition, this feature will evaluate how well a Pull Request (PR) adheres to its original purpose/intent as defined by the associated ticket or issue mentioned in the PR description.
Each ticket will be assigned a Compliance/Alignment label indicating the degree to which the PR fulfills its original purpose. Options: Fully compliant, Partially compliant, or Not compliant.
![Ticket Compliance](https://www.qodo.ai/images/pr_agent/ticket_compliance_review.png){width=768}
By default, the tool will automatically validate if the PR complies with the referenced ticket.
If you want to disable this feedback, add the following line to your configuration file:
```toml
[pr_reviewer]
require_ticket_analysis_review=false
```
## Providers
### GitHub Issues Integration
Qodo Merge PR Agent will automatically recognize GitHub issues mentioned in the PR description and fetch the issue content.
Examples of valid GitHub issue references:
- `https://github.com/<ORG_NAME>/<REPO_NAME>/issues/<ISSUE_NUMBER>`
- `#<ISSUE_NUMBER>`
- `<ORG_NAME>/<REPO_NAME>#<ISSUE_NUMBER>`
Since Qodo Merge PR Agent is integrated with GitHub, it doesn't require any additional configuration to fetch GitHub issues.
### Jira Integration 💎
We support both Jira Cloud and Jira Server/Data Center.
To integrate with Jira, you can link your PR to a ticket using either of these methods:
**Method 1: Description Reference:**
Include a ticket reference in your PR description, using either the complete URL format `https://<JIRA_ORG>.atlassian.net/browse/ISSUE-123` or the shortened ticket ID `ISSUE-123`.
**Method 2: Branch Name Detection:**
Name your branch with the ticket ID as a prefix (e.g., `ISSUE-123-feature-description` or `ISSUE-123/feature-description`).
!!! note "Jira Base URL"
For shortened ticket IDs or branch detection (method 2), you must configure the Jira base URL in your configuration file under the `[jira]` section:
```toml
[jira]
jira_base_url = "https://<JIRA_ORG>.atlassian.net"
```
#### Jira Cloud 💎
There are two ways to authenticate with Jira Cloud:
**1) Jira App Authentication**
The recommended way to authenticate with Jira Cloud is to install the Qodo Merge app in your Jira Cloud instance. This will allow Qodo Merge to access Jira data on your behalf.
Installation steps:
1. Click [here](https://auth.atlassian.com/authorize?audience=api.atlassian.com&client_id=8krKmA4gMD8mM8z24aRCgPCSepZNP1xf&scope=read%3Ajira-work%20offline_access&redirect_uri=https%3A%2F%2Fregister.jira.pr-agent.codium.ai&state=qodomerge&response_type=code&prompt=consent) to install the Qodo Merge app in your Jira Cloud instance, click the `accept` button.<br>
![Jira Cloud App Installation](https://www.qodo.ai/images/pr_agent/jira_app_installation1.png){width=384}
2. After installing the app, you will be redirected to the Qodo Merge registration page, where you will see a success message.<br>
![Jira Cloud App success message](https://www.qodo.ai/images/pr_agent/jira_app_success.png){width=384}
3. Now you can use the Jira integration in Qodo Merge PR Agent.
**2) Email/Token Authentication**
You can create an API token from your Atlassian account:
1. Log in to https://id.atlassian.com/manage-profile/security/api-tokens.
2. Click Create API token.
3. From the dialog that appears, enter a name for your new token and click Create.
4. Click Copy to clipboard.
![Jira Cloud API Token](https://images.ctfassets.net/zsv3d0ugroxu/1RYvh9lqgeZjjNe5S3Hbfb/155e846a1cb38f30bf17512b6dfd2229/screenshot_NewAPIToken){width=384}
5. In your [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) add the following lines:
```toml
[jira]
jira_api_token = "YOUR_API_TOKEN"
jira_api_email = "YOUR_EMAIL"
```
#### Jira Server/Data Center 💎
Currently, we only support the Personal Access Token (PAT) Authentication method.
1. Create a [Personal Access Token (PAT)](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) in your Jira account
2. In your Configuration file/Environment variables/Secrets file, add the following lines:
```toml
[jira]
jira_base_url = "YOUR_JIRA_BASE_URL" # e.g. https://jira.example.com
jira_api_token = "YOUR_API_TOKEN"
```

View File

@@ -1,6 +1,7 @@
 # Core Abilities
 Qodo Merge utilizes a variety of core abilities to provide a comprehensive and efficient code review experience. These abilities include:
+- [Fetching ticket context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/)
 - [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/)
 - [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/)
 - [Self-reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/)

View File

@@ -61,7 +61,7 @@ Or be triggered interactively by using the `analyze` tool.
 ### Find Similar Code
-The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.
+The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase or from open-source code, including details about the license associated with each repository.
 For example:

View File

@@ -25,36 +25,43 @@ To search the documentation site using natural language:
 Qodo Merge offers extensive pull request functionalities across various git providers.
-| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
-|-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
-| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
-| | ⮑ Incremental | ✅ | | | |
-| | Ask | ✅ | ✅ | ✅ | ✅ |
-| | Describe | ✅ | ✅ | ✅ | ✅ |
-| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | | | | |
-| | Improve | ✅ | ✅ | ✅ | ✅ |
-| | ⮑ Extended | ✅ | ✅ | | |
-| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | | |
-| | Reflect and Review | ✅ | ✅ | ✅ | |
-| | Update CHANGELOG.md | | ✅ | ✅ | |
-| | Find Similar Issue | | | | |
-| | [Add PR Documentation](./tools/documentation.md){:target="_blank"} 💎 | ✅ | | | |
-| | [Generate Custom Labels](./tools/describe.md#handle-custom-labels-from-the-repos-labels-page-💎){:target="_blank"} 💎 | ✅ | | | |
-| | [Analyze PR Components](./tools/analyze.md){:target="_blank"} 💎 | | | | |
-| | | | | | |
-| USAGE | CLI | ✅ | ✅ | | |
-| | App / webhook | | | | |
-| | Actions | ✅ | | | |
-| | | | | | |
-| CORE | PR compression | ✅ | ✅ | ✅ | ✅ |
-| | Repo language prioritization | | | | |
-| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
-| | Multiple models support | ✅ | ✅ | ✅ | ✅ |
-| | Incremental PR review | ✅ | | | |
-| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
-| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
-💎 marks a feature available only in [Qodo Merge Pro](https://www.codium.ai/pricing/){:target="_blank"}
+| | | GitHub | GitLab | Bitbucket | Azure DevOps |
+|-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
+| TOOLS | [Review](https://qodo-merge-docs.qodo.ai/tools/review/) | ✅ | ✅ | ✅ | ✅ |
+| | [Describe](https://qodo-merge-docs.qodo.ai/tools/describe/) | ✅ | ✅ | ✅ | |
+| | [Improve](https://qodo-merge-docs.qodo.ai/tools/improve/) | ✅ | ✅ | ✅ | ✅ |
+| | [Ask](https://qodo-merge-docs.qodo.ai/tools/ask/) | ✅ | ✅ | ✅ | ✅ |
+| | ⮑ [Ask on code lines](https://pr-agent-docs.codium.ai/tools/ask#ask-lines) | ✅ | | | |
+| | [Update CHANGELOG](https://qodo-merge-docs.qodo.ai/tools/update_changelog/) | ✅ | ✅ | ✅ | ✅ |
+| | [Ticket Context](https://qodo-merge-docs.qodo.ai/core-abilities/fetching_ticket_context/) 💎 | ✅ | ✅ | | |
+| | [Utilizing Best Practices](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) 💎 | ✅ | ✅ | | |
+| | [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) 💎 | ✅ | | | |
+| | [Suggestion Tracking](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking) 💎 | ✅ | ✅ | | |
+| | [CI Feedback](https://pr-agent-docs.codium.ai/tools/ci_feedback/) 💎 | ✅ | | | |
+| | [PR Documentation](https://pr-agent-docs.codium.ai/tools/documentation/) 💎 | ✅ | ✅ | | |
+| | [Custom Labels](https://pr-agent-docs.codium.ai/tools/custom_labels/) 💎 | ✅ | | | |
+| | [Analyze](https://pr-agent-docs.codium.ai/tools/analyze/) 💎 | ✅ | ✅ | | |
+| | [Similar Code](https://pr-agent-docs.codium.ai/tools/similar_code/) 💎 | ✅ | | | |
+| | [Custom Prompt](https://pr-agent-docs.codium.ai/tools/custom_prompt/) 💎 | ✅ | ✅ | | |
+| | [Test](https://pr-agent-docs.codium.ai/tools/test/) 💎 | ✅ | | | |
+| | | | | | |
+| USAGE | [CLI](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#local-repo-cli) | ✅ | ✅ | ✅ | |
+| | [App / webhook](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-app) | ✅ | ✅ | ✅ | ✅ |
+| | [Tagging bot](https://github.com/Codium-ai/pr-agent#try-it-now) | ✅ | | | |
+| | [Actions](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) | ✅ |✅| ✅ |✅|
+| | | | | | |
+| CORE | [PR compression](https://qodo-merge-docs.qodo.ai/core-abilities/compression_strategy/) | ✅ | ✅ | ✅ | ✅ |
+| | Adaptive and token-aware file patch fitting | ✅ | ✅ | ✅ | ✅ |
+| | [Multiple models support](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/) | ✅ | ✅ | ✅ | ✅ |
+| | [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/) | ✅ | ✅ | ✅ | |
+| | [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/) | ✅ | ✅ | ✅ | |
+| | [Self reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/) | ✅ | ✅ | ✅ | |
+| | [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/) 💎 | ✅ | ✅ | ✅ | |
+| | [Global and wiki configurations](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) 💎 | ✅ | ✅ | ✅ | |
+| | [PR interactive actions](https://www.codium.ai/images/pr_agent/pr-actions.mp4) 💎 | ✅ | ✅ | | |
+| | [Impact Evaluation](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) 💎 | ✅ | ✅ | | |
+💎 marks a feature available only in [Qodo Merge Pro](https://www.qodo.ai/pricing/){:target="_blank"}
 ## Example Results

View File

@@ -51,10 +51,12 @@ stages:
 ```
 This script will run Qodo Merge on every new merge request, with the `improve`, `review`, and `describe` commands.
 Note that you need to export the `azure_devops__pat` and `OPENAI_KEY` variables in the Azure DevOps pipeline settings (Pipelines -> Library -> + Variable group):
 ![Qodo Merge Pro](https://codium.ai/images/pr_agent/azure_devops_pipeline_secrets.png){width=468}
 Make sure to give pipeline permissions to the `pr_agent` variable group.
+> Note that Azure Pipelines lacks support for triggering workflows from PR comments. If you find a viable solution, please contribute it to our [issue tracker](https://github.com/Codium-ai/pr-agent/issues)
 ## Azure DevOps from CLI

View File

@@ -38,6 +38,7 @@ You can also modify the `script` section to run different Qodo Merge commands, o
 Note that if your base branches are not protected, don't set the variables as `protected`, since the pipeline will not have access to them.
+> **Note**: The `$CI_SERVER_FQDN` variable is available starting from GitLab version 16.10. If you're using an earlier version, this variable will not be available. However, you can combine `$CI_SERVER_HOST` and `$CI_SERVER_PORT` to achieve the same result. Please ensure you're using a compatible version or adjust your configuration.
 ## Run a GitLab webhook server

View File

@@ -66,7 +66,30 @@ To invoke a tool (for example `review`), you can run directly from the Docker im
 docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
 ```
-For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the environment variables expected names and values.
+For other git providers, update `CONFIG.GIT_PROVIDER` accordingly and check the `pr_agent/settings/.secrets_template.toml` file for environment variables expected names and values.
+The `pr_agent` uses [Dynaconf](https://www.dynaconf.com/) to load settings from configuration files.
+It is also possible to provide or override the configuration by setting the corresponding environment variables.
+You can define the corresponding environment variables by following this convention: `<TABLE>__<KEY>=<VALUE>` or `<TABLE>.<KEY>=<VALUE>`.
+The `<TABLE>` refers to a table/section in a configuration file and `<KEY>=<VALUE>` refers to the key/value pair of a setting in the configuration file.
+For example, suppose you want to run `pr_agent` that connects to a self-hosted GitLab instance similar to an example above.
+You can define the environment variables in a plain text file named `.env` with the following content:
+> Warning: Never commit the `.env` file to version control system as it might contain sensitive credentials!
+```
+CONFIG__GIT_PROVIDER="gitlab"
+GITLAB__URL="<your url>"
+GITLAB__PERSONAL_ACCESS_TOKEN="<your token>"
+OPENAI__KEY="<your key>"
+```
+Then, you can run `pr_agent` using Docker with the following command:
+```shell
+docker run --rm -it --env-file .env codiumai/pr-agent:latest <tool> <tool parameter>
+```
 ---
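Putting the two documented commands above together, a hedged example of invoking a specific tool with settings loaded from the `.env` file (`<pr_url>` is a placeholder for a real pull request URL):

```shell
# Run the review tool with credentials supplied via .env
docker run --rm -it --env-file .env codiumai/pr-agent:latest --pr_url=<pr_url> review
```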

View File

@ -1,31 +1,44 @@
Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by QodoAI.
## Getting Started with Qodo Merge Pro
Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for more details about the benefits of using Qodo Merge Pro. See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for more details about the benefits of using Qodo Merge Pro.
Interested parties can subscribe to Qodo Merge Pro through the following [link](https://www.codium.ai/pricing/). A complimentary two-week trial is provided to all new users. Following the trial period, user licenses (seats) are required for continued access.
After subscribing, you are granted the ability to easily install the application across any of your repositories. To purchase user licenses, please visit our [pricing page](https://www.qodo.ai/pricing/).
Once subscribed, users can seamlessly deploy the application across any of their code repositories.
## Install Qodo Merge Pro for GitHub
### GitHub Cloud
Qodo Merge Pro for GitHub cloud is available for installation through the [GitHub Marketplace](https://github.com/apps/qodo-merge-pro).
![Qodo Merge Pro](https://codium.ai/images/pr_agent/pr_agent_pro_install.png){width=468} ![Qodo Merge Pro](https://codium.ai/images/pr_agent/pr_agent_pro_install.png){width=468}
### GitHub Enterprise Server
To use the Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
### GitHub Open Source Projects
For open-source projects, Qodo Merge Pro is available for free usage. To install Qodo Merge Pro for your open-source repositories, use the following marketplace [link](https://github.com/apps/qodo-merge-pro-for-open-source).
## Install Qodo Merge Pro for Bitbucket
### Bitbucket Cloud
Qodo Merge Pro for Bitbucket Cloud is available for installation through the following [link](https://bitbucket.org/site/addons/authorize?addon_key=d6df813252c37258)
![Qodo Merge Pro](https://qodo.ai/images/pr_agent/pr_agent_pro_bitbucket_install.png){width=468}
### Bitbucket Server
To use Qodo Merge Pro application on your private Bitbucket Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
## Install Qodo Merge Pro for GitLab (Teams & Enterprise)
Since the GitLab platform does not support apps, installing Qodo Merge Pro for GitLab is a bit more involved, and requires the following steps:
#### Step 1
Acquire a personal, project or group level access token. Enable the “api” scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.
@ -35,14 +48,14 @@ Acquire a personal, project or group level access token. Enable the “api” sc
Store the token in a safe place; you won't be able to access it again after it was generated.
#### Step 2
Generate a shared secret and link it to the access token. Browse to [https://register.gitlab.pr-agent.codium.ai](https://register.gitlab.pr-agent.codium.ai).
Fill in your generated GitLab token and your company or personal name in the appropriate fields and click "Submit".
You should see "Success!" displayed above the Submit button, and a shared secret will be generated. Store it in a safe place; you won't be able to access it again after it was generated.
#### Step 3
Install a webhook for your repository or groups, by clicking “webhooks” on the settings menu. Click the “Add new webhook” button.
@ -53,7 +66,7 @@ Install a webhook for your repository or groups, by clicking “webhooks” on t
In the webhook definition form, fill in the following fields:
URL: https://pro.gitlab.pr-agent.codium.ai/webhook
Secret token: Your QodoAI key
Trigger: Check the comments and merge request events boxes.
Enable SSL verification: Check the box.
@ -61,7 +74,7 @@ Enable SSL verification: Check the box.
![Step 3.2](https://www.codium.ai/images/pr_agent/gitlab_pro_webhooks.png){width=750}
</figure>
#### Step 4
You're all set!


@ -1,6 +1,6 @@
### Overview
[Qodo Merge Pro](https://www.codium.ai/pricing/) is a hosted version of the open-source [Qodo Merge (PR-Agent)](https://github.com/Codium-ai/pr-agent). A complimentary two-week trial is offered, followed by a monthly subscription fee.
Qodo Merge Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub/GitLab/BitBucket repo.


@ -95,6 +95,112 @@ This feature is controlled by a boolean configuration parameter: `pr_code_sugges
Instead, we leverage a dedicated private page, within your repository wiki, to track suggestions. This approach offers convenient, secure suggestion tracking while avoiding pull requests or any noise to the main repository.
## `Extra instructions` and `best practices`
The `improve` tool can be further customized by providing additional instructions and best practices to the AI model.
### Extra instructions
>`Platforms supported: GitHub, GitLab, Bitbucket, Azure DevOps`
You can use the `extra_instructions` configuration option to give the AI model additional instructions for the `improve` tool.
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter.
Examples of possible instructions:
```toml
[pr_code_suggestions]
extra_instructions="""\
(1) Answer in Japanese
(2) Don't suggest adding a try-except block
(3) Ignore changes in toml files
...
"""
```
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.
### Best practices 💎
>`Platforms supported: GitHub, GitLab, Bitbucket`
Another option to give additional guidance to the AI model is by creating a dedicated [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) called `best_practices.md`.
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.
The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will create additional suggestions, with a dedicated label: `Organization best practice`.
An example of `best_practices.md` content for a Python repo:
```markdown
## Project best practices
- Make sure that I/O operations are encapsulated in a try-except block
- Use the `logging` module for logging instead of `print` statements
- Use `is` and `is not` to compare with `None`
- Use `if __name__ == '__main__':` to run the code only when the script is executed
- Use `with` statement to open files
...
```
Tips for writing an effective `best_practices.md` file:
- Write clearly and concisely
- Include brief code examples when helpful
- Focus on project-specific guidelines that will result in relevant suggestions you actually want to get
- Keep the file relatively short, under 800 lines, since:
- AI models may not effectively process very long documents
- Long files tend to contain generic guidelines already known to AI
#### Local and global best practices
By default, Qodo Merge will look for a local `best_practices.md` wiki file in the root of the relevant local repo.
If you also want to enable a global `best_practices.md` wiki file, first set in the global configuration file:
```toml
[best_practices]
enable_global_best_practices = true
```
Then, create a `best_practices.md` wiki file in the root of the [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
#### Best practices for multiple languages
For a git organization working with multiple programming languages, you can maintain a centralized global `best_practices.md` file containing language-specific guidelines.
When reviewing pull requests, Qodo Merge automatically identifies the programming language and applies the relevant best practices from this file.
To do this, structure your `best_practices.md` file using the following format:
```
# [Python]
...
# [Java]
...
# [JavaScript]
...
```
#### Dedicated label for best practices suggestions
Best practice suggestions are labeled as `Organization best practice` by default.
To customize this label, modify it in your configuration file:
```toml
[best_practices]
organization_name = "..."
```
And the label will be: `{organization_name} best practice`.
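For example, with the hypothetical setting `organization_name = "Acme"`, suggestions derived from your guidelines would carry the label `Acme best practice`.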
#### Example results
![best_practice](https://codium.ai/images/pr_agent/org_best_practice.png){width=512}
### How to combine `extra instructions` and `best practices`
The `extra instructions` configuration is more related to the `improve` tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest adding a try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)
In contrast, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
Using a combination of both can help the AI model to provide relevant and tailored suggestions.
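As an illustrative sketch of combining the two (section and key names follow the examples above; the values are placeholders, not recommendations):
```toml
[pr_code_suggestions]
extra_instructions = """\
(1) Answer in Japanese
(2) Give only short suggestions
"""

[best_practices]
enable_global_best_practices = true
```
Here the extra instructions shape the tone and scope of the suggestions, while the `best_practices.md` wiki page supplies the repo-wide coding guidelines.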
## Usage Tips
### Implementing the proposed code suggestions
@ -191,73 +297,6 @@ This approach has two main benefits:
Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 lines of code), Qodo Merge will be able to process the entire code in a single call.
## Configuration options
??? example "General options"
@ -275,6 +314,10 @@ Using a combination of both can help the AI model to provide relevant and tailor
<td><b>dual_publishing_score_threshold</b></td>
<td>Minimum score threshold for suggestions to be presented as commitable PR comments in addition to the table. Default is -1 (disabled).</td>
</tr>
<tr>
<td><b>focus_only_on_problems</b></td>
<td>If set to true, suggestions will focus primarily on identifying and fixing code problems, and less on style considerations like best practices, maintainability, or readability. Default is true.</td>
</tr>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
@ -299,6 +342,10 @@ Using a combination of both can help the AI model to provide relevant and tailor
<td><b>wiki_page_accepted_suggestions</b></td>
<td>If set to true, the tool will automatically track accepted suggestions in a dedicated wiki page called `.pr_agent_accepted_suggestions`. Default is true.</td>
</tr>
<tr>
<td><b>allow_thumbs_up_down</b></td>
<td>If set to true, all code suggestions will have thumbs up and thumbs down buttons, to encourage users to provide feedback on the suggestions. Default is false.</td>
</tr>
</table>
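As a minimal sketch, a configuration exercising the general options above (the values are illustrative, not recommendations) could look like:
```toml
[pr_code_suggestions]
focus_only_on_problems = true
dual_publishing_score_threshold = 7  # also publish suggestions scoring >= 7 as commitable comments
persistent_comment = true
allow_thumbs_up_down = true
```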
??? example "Params for number of suggestions and AI calls"
@ -316,10 +363,6 @@ Using a combination of both can help the AI model to provide relevant and tailor
<td><b>max_number_of_calls</b></td>
<td>Maximum number of chunks. Default is 3.</td>
</tr>
</table>
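Similarly, a hedged sketch for these parameters; only `max_number_of_calls` appears in the excerpt above, and the per-chunk key is an assumption to be checked against the configuration file:
```toml
[pr_code_suggestions]
max_number_of_calls = 3  # maximum number of chunks, as documented above
num_code_suggestions_per_chunk = 4  # assumed key name, not shown in the excerpt above
```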
## A note on code suggestions quality


@ -39,68 +39,19 @@ pr_commands = [
]
[pr_reviewer]
extra_instructions = "..."
...
```
- The `pr_commands` lists commands that will be executed automatically when a PR is opened.
- The `[pr_reviewer]` section contains the configurations for the `review` tool you want to edit (if any).
## Configuration options
!!! example "General options"
<table>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the review comment will be persistent, meaning that every new review request will edit the previous one. Default is true.</td>
@ -140,7 +91,7 @@ num_code_suggestions = ...
</tr>
<tr>
<td><b>require_ticket_analysis_review</b></td>
<td>If set to true, and the PR contains a GitHub or Jira ticket link, the tool will add a section that checks if the PR in fact fulfilled the ticket requirements. Default is true.</td>
</tr>
</table>
@ -189,9 +140,9 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
!!! tip "Automation"
When you first install the Qodo Merge app, the [default mode](../usage-guide/automations_and_usage.md#github-app-automatic-tools-when-a-new-pr-is-opened) for the `review` tool is:
```
pr_commands = ["/review", ...]
```
Meaning the `review` tool will run automatically on every PR, without any additional configurations.
Edit this field to enable/disable the tool, or to change the configurations used.
!!! tip "Possible labels from the review tool"
@ -249,13 +200,8 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
maximal_review_effort = 5
```
!!! tip "Code suggestions"
The `review` tool previously included a legacy feature for providing code suggestions (controlled by `--pr_reviewer.num_code_suggestions`). This functionality has been deprecated and replaced by the [`improve`](./improve.md) tool, which offers higher quality and more actionable code suggestions.


@ -49,9 +49,10 @@ It can be invoked automatically from the analyze table, can be accessed by:
/analyze
```
Choose the components you want to find similar code for, and click on the `similar` checkbox.
![analyze similar](https://codium.ai/images/pr_agent/analyze_similar.png){width=768}
You can search for similar code either within the organization's codebase or globally, which includes open-source repositories. Each result will include the relevant code components along with their associated license details.
![similar code global](https://codium.ai/images/pr_agent/similar_code_global.png){width=768}


@ -17,3 +17,4 @@ Under the section `pr_update_changelog`, the [configuration file](https://github
- `push_changelog_changes`: whether to push the changes to CHANGELOG.md, or just print them. Default is false (print only).
- `extra_instructions`: Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...
- `add_pr_link`: whether the model should try to add a link to the PR in the changelog. Default is true.


@ -1,4 +1,5 @@
## Local repo (CLI)
When running from your locally cloned Qodo Merge repo (CLI), your local configuration file will be used.
Examples of invoking the different tools via the CLI:
@ -35,9 +36,29 @@ This is useful for debugging or experimenting with different tools.
Default is "github".
### CLI Health Check
To verify that Qodo Merge has been configured correctly, you can run this health check command from the repository root:
```bash
python -m tests.health_test.main
```
If the health check passes, you will see the following output at the end of the run:
```
========
Health test passed successfully
========
```
Before running the health check, ensure you have the following (a minimal configuration sketch follows this list):
- Configured your [LLM provider](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/)
- Added a valid GitHub token to your configuration file
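As a minimal sketch, assuming OpenAI as the LLM provider and the key names from `pr_agent/settings/.secrets_template.toml`, the corresponding `.secrets.toml` entries would look like:
```toml
[openai]
key = "<your OpenAI key>"

[github]
user_token = "<your GitHub personal access token>"
```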
## Online usage
Online usage means invoking Qodo Merge tools by [comments](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR.
Commands for invoking the different tools via comments:
@ -58,59 +79,80 @@ For example, if you want to edit the `review` tool configurations, you can run:
Any configuration value in the [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) can be similarly edited. Comment `/config` to see the list of available configurations.
## Qodo Merge Automatic Feedback
### Disabling all automatic feedback
To easily disable all automatic feedback from Qodo Merge (GitHub App, GitLab Webhook, BitBucket App, Azure DevOps Webhook), set in a configuration file:
```toml
[config]
disable_auto_feedback = true
```
When this parameter is set to `true`, Qodo Merge will not run any automatic tools (like `describe`, `review`, `improve`) when a new PR is opened, or when new code is pushed to an open PR.
### GitHub App
!!! note "Configurations for Qodo Merge Pro"
Qodo Merge Pro for GitHub is an App, hosted by CodiumAI. So all the instructions below are also relevant for Qodo Merge Pro users.
Same goes for [GitLab webhook](#gitlab-webhook) and [BitBucket App](#bitbucket-app) sections.
#### GitHub app automatic tools when a new PR is opened
The [github_app](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L220) section defines GitHub app specific configurations.
The configuration parameter `pr_commands` defines the list of tools that will be **run automatically** when a new PR is opened:
```toml
[github_app]
pr_commands = [
"/describe",
"/review",
"/improve",
]
```
This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the `describe`, `review` and `improve` tools.
You can override the default tool parameters by using one of the three options for a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/): **wiki**, **local**, or **global**.
For example, if your configuration file contains:
```toml
[pr_description]
generate_ai_title = true
```
Every time you run the `describe` tool (including automatic runs) the PR title will be generated by the AI.
You can customize configurations specifically for automated runs by using the `--config_path=<value>` parameter.
For instance, to modify the `review` tool settings only for newly opened PRs, use:
```toml
[github_app]
pr_commands = [
"/describe",
"/review --pr_reviewer.extra_instructions='focus on the file: ...'",
"/improve",
]
```
#### GitHub app automatic tools for push actions (commits to an open PR)
In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.
The configuration toggle `handle_push_trigger` can be used to enable this feature.
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the PR.
```toml
[github_app]
handle_push_trigger = true
push_commands = [
"/describe",
"/review",
]
```
This means that when new code is pushed to the PR, Qodo Merge will run the `describe` and `review` tools, with the specified parameters.
### GitHub Action
`GitHub Action` is a different way to trigger Qodo Merge tools, and uses a different configuration mechanism than `GitHub App`.<br>
You can configure settings for `GitHub Action` by adding environment variables under the `env` section in the `.github/workflows/pr_agent.yml` file.
Specifically, start by setting the following environment variables:
@ -121,7 +163,7 @@ Specifically, start by setting the following environment variables:
github_action_config.auto_review: "true" # enable\disable auto review
github_action_config.auto_describe: "true" # enable\disable auto describe
github_action_config.auto_improve: "true" # enable\disable auto improve
github_action_config.pr_actions: '["opened", "reopened", "ready_for_review", "review_requested"]'
```
`github_action_config.auto_review`, `github_action_config.auto_describe` and `github_action_config.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
If not set, the default configuration is for all three tools to run automatically when a new PR is opened.
@ -136,19 +178,22 @@ The JSON structure is equivalent to the yaml data structure defined in [pr_revie
Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) in the root of your repo.
For example, you can set an environment variable: `pr_description.publish_labels=false`, or add a `.pr_agent.toml` file with the following content:
```toml
[pr_description]
publish_labels = false
```
to prevent Qodo Merge from publishing labels when running the `describe` tool.
### GitLab Webhook
After setting up a GitLab webhook, to control which commands will run automatically when a new MR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
```toml
[gitlab]
pr_commands = [
"/describe",
"/review",
"/improve",
]
```
@ -156,24 +201,24 @@ pr_commands = [
the GitLab webhook can also respond to new code that is pushed to an open MR.
The configuration toggle `handle_push_trigger` can be used to enable this feature.
The configuration parameter `push_commands` defines the list of tools that will be **run automatically** when new code is pushed to the MR.
```toml
[gitlab]
handle_push_trigger = true
push_commands = [
"/describe",
"/review",
]
```
Note that to use the `handle_push_trigger` feature, you also need to give the GitLab webhook the "Push events" scope.
### BitBucket App
Similar to GitHub app, when running Qodo Merge from BitBucket App, the default [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.
By uploading a local `.pr_agent.toml` file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload `.pr_agent.toml` prior to creating a PR, in order for the configuration to take effect.
For example, if your local `.pr_agent.toml` file contains:
```toml
[pr_reviewer]
extra_instructions = "Answer in Japanese"
```
@ -182,29 +227,39 @@ Each time you invoke a `/review` tool, it will use the extra instructions you se
Note that among other limitations, BitBucket provides relatively low rate-limits for applications (up to 1000 requests per hour), and does not provide an API to track the actual rate-limit usage.
If you experience a lack of responses from Qodo Merge, you might want to set: `bitbucket_app.avoid_full_files=true` in your configuration file.
This will prevent Qodo Merge from acquiring the full file content, and will only use the diff content. This will reduce the number of requests made to BitBucket, at the cost of a small decrease in accuracy, as dynamic context will not be applicable.
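A minimal sketch of that setting in a configuration file:
```toml
[bitbucket_app]
avoid_full_files = true  # use only the diff content, reducing BitBucket API requests
```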
#### BitBucket Self-Hosted App automatic tools
To control which commands will run automatically when a new PR is opened, you can set the `pr_commands` parameter in the configuration file:
Specifically, set the following values:
```toml
[bitbucket_app]
pr_commands = [
"/review",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]
```
Note that specifically for Bitbucket we recommend using `--pr_code_suggestions.suggestions_score_threshold=7`, and this is the default value we set for Bitbucket.
Since this platform only supports inline code suggestions, we want to present only a limited number of high-scoring suggestions.
To enable the BitBucket app to respond to each **push** to the PR, set (for example):
```toml
[bitbucket_app]
handle_push_trigger = true
push_commands = [
"/describe",
"/review",
]
```
### Azure DevOps provider
To use the Azure DevOps provider, use the following settings in configuration.toml:
```toml
[config]
git_provider="azure"
```
@ -223,14 +278,14 @@ org = "https://dev.azure.com/YOUR_ORGANIZATION/"
# pat = "YOUR_PAT_TOKEN" needed only if using PAT for authentication
```
#### Azure DevOps Webhook
To control which commands will run automatically when a new PR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
```toml
[azure_devops_server]
pr_commands = [
"/describe",
"/review",
"/improve",
]
```

View File

@ -5,7 +5,6 @@ To use a different model than the default (GPT-4), you need to edit in the [conf
```
[config]
model = "..."
fallback_models = ["..."]
```
@ -27,9 +26,8 @@ deployment_id = "" # The deployment name you chose when you deployed the engine
and set in your configuration file:
```
[config]
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-4o)
fallback_models=["..."]
```
### Hugging Face
@ -52,7 +50,6 @@ MAX_TOKENS={
[config] # in configuration.toml
model = "ollama/llama2"
fallback_models=["ollama/llama2"]
[ollama] # in .secrets.toml
@ -76,7 +73,6 @@ MAX_TOKENS={
}
[config] # in configuration.toml
model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
[huggingface] # in .secrets.toml
@ -91,7 +87,6 @@ To use Llama2 model with Replicate, for example, set:
```
[config] # in configuration.toml
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
[replicate] # in .secrets.toml
key = ...
@ -107,7 +102,6 @@ To use Llama3 model with Groq, for example, set:
```
[config] # in configuration.toml
model = "llama3-70b-8192"
fallback_models = ["groq/llama3-70b-8192"]
[groq] # in .secrets.toml
key = ... # your Groq api key
@ -121,7 +115,6 @@ To use Google's Vertex AI platform and its associated models (chat-bison/codecha
```
[config] # in configuration.toml
model = "vertex_ai/codechat-bison"
fallback_models="vertex_ai/codechat-bison"
[vertexai] # in .secrets.toml
@ -140,7 +133,6 @@ To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant
```toml
[config] # in configuration.toml
model="google_ai_studio/gemini-1.5-flash"
fallback_models=["google_ai_studio/gemini-1.5-flash"]
[google_ai_studio] # in .secrets.toml
@ -156,7 +148,6 @@ To use Anthropic models, set the relevant models in the configuration section of
```
[config]
model="anthropic/claude-3-opus-20240229"
fallback_models=["anthropic/claude-3-opus-20240229"]
```
@ -173,7 +164,6 @@ To use Amazon Bedrock and its foundational models, add the below configuration:
```
[config] # in configuration.toml
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
fallback_models=["bedrock/anthropic.claude-v2:1"]
```
@ -195,7 +185,6 @@ If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agen
```
[config]
model="custom_model_name"
fallback_models=["custom_model_name"]
```
(2) Set the maximal tokens for the model:
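The excerpt ends here; as a sketch, and assuming the key is named `custom_model_max_tokens` (an assumption — verify against the configuration file), step (2) would look like:
```
[config]
custom_model_max_tokens = 8192  # assumed key name; the context window of your custom model
```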


@ -10,4 +10,3 @@ Specifically, CLI commands can be issued by invoking a pre-built [docker image](
For online usage, you will need to set up either a [GitHub App](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app) or a [GitHub Action](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) (GitHub), a [GitLab webhook](https://qodo-merge-docs.qodo.ai/installation/gitlab/#run-a-gitlab-webhook-server) (GitLab), or a [BitBucket App](https://qodo-merge-docs.qodo.ai/installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app) (BitBucket).
These platforms also make it possible to run Qodo Merge tools automatically when a new PR is opened, or on each push to a branch.


@ -43,6 +43,7 @@ nav:
- 💎 Similar Code: 'tools/similar_code.md'
- Core Abilities:
- 'core-abilities/index.md'
- Fetching ticket context: 'core-abilities/fetching_ticket_context.md'
- Local and global metadata: 'core-abilities/metadata.md'
- Dynamic context: 'core-abilities/dynamic_context.md'
- Self-reflection: 'core-abilities/self_reflection.md'


@ -3,5 +3,5 @@
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-M6PJSFV');</script>
<!-- End Google Tag Manager -->


@ -3,7 +3,6 @@ from functools import partial
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
@ -14,7 +13,6 @@ from pr_agent.tools.pr_config import PRConfig
from pr_agent.tools.pr_description import PRDescription
from pr_agent.tools.pr_generate_labels import PRGenerateLabels
from pr_agent.tools.pr_help_message import PRHelpMessage
from pr_agent.tools.pr_line_questions import PR_LineQuestions
from pr_agent.tools.pr_questions import PRQuestions
from pr_agent.tools.pr_reviewer import PRReviewer
@ -26,8 +24,6 @@ command2class = {
"answer": PRReviewer, "answer": PRReviewer,
"review": PRReviewer, "review": PRReviewer,
"review_pr": PRReviewer, "review_pr": PRReviewer,
"reflect": PRInformationFromUser,
"reflect_and_review": PRInformationFromUser,
"describe": PRDescription, "describe": PRDescription,
"describe_pr": PRDescription, "describe_pr": PRDescription,
"improve": PRCodeSuggestions, "improve": PRCodeSuggestions,
@ -50,7 +46,6 @@ commands = list(command2class.keys())
class PRAgent:
    def __init__(self, ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
        self.ai_handler = ai_handler  # will be initialized in run_action

    async def handle_request(self, pr_url, request, notify=None) -> bool:
        # First, apply repo specific settings if exists
@ -65,10 +60,13 @@ class PRAgent:
        else:
            action, *args = request

        forbidden_cli_args = ['enable_auto_approval', 'base_url', 'url', 'app_name', 'secret_provider',
                              'git_provider', 'skip_keys', 'key', 'ANALYTICS_FOLDER', 'uri', 'app_id', 'webhook_secret',
                              'bearer_token', 'PERSONAL_ACCESS_TOKEN', 'override_deployment_type', 'private_key',
                              'api_base', 'api_type', 'api_version']
        if args:
            for forbidden_arg in forbidden_cli_args:
                for arg in args:
                    if forbidden_arg.lower() in arg.lower():
                        get_logger().error(
                            f"CLI argument for param '{forbidden_arg}' is forbidden. Use instead a configuration file."
                        )
@ -77,12 +75,10 @@ class PRAgent:
        action = action.lstrip("/").lower()
        if action not in command2class:
            get_logger().error(f"Unknown command: {action}")
            return False
        with get_logger().contextualize(command=action, pr_url=pr_url):
            get_logger().info("PR-Agent request handler started", analytics=True)
            if action == "answer":
                if notify:
                    notify()


@ -19,10 +19,13 @@ MAX_TOKENS = {
    'gpt-4o-mini': 128000,  # 128K, but may be limited by config.max_model_tokens
    'gpt-4o-mini-2024-07-18': 128000,  # 128K, but may be limited by config.max_model_tokens
    'gpt-4o-2024-08-06': 128000,  # 128K, but may be limited by config.max_model_tokens
    'gpt-4o-2024-11-20': 128000,  # 128K, but may be limited by config.max_model_tokens
    'o1-mini': 128000,  # 128K, but may be limited by config.max_model_tokens
    'o1-mini-2024-09-12': 128000,  # 128K, but may be limited by config.max_model_tokens
    'o1-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
    'o1-preview-2024-09-12': 128000,  # 128K, but may be limited by config.max_model_tokens
    'o1-2024-12-17': 204800,  # 200K, but may be limited by config.max_model_tokens
    'o1': 204800,  # 200K, but may be limited by config.max_model_tokens
    'claude-instant-1': 100000,
    'claude-2': 100000,
    'command-nightly': 4096,
@ -31,6 +34,7 @@ MAX_TOKENS = {
    'vertex_ai/codechat-bison': 6144,
    'vertex_ai/codechat-bison-32k': 32000,
    'vertex_ai/claude-3-haiku@20240307': 100000,
    'vertex_ai/claude-3-5-haiku@20241022': 100000,
    'vertex_ai/claude-3-sonnet@20240229': 100000,
    'vertex_ai/claude-3-opus@20240229': 100000,
    'vertex_ai/claude-3-5-sonnet@20240620': 100000,
@ -40,6 +44,7 @@ MAX_TOKENS = {
    'vertex_ai/gemma2': 8200,
    'gemini/gemini-1.5-pro': 1048576,
    'gemini/gemini-1.5-flash': 1048576,
    'gemini/gemini-2.0-flash-exp': 1048576,
    'codechat-bison': 6144,
    'codechat-bison-32k': 32000,
    'anthropic.claude-instant-v1': 100000,
@ -48,20 +53,23 @@ MAX_TOKENS = {
'anthropic/claude-3-opus-20240229': 100000, 'anthropic/claude-3-opus-20240229': 100000,
'anthropic/claude-3-5-sonnet-20240620': 100000, 'anthropic/claude-3-5-sonnet-20240620': 100000,
'anthropic/claude-3-5-sonnet-20241022': 100000, 'anthropic/claude-3-5-sonnet-20241022': 100000,
'anthropic/claude-3-5-haiku-20241022': 100000,
'bedrock/anthropic.claude-instant-v1': 100000, 'bedrock/anthropic.claude-instant-v1': 100000,
'bedrock/anthropic.claude-v2': 100000, 'bedrock/anthropic.claude-v2': 100000,
'bedrock/anthropic.claude-v2:1': 100000, 'bedrock/anthropic.claude-v2:1': 100000,
'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000, 'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000,
'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000, 'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000,
'bedrock/anthropic.claude-3-5-haiku-20241022-v1:0': 100000,
'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000, 'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000,
'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000, 'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000,
"bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0": 100000,
'claude-3-5-sonnet': 100000, 'claude-3-5-sonnet': 100000,
'groq/llama3-8b-8192': 8192, 'groq/llama3-8b-8192': 8192,
'groq/llama3-70b-8192': 8192, 'groq/llama3-70b-8192': 8192,
'groq/llama-3.1-8b-instant': 8192,
'groq/llama-3.3-70b-versatile': 128000,
'groq/mixtral-8x7b-32768': 32768, 'groq/mixtral-8x7b-32768': 32768,
'groq/llama-3.1-8b-instant': 131072, 'groq/gemma2-9b-it': 8192,
'groq/llama-3.1-70b-versatile': 131072,
'groq/llama-3.1-405b-reasoning': 131072,
'ollama/llama3': 4096, 'ollama/llama3': 4096,
'watsonx/meta-llama/llama-3-8b-instruct': 4096, 'watsonx/meta-llama/llama-3-8b-instruct': 4096,
"watsonx/meta-llama/llama-3-70b-instruct": 4096, "watsonx/meta-llama/llama-3-70b-instruct": 4096,

View File

@@ -1,17 +1,18 @@
 try:
-    from langchain_openai import ChatOpenAI, AzureChatOpenAI
-    from langchain_core.messages import SystemMessage, HumanMessage
+    from langchain_core.messages import HumanMessage, SystemMessage
+    from langchain_openai import AzureChatOpenAI, ChatOpenAI
 except:  # we don't enforce langchain as a dependency, so if it's not installed, just move on
     pass

+import functools
+
+from openai import APIError, RateLimitError, Timeout
+from retry import retry
+
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
-from openai import APIError, RateLimitError, Timeout
-from retry import retry
-import functools

 OPENAI_RETRIES = 5

@@ -73,4 +74,3 @@ class LangChainOpenAIHandler(BaseAiHandler):
             raise ValueError(f"OpenAI {e.name} is required") from e
         else:
             raise e

View File

@@ -1,11 +1,13 @@
 import os
-import requests
+
 import litellm
 import openai
+import requests
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt

 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
+from pr_agent.algo.utils import get_version
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger

@@ -131,7 +133,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                 if "langfuse" in callbacks:
                     metadata.update({
                         "trace_name": command,
-                        "tags": [git_provider, command],
+                        "tags": [git_provider, command, f'version:{get_version()}'],
                         "trace_metadata": {
                             "command": command,
                             "pr_url": pr_url,

@@ -140,7 +142,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                 if "langsmith" in callbacks:
                     metadata.update({
                         "run_name": command,
-                        "tags": [git_provider, command],
+                        "tags": [git_provider, command, f'version:{get_version()}'],
                         "extra": {
                             "metadata": {
                                 "command": command,

@@ -191,8 +193,8 @@ class LiteLLMAIHandler(BaseAiHandler):
             messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
                                       {"type": "image_url", "image_url": {"url": img_path}}]

-        # Currently O1 does not support separate system and user prompts
-        O1_MODEL_PREFIX = 'o1-'
+        # Currently, model OpenAI o1 series does not support a separate system and user prompts
+        O1_MODEL_PREFIX = 'o1'
         model_type = model.split('/')[-1] if '/' in model else model
         if model_type.startswith(O1_MODEL_PREFIX):
             user = f"{system}\n\n\n{user}"

View File

@@ -4,6 +4,7 @@ import openai
 from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
 from retry import retry

+from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger

@@ -41,7 +42,6 @@ class OpenAIHandler(BaseAiHandler):
            tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
     async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
         try:
-            deployment_id = self.deployment_id
             get_logger().info("System: ", system)
             get_logger().info("User: ", user)
             messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]

View File

@@ -3,8 +3,8 @@ from __future__ import annotations
 import re
 import traceback

-from pr_agent.config_loader import get_settings
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger

@@ -31,7 +31,7 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
 def decode_if_bytes(original_file_str):
-    if isinstance(original_file_str, bytes):
+    if isinstance(original_file_str, (bytes, bytearray)):
         try:
             return original_file_str.decode('utf-8')
         except UnicodeDecodeError:
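decode_if_bytes now also accepts bytearray input. A minimal, self-contained sketch of the idea: accept str, bytes, or bytearray and always hand back text. The fallback encodings here are an assumption for illustration, not the repository's exact list:

```python
def decode_if_bytes_sketch(content):
    if isinstance(content, (bytes, bytearray)):
        try:
            return content.decode('utf-8')
        except UnicodeDecodeError:
            for encoding in ('latin-1', 'utf-16'):  # hypothetical fallbacks
                try:
                    return content.decode(encoding)
                except UnicodeDecodeError:
                    continue
            return ""
    return content

print(decode_if_bytes_sketch(b'print("hi")'))  # -> print("hi")
print(decode_if_bytes_sketch('already text'))  # -> already text
```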
@@ -61,23 +61,26 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
     patch_lines = patch_str.splitlines()
     extended_patch_lines = []

+    is_valid_hunk = True
     start1, size1, start2, size2 = -1, -1, -1, -1
     RE_HUNK_HEADER = re.compile(
         r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

     try:
-        for line in patch_lines:
+        for i, line in enumerate(patch_lines):
             if line.startswith('@@'):
                 match = RE_HUNK_HEADER.match(line)
                 # identify hunk header
                 if match:
                     # finish processing previous hunk
-                    if start1 != -1 and patch_extra_lines_after > 0:
+                    if is_valid_hunk and (start1 != -1 and patch_extra_lines_after > 0):
                         delta_lines = [f' {line}' for line in original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]]
                         extended_patch_lines.extend(delta_lines)

                     section_header, size1, size2, start1, start2 = extract_hunk_headers(match)

-                    if patch_extra_lines_before > 0 or patch_extra_lines_after > 0:
+                    is_valid_hunk = check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1)
+
+                    if is_valid_hunk and (patch_extra_lines_before > 0 or patch_extra_lines_after > 0):
                         def _calc_context_limits(patch_lines_before):
                             extended_start1 = max(1, start1 - patch_lines_before)
                             extended_size1 = size1 + (start1 - extended_start1) + patch_extra_lines_after

@@ -138,7 +141,7 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
             return patch_str

     # finish processing last hunk
-    if start1 != -1 and patch_extra_lines_after > 0:
+    if start1 != -1 and patch_extra_lines_after > 0 and is_valid_hunk:
         delta_lines = original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]
         # add space at the beginning of each extra line
         delta_lines = [f' {line}' for line in delta_lines]

@@ -148,6 +151,23 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
     return extended_patch_str


+def check_if_hunk_lines_matches_to_file(i, original_lines, patch_lines, start1):
+    """
+    Check if the hunk lines match the original file content. We saw cases where the hunk header line doesn't match the original file content, and then
+    extending the hunk with extra lines before the hunk header can cause the hunk to be invalid.
+    """
+    is_valid_hunk = True
+    try:
+        if i + 1 < len(patch_lines) and patch_lines[i + 1][0] == ' ':  # an existing line in the file
+            if patch_lines[i + 1].strip() != original_lines[start1 - 1].strip():
+                is_valid_hunk = False
+                get_logger().error(
+                    f"Invalid hunk in PR, line {start1} in hunk header doesn't match the original file content")
+    except:
+        pass
+    return is_valid_hunk
+
+
 def extract_hunk_headers(match):
     res = list(match.groups())
     for i in range(len(res)):

@@ -344,7 +364,7 @@ __old hunk__
 def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, side) -> tuple[str, str]:
+    try:
         patch_with_lines_str = f"\n\n## File: '{file_name.strip()}'\n\n"
         selected_lines = ""
         patch_lines = patch.splitlines()

@@ -387,5 +407,8 @@ def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, s
                     patch_with_lines_str += line + '\n'
                 if not line.startswith('-'):  # currently we don't support /ask line for deleted lines
                     selected_lines_num += 1
+    except Exception as e:
+        get_logger().error(f"Failed to extract hunk lines from patch: {e}", artifact={"traceback": traceback.format_exc()})
+        return "", ""

     return patch_with_lines_str.rstrip(), selected_lines.rstrip()
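The new validity check compares the first context line after a hunk header against the original file, using positions parsed from that header. For reference, this is what the RE_HUNK_HEADER regex (copied verbatim from the diff) extracts from a header line:

```python
import re

RE_HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

m = RE_HUNK_HEADER.match("@@ -61,23 +61,26 @@ def process_patch_lines(...):")
start1, size1, start2, size2, section_header = m.groups()
print(start1, size1, start2, size2)  # -> 61 23 61 26
print(section_header)                # -> def process_patch_lines(...):
```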

View File

@@ -4,8 +4,6 @@ from typing import Dict
 from pr_agent.config_loader import get_settings


 def filter_bad_extensions(files):
     # Bad Extensions, source: https://github.com/EleutherAI/github-downloader/blob/345e7c4cbb9e0dc8a0615fd995a08bf9d73b3fe6/download_repo_text.py  # noqa: E501
     bad_extensions = get_settings().bad_extensions.default

View File

@@ -5,14 +5,15 @@ from typing import Callable, List, Tuple
 from github import RateLimitExceededException

-from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions
-from pr_agent.algo.language_handler import sort_files_by_main_languages
 from pr_agent.algo.file_filter import filter_ignored
+from pr_agent.algo.git_patch_processing import (
+    convert_to_hunks_with_lines_numbers, extend_patch, handle_patch_deletions)
+from pr_agent.algo.language_handler import sort_files_by_main_languages
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import get_max_tokens, clip_tokens, ModelType
+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.algo.utils import ModelType, clip_tokens, get_max_tokens, get_weak_model
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.git_provider import GitProvider
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.log import get_logger

 DELETED_FILES_ = "Deleted files:\n"

@@ -315,13 +316,13 @@ def generate_full_patch(convert_hunks_to_line_numbers, file_dict, max_tokens_mod
             # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
             # until we meet the requirements
             if get_settings().config.verbosity_level >= 2:
-                get_logger().warning(f"Patch too large, skipping it, {filename}")
+                get_logger().warning(f"Patch too large, skipping it: '{filename}'")
             remaining_files_list_new.append(filename)
             continue

         if patch:
             if not convert_hunks_to_line_numbers:
-                patch_final = f"\n\n## File: '{filename.strip()}\n\n{patch.strip()}\n'"
+                patch_final = f"\n\n## File: '{filename.strip()}'\n\n{patch.strip()}\n"
             else:
                 patch_final = "\n\n" + patch.strip()
             patches.append(patch_final)

@@ -353,8 +354,8 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT
 def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]:
-    if model_type == ModelType.TURBO:
-        model = get_settings().config.model_turbo
+    if model_type == ModelType.WEAK:
+        model = get_weak_model()
     else:
         model = get_settings().config.model
     fallback_models = get_settings().config.fallback_models
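ModelType.TURBO and config.model_turbo give way to ModelType.WEAK, resolved through get_weak_model() (defined later in the utils diff). A stand-in sketch of that selection, with a plain dict in place of the real Dynaconf settings object:

```python
# Hypothetical settings stand-in; the real code reads Dynaconf configuration.
settings = {"config.model": "gpt-4o-2024-11-20", "config.model_weak": ""}

def get_weak_model_sketch() -> str:
    # fall back to the main model when no weak model is configured
    return settings.get("config.model_weak") or settings["config.model"]

print(get_weak_model_sketch())  # -> gpt-4o-2024-11-20
```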

View File

@@ -1,8 +1,9 @@
-from jinja2 import Environment, StrictUndefined
-from tiktoken import encoding_for_model, get_encoding
-from pr_agent.config_loader import get_settings
 from threading import Lock

+from jinja2 import Environment, StrictUndefined
+from tiktoken import encoding_for_model, get_encoding
+
+from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger

View File

@@ -1,5 +1,6 @@
 from dataclasses import dataclass
 from enum import Enum
+from typing import Optional


 class EDIT_TYPE(Enum):

@@ -21,4 +22,5 @@ class FilePatchInfo:
     old_filename: str = None
     num_plus_lines: int = -1
     num_minus_lines: int = -1
+    language: Optional[str] = None
     ai_file_summary: str = None

View File

@@ -7,14 +7,15 @@ import html
 import json
 import os
 import re
+import sys
 import textwrap
 import time
 import traceback
 from datetime import datetime
 from enum import Enum
+from importlib.metadata import PackageNotFoundError, version
 from typing import Any, List, Tuple

 import html2text
 import requests
 import yaml

@@ -22,11 +23,19 @@ from pydantic import BaseModel
 from starlette_context import context

 from pr_agent.algo import MAX_TOKENS
+from pr_agent.algo.git_patch_processing import extract_hunk_lines_from_patch
 from pr_agent.algo.token_handler import TokenEncoder
-from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.algo.types import FilePatchInfo
+from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.log import get_logger


+def get_weak_model() -> str:
+    if get_settings().get("config.model_weak"):
+        return get_settings().config.model_weak
+    return get_settings().config.model
+
+
 class Range(BaseModel):
     line_start: int  # should be 0-indexed
     line_end: int

@@ -35,8 +44,7 @@ class Range(BaseModel):
 class ModelType(str, Enum):
     REGULAR = "regular"
-    TURBO = "turbo"
+    WEAK = "weak"


 class PRReviewHeader(str, Enum):
     REGULAR = "## PR Reviewer Guide"

@@ -97,7 +105,8 @@ def unique_strings(input_list: List[str]) -> List[str]:
 def convert_to_markdown_v2(output_data: dict,
                            gfm_supported: bool = True,
                            incremental_review=None,
-                           git_provider=None) -> str:
+                           git_provider=None,
+                           files=None) -> str:
     """
     Convert a dictionary of data into markdown format.
     Args:

@@ -173,7 +182,7 @@ def convert_to_markdown_v2(output_data: dict,
             if is_value_no(value):
                 markdown_text += f'### {emoji} No relevant tests\n\n'
             else:
-                markdown_text += f"### PR contains tests\n\n"
+                markdown_text += f"### {emoji} PR contains tests\n\n"
         elif 'ticket compliance check' in key_nice.lower():
             markdown_text = ticket_markdown_logic(emoji, markdown_text, value, gfm_supported)
         elif 'security concerns' in key_nice.lower():

@@ -221,15 +230,31 @@ def convert_to_markdown_v2(output_data: dict,
                     continue
                 relevant_file = issue.get('relevant_file', '').strip()
                 issue_header = issue.get('issue_header', '').strip()
+                if issue_header.lower() == 'possible bug':
+                    issue_header = 'Possible Issue'  # Make the header less frightening
                 issue_content = issue.get('issue_content', '').strip()
                 start_line = int(str(issue.get('start_line', 0)).strip())
                 end_line = int(str(issue.get('end_line', 0)).strip())
+
+                relevant_lines_str = extract_relevant_lines_str(end_line, files, relevant_file, start_line, dedent=True)
+
+                if git_provider:
                     reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
+                else:
+                    reference_link = None

                 if gfm_supported:
+                    if reference_link is not None and len(reference_link) > 0:
+                        if relevant_lines_str:
+                            issue_str = f"<details><summary><a href='{reference_link}'><strong>{issue_header}</strong></a>\n\n{issue_content}</summary>\n\n{relevant_lines_str}\n\n</details>"
+                        else:
                             issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
+                    else:
+                        issue_str = f"<strong>{issue_header}</strong><br>{issue_content}"
                 else:
+                    if reference_link is not None and len(reference_link) > 0:
                         issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
+                    else:
+                        issue_str = f"**{issue_header}**\n\n{issue_content}\n\n"
                 markdown_text += f"{issue_str}\n\n"
             except Exception as e:
                 get_logger().exception(f"Failed to process 'Recommended focus areas for review': {e}")

@@ -246,25 +271,49 @@ def convert_to_markdown_v2(output_data: dict,
     if gfm_supported:
         markdown_text += "</table>\n"

-    if 'code_feedback' in output_data:
-        if gfm_supported:
-            markdown_text += f"\n\n"
-            markdown_text += f"<details><summary> <strong>Code feedback:</strong></summary>\n\n"
-            markdown_text += "<hr>"
-        else:
-            markdown_text += f"\n\n### Code feedback:\n\n"
-        for i, value in enumerate(output_data['code_feedback']):
-            if value is None or value == '' or value == {} or value == []:
-                continue
-            markdown_text += parse_code_suggestion(value, i, gfm_supported)+"\n\n"
-        if markdown_text.endswith('<hr>'):
-            markdown_text = markdown_text[:-4]
-        if gfm_supported:
-            markdown_text += f"</details>"
-
     return markdown_text


+def extract_relevant_lines_str(end_line, files, relevant_file, start_line, dedent=False) -> str:
+    """
+    Finds 'relevant_file' in 'files', and extracts the lines from 'start_line' to 'end_line' string from the file content.
+    """
+    try:
+        relevant_lines_str = ""
+        if files:
+            files = set_file_languages(files)
+            for file in files:
+                if file.filename.strip() == relevant_file:
+                    if not file.head_file:
+                        # as a fallback, extract relevant lines directly from patch
+                        patch = file.patch
+                        get_logger().info(f"No content found in file: '{file.filename}' for 'extract_relevant_lines_str'. Using patch instead")
+                        _, selected_lines = extract_hunk_lines_from_patch(patch, file.filename, start_line, end_line, side='right')
+                        if not selected_lines:
+                            get_logger().error(f"Failed to extract relevant lines from patch: {file.filename}")
+                            return ""
+                        # filter out '-' lines
+                        relevant_lines_str = ""
+                        for line in selected_lines.splitlines():
+                            if line.startswith('-'):
+                                continue
+                            relevant_lines_str += line[1:] + '\n'
+                    else:
+                        relevant_file_lines = file.head_file.splitlines()
+                        relevant_lines_str = "\n".join(relevant_file_lines[start_line - 1:end_line])
+
+                    if dedent and relevant_lines_str:
+                        # Remove the longest leading string of spaces and tabs common to all lines.
+                        relevant_lines_str = textwrap.dedent(relevant_lines_str)
+                    relevant_lines_str = f"```{file.language}\n{relevant_lines_str}\n```"
+                    break
+
+        return relevant_lines_str
+    except Exception as e:
+        get_logger().exception(f"Failed to extract relevant lines: {e}")
+        return ""


 def ticket_markdown_logic(emoji, markdown_text, value, gfm_supported) -> str:
     ticket_compliance_str = ""
     final_compliance_level = -1
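extract_relevant_lines_str is called with dedent=True so snippets lifted from deep inside a function render flush-left in the review comment. textwrap.dedent removes only the indentation common to all lines, preserving relative nesting:

```python
import textwrap

snippet = (
    "        if line.startswith('-'):\n"
    "            continue\n"
)
print(textwrap.dedent(snippet))
# if line.startswith('-'):
#     continue
```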
@@ -534,27 +583,20 @@ def load_large_diff(filename, new_file_content_str: str, original_file_content_s
     """
     Generate a patch for a modified file by comparing the original content of the file with the new content provided as
     input.
-
-    Args:
-        new_file_content_str: The new content of the file as a string.
-        original_file_content_str: The original content of the file as a string.
-
-    Returns:
-        The generated or provided patch string.
-
-    Raises:
-        None.
     """
-    patch = ""
+    if not original_file_content_str and not new_file_content_str:
+        return ""
+
     try:
         diff = difflib.unified_diff(original_file_content_str.splitlines(keepends=True),
                                     new_file_content_str.splitlines(keepends=True))
         if get_settings().config.verbosity_level >= 2 and show_warning:
-            get_logger().warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
+            get_logger().info(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
         patch = ''.join(diff)
-    except Exception:
-        pass
-    return patch
+        return patch
+    except Exception as e:
+        get_logger().exception(f"Failed to generate patch for file: {filename}")
+        return ""


 def update_settings_from_args(args: List[str]) -> List[str]:
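load_large_diff now bails out early on empty inputs and logs failures instead of silently swallowing them. The core is difflib.unified_diff, whose generated lines join directly into a unified patch string:

```python
import difflib

original = "a = 1\nb = 2\n"
updated = "a = 1\nb = 3\n"

diff = difflib.unified_diff(original.splitlines(keepends=True),
                            updated.splitlines(keepends=True))
print(''.join(diff))
# ('---'/'+++' file headers come first, then:)
# @@ -1,2 +1,2 @@
#  a = 1
# -b = 2
# +b = 3
```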
@@ -1097,3 +1139,48 @@ def process_description(description_full: str) -> Tuple[str, List]:
         get_logger().exception(f"Failed to process description: {e}")

     return base_description_str, files
+
+
+def get_version() -> str:
+    # First check pyproject.toml if running directly out of repository
+    if os.path.exists("pyproject.toml"):
+        if sys.version_info >= (3, 11):
+            import tomllib
+            with open("pyproject.toml", "rb") as f:
+                data = tomllib.load(f)
+            if "project" in data and "version" in data["project"]:
+                return data["project"]["version"]
+            else:
+                get_logger().warning("Version not found in pyproject.toml")
+        else:
+            get_logger().warning("Unable to determine local version from pyproject.toml")
+
+    # Otherwise get the installed pip package version
+    try:
+        return version('pr-agent')
+    except PackageNotFoundError:
+        get_logger().warning("Unable to find package named 'pr-agent'")
+        return "unknown"
+
+
+def set_file_languages(diff_files) -> List[FilePatchInfo]:
+    try:
+        # if the language is already set, do not change it
+        if hasattr(diff_files[0], 'language') and diff_files[0].language:
+            return diff_files
+
+        # map file extensions to programming languages
+        language_extension_map_org = get_settings().language_extension_map_org
+        extension_to_language = {}
+        for language, extensions in language_extension_map_org.items():
+            for ext in extensions:
+                extension_to_language[ext] = language
+        for file in diff_files:
+            extension_s = '.' + file.filename.rsplit('.')[-1]
+            language_name = "txt"
+            if extension_s and (extension_s in extension_to_language):
+                language_name = extension_to_language[extension_s]
+            file.language = language_name.lower()
+    except Exception as e:
+        get_logger().exception(f"Failed to set file languages: {e}")
+
+    return diff_files

View File

@@ -3,8 +3,9 @@ import asyncio
 import os

 from pr_agent.agent.pr_agent import PRAgent, commands
+from pr_agent.algo.utils import get_version
 from pr_agent.config_loader import get_settings
-from pr_agent.log import setup_logger, get_logger
+from pr_agent.log import get_logger, setup_logger

 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)

@@ -45,6 +46,7 @@ def set_parser():
     To edit any configuration parameter from 'configuration.toml', just add -config_path=<value>.
     For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions="focus on the file: ..."'
     """)
+    parser.add_argument('--version', action='version', version=f'pr-agent {get_version()}')
     parser.add_argument('--pr_url', type=str, help='The URL of the PR to review', default=None)
     parser.add_argument('--issue_url', type=str, help='The URL of the Issue to review', default=None)
     parser.add_argument('command', type=str, help='The', choices=commands, default='review')
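The new --version flag relies on argparse's built-in version action, which prints the string and exits before any other argument handling:

```python
import argparse

parser = argparse.ArgumentParser(prog='cli.py')
# '0.26' is a placeholder here; the real code interpolates get_version().
parser.add_argument('--version', action='version', version='pr-agent 0.26')

# `python cli.py --version` would print "pr-agent 0.26" and exit with code 0.
```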

View File

@@ -1,14 +1,16 @@
+from starlette_context import context
+
 from pr_agent.config_loader import get_settings
+from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
 from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
-from pr_agent.git_providers.bitbucket_server_provider import BitbucketServerProvider
+from pr_agent.git_providers.bitbucket_server_provider import \
+    BitbucketServerProvider
 from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
+from pr_agent.git_providers.gerrit_provider import GerritProvider
 from pr_agent.git_providers.git_provider import GitProvider
 from pr_agent.git_providers.github_provider import GithubProvider
 from pr_agent.git_providers.gitlab_provider import GitLabProvider
 from pr_agent.git_providers.local_git_provider import LocalGitProvider
-from pr_agent.git_providers.azuredevops_provider import AzureDevopsProvider
-from pr_agent.git_providers.gerrit_provider import GerritProvider
-from starlette_context import context

 _GIT_PROVIDERS = {
     'github': GithubProvider,
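The reordered imports all feed the _GIT_PROVIDERS registry, which maps a configured provider name to its class. A minimal sketch of that dict-based dispatch, with stub classes standing in for the real providers:

```python
class GithubProvider: ...
class GitLabProvider: ...

_GIT_PROVIDERS = {'github': GithubProvider, 'gitlab': GitLabProvider}

def get_git_provider_class(name: str):
    try:
        return _GIT_PROVIDERS[name]
    except KeyError:
        raise ValueError(f"Unknown git provider: {name!r}") from None

print(get_git_provider_class('github').__name__)  # -> GithubProvider
```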

View File

@@ -2,33 +2,33 @@ import os
 from typing import Optional, Tuple
 from urllib.parse import urlparse

-from ..algo.file_filter import filter_ignored
-from ..log import get_logger
-from ..algo.language_handler import is_valid_file
-from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff, PRDescriptionHeader
-from ..config_loader import get_settings
-from .git_provider import GitProvider
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+
+from ..algo.file_filter import filter_ignored
+from ..algo.language_handler import is_valid_file
+from ..algo.utils import (PRDescriptionHeader, clip_tokens,
+                          find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
+from ..config_loader import get_settings
+from ..log import get_logger
+from .git_provider import GitProvider

 AZURE_DEVOPS_AVAILABLE = True
 ADO_APP_CLIENT_DEFAULT_ID = "499b84ac-1321-427f-aa17-267ca6975798/.default"
 MAX_PR_DESCRIPTION_AZURE_LENGTH = 4000-1

 try:
     # noinspection PyUnresolvedReferences
-    from msrest.authentication import BasicAuthentication
-    # noinspection PyUnresolvedReferences
     from azure.devops.connection import Connection
     # noinspection PyUnresolvedReferences
-    from azure.identity import DefaultAzureCredential
-    # noinspection PyUnresolvedReferences
-    from azure.devops.v7_1.git.models import (
-        Comment,
-        CommentThread,
-        GitVersionDescriptor,
-        GitPullRequest,
-        GitPullRequestIterationChanges,
-    )
+    from azure.devops.v7_1.git.models import (Comment, CommentThread,
+                                              GitPullRequest,
+                                              GitPullRequestIterationChanges,
+                                              GitVersionDescriptor)
+    # noinspection PyUnresolvedReferences
+    from azure.identity import DefaultAzureCredential
+    from msrest.authentication import BasicAuthentication
 except ImportError:
     AZURE_DEVOPS_AVAILABLE = False

@@ -67,14 +67,12 @@ class AzureDevopsProvider(GitProvider):
             relevant_lines_end = suggestion['relevant_lines_end']

             if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(
-                        f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
+                get_logger().warning(
+                    f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
                 continue

             if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(f"Failed to publish code suggestion, "
+                get_logger().warning(f"Failed to publish code suggestion, "
                                        f"relevant_lines_end is {relevant_lines_end} and "
                                        f"relevant_lines_start is {relevant_lines_start}")
                 continue

@@ -95,9 +93,11 @@ class AzureDevopsProvider(GitProvider):
                     "side": "RIGHT",
                 }
                 post_parameters_list.append(post_parameters)
+        if not post_parameters_list:
+            return False

-        try:
         for post_parameters in post_parameters_list:
+            try:
                 comment = Comment(content=post_parameters["body"], comment_type=1)
                 thread = CommentThread(comments=[comment],
                                        thread_context={

@@ -117,15 +117,11 @@ class AzureDevopsProvider(GitProvider):
                     repository_id=self.repo_slug,
                     pull_request_id=self.pr_num
                 )
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(
-                        f"Published code suggestion on {self.pr_num} at {post_parameters['path']}"
-                    )
-            return True
             except Exception as e:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().error(f"Failed to publish code suggestion, error: {e}")
-            return False
+                get_logger().warning(f"Azure failed to publish code suggestion, error: {e}")
+        return True

@@ -382,6 +378,9 @@ class AzureDevopsProvider(GitProvider):
             return []

     def publish_comment(self, pr_comment: str, is_temporary: bool = False, thread_context=None):
+        if is_temporary and not get_settings().config.publish_output_progress:
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            return None
         comment = Comment(content=pr_comment)
         thread = CommentThread(comments=[comment], thread_context=thread_context, status=5)
         thread_response = self.azure_devops_client.create_thread(

@@ -620,4 +619,3 @@ class AzureDevopsProvider(GitProvider):
     def publish_file_comments(self, file_comments: list) -> bool:
         pass

View File

@@ -1,4 +1,6 @@
+import difflib
 import json
+import re
 from typing import Optional, Tuple
 from urllib.parse import urlparse

@@ -6,13 +8,14 @@ import requests
 from atlassian.bitbucket import Cloud
 from starlette_context import context

-from pr_agent.algo.types import FilePatchInfo, EDIT_TYPE
+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+
 from ..algo.file_filter import filter_ignored
 from ..algo.language_handler import is_valid_file
 from ..algo.utils import find_line_number_of_relevant_line_in_file
 from ..config_loader import get_settings
 from ..log import get_logger
-from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
+from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider


 def _gef_filename(diff):

@@ -71,19 +74,33 @@ class BitbucketProvider(GitProvider):
         post_parameters_list = []
         for suggestion in code_suggestions:
             body = suggestion["body"]
+            original_suggestion = suggestion.get('original_suggestion', None)  # needed for diff code
+            if original_suggestion:
+                try:
+                    existing_code = original_suggestion['existing_code'].rstrip() + "\n"
+                    improved_code = original_suggestion['improved_code'].rstrip() + "\n"
+                    diff = difflib.unified_diff(existing_code.split('\n'),
+                                                improved_code.split('\n'), n=999)
+                    patch_orig = "\n".join(diff)
+                    patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+                    diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
+                    # replace ```suggestion ... ``` with diff_code, using regex:
+                    body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
+                except Exception as e:
+                    get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
+                    continue
+
             relevant_file = suggestion["relevant_file"]
             relevant_lines_start = suggestion["relevant_lines_start"]
             relevant_lines_end = suggestion["relevant_lines_end"]

             if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
                 get_logger().exception(
                     f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
                 )
                 continue

             if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
                 get_logger().exception(
                     f"Failed to publish code suggestion, "
                     f"relevant_lines_end is {relevant_lines_end} and "

@@ -112,8 +129,7 @@ class BitbucketProvider(GitProvider):
             self.publish_inline_comments(post_parameters_list)
             return True
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish code suggestion, error: {e}")
+            get_logger().error(f"Bitbucket failed to publish code suggestion, error: {e}")
             return False

     def publish_file_comments(self, file_comments: list) -> bool:

@@ -309,6 +325,9 @@ class BitbucketProvider(GitProvider):
             self.publish_comment(pr_comment)

     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
+        if is_temporary and not get_settings().config.publish_output_progress:
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            return None
         pr_comment = self.limit_output_characters(pr_comment, self.max_comment_length)
         comment = self.pr.comment(pr_comment)
         if is_temporary:
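The original_suggestion branch above renders a committable suggestion fence as a read-only diff fence instead. Run in isolation, the same few lines (copied from the diff, with sample inputs) behave like this:

```python
import difflib
import re

body = "Prefer a constant.\n```suggestion\nMAX = 10\n```"
existing_code = "max = 10\n"
improved_code = "MAX = 10\n"

# Build a full-context unified diff, drop its header lines, and swap it
# in for the ```suggestion ...``` fence.
diff = difflib.unified_diff(existing_code.split('\n'),
                            improved_code.split('\n'), n=999)
patch = "\n".join("\n".join(diff).splitlines()[5:]).strip('\n')
diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
print(body)  # the suggestion fence is now a diff fence with -/+ lines
```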

View File

@@ -1,16 +1,21 @@
-from distutils.version import LooseVersion
-from requests.exceptions import HTTPError
+import difflib
+import re
 from typing import Optional, Tuple
 from urllib.parse import quote_plus, urlparse

 from atlassian.bitbucket import Bitbucket
+from packaging.version import parse as parse_version
+from requests.exceptions import HTTPError

-from .git_provider import GitProvider
-from ..algo.types import EDIT_TYPE, FilePatchInfo
+from ..algo.git_patch_processing import decode_if_bytes
 from ..algo.language_handler import is_valid_file
-from ..algo.utils import load_large_diff, find_line_number_of_relevant_line_in_file
+from ..algo.types import EDIT_TYPE, FilePatchInfo
+from ..algo.utils import (find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
 from ..config_loader import get_settings
 from ..log import get_logger
+from .git_provider import GitProvider


 class BitbucketServerProvider(GitProvider):

@@ -35,7 +40,7 @@ class BitbucketServerProvider(GitProvider):
                                                token=get_settings().get("BITBUCKET_SERVER.BEARER_TOKEN",
                                                                         None))
         try:
-            self.bitbucket_api_version = LooseVersion(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
+            self.bitbucket_api_version = parse_version(self.bitbucket_client.get("rest/api/1.0/application-properties").get('version'))
         except Exception:
             self.bitbucket_api_version = None

@@ -65,20 +70,33 @@ class BitbucketServerProvider(GitProvider):
         post_parameters_list = []
         for suggestion in code_suggestions:
             body = suggestion["body"]
+            original_suggestion = suggestion.get('original_suggestion', None)  # needed for diff code
+            if original_suggestion:
+                try:
+                    existing_code = original_suggestion['existing_code'].rstrip() + "\n"
+                    improved_code = original_suggestion['improved_code'].rstrip() + "\n"
+                    diff = difflib.unified_diff(existing_code.split('\n'),
+                                                improved_code.split('\n'), n=999)
+                    patch_orig = "\n".join(diff)
+                    patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+                    diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
+                    # replace ```suggestion ... ``` with diff_code, using regex:
+                    body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
+                except Exception as e:
+                    get_logger().exception(f"Bitbucket failed to get diff code for publishing, error: {e}")
+                    continue
+
             relevant_file = suggestion["relevant_file"]
             relevant_lines_start = suggestion["relevant_lines_start"]
             relevant_lines_end = suggestion["relevant_lines_end"]

             if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(
-                        f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
-                    )
+                get_logger().warning(
+                    f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
+                )
                 continue

             if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().exception(
-                        f"Failed to publish code suggestion, "
+                get_logger().warning(
+                    f"Failed to publish code suggestion, "
                     f"relevant_lines_end is {relevant_lines_end} and "
                     f"relevant_lines_start is {relevant_lines_start}"

@@ -159,7 +177,7 @@ class BitbucketServerProvider(GitProvider):
             head_sha = self.pr.fromRef['latestCommit']

             # if Bitbucket api version is >= 8.16 then use the merge-base api for 2-way diff calculation
-            if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("8.16"):
+            if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("8.16"):
                 try:
                     base_sha = self.bitbucket_client.get(self._get_merge_base())['id']
                 except Exception as e:

@@ -174,7 +192,7 @@ class BitbucketServerProvider(GitProvider):
                 # if Bitbucket api version is None or < 7.0 then do a simple diff with a guaranteed common ancestor
                 base_sha = source_commits_list[-1]['parents'][0]['id']
                 # if Bitbucket api version is 7.0-8.15 then use 2-way diff functionality for the base_sha
-                if self.bitbucket_api_version is not None and self.bitbucket_api_version >= LooseVersion("7.0"):
+                if self.bitbucket_api_version is not None and self.bitbucket_api_version >= parse_version("7.0"):
                     try:
                         destination_commits = list(
                             self.bitbucket_client.get_commits(self.workspace_slug, self.repo_slug, base_sha,

@@ -200,25 +218,21 @@ class BitbucketServerProvider(GitProvider):
                 case 'ADD':
                     edit_type = EDIT_TYPE.ADDED
                     new_file_content_str = self.get_file(file_path, head_sha)
-                    if isinstance(new_file_content_str, (bytes, bytearray)):
-                        new_file_content_str = new_file_content_str.decode("utf-8")
+                    new_file_content_str = decode_if_bytes(new_file_content_str)
                     original_file_content_str = ""
                 case 'DELETE':
                     edit_type = EDIT_TYPE.DELETED
                     new_file_content_str = ""
                     original_file_content_str = self.get_file(file_path, base_sha)
-                    if isinstance(original_file_content_str, (bytes, bytearray)):
-                        original_file_content_str = original_file_content_str.decode("utf-8")
+                    original_file_content_str = decode_if_bytes(original_file_content_str)
                 case 'RENAME':
                     edit_type = EDIT_TYPE.RENAMED
                 case _:
                     edit_type = EDIT_TYPE.MODIFIED
                     original_file_content_str = self.get_file(file_path, base_sha)
-                    if isinstance(original_file_content_str, (bytes, bytearray)):
-                        original_file_content_str = original_file_content_str.decode("utf-8")
+                    original_file_content_str = decode_if_bytes(original_file_content_str)
                     new_file_content_str = self.get_file(file_path, head_sha)
-                    if isinstance(new_file_content_str, (bytes, bytearray)):
-                        new_file_content_str = new_file_content_str.decode("utf-8")
+                    new_file_content_str = decode_if_bytes(new_file_content_str)

             patch = load_large_diff(file_path, new_file_content_str, original_file_content_str)
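LooseVersion comes from distutils, which is deprecated and removed from the standard library in Python 3.12, hence the switch to packaging.version. The replacement compares the same way for these Bitbucket version strings:

```python
from packaging.version import parse as parse_version

assert parse_version("8.16") > parse_version("8.9")   # numeric, not lexicographic
assert parse_version("7.0") < parse_version("8.16")
print(parse_version("8.16"))  # -> 8.16
```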

View File

@@ -4,13 +4,15 @@ from collections import Counter
 from typing import List, Optional, Tuple
 from urllib.parse import urlparse

-from pr_agent.git_providers.codecommit_client import CodeCommitClient
+from pr_agent.algo.language_handler import is_valid_file
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.git_providers.codecommit_client import CodeCommitClient

 from ..algo.utils import load_large_diff
-from .git_provider import GitProvider
 from ..config_loader import get_settings
 from ..log import get_logger
-from pr_agent.algo.language_handler import is_valid_file
+from .git_provider import GitProvider


 class PullRequestCCMimic:
     """

View File

@@ -12,9 +12,9 @@ import requests
 import urllib3.util
 from git import Repo

+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.git_provider import GitProvider
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.git_providers.local_git_provider import PullRequestMimic
 from pr_agent.log import get_logger

View File

@@ -1,12 +1,12 @@
 from abc import ABC, abstractmethod
 # enum EDIT_TYPE (ADDED, DELETED, MODIFIED, RENAMED)
 from typing import Optional

+from pr_agent.algo.types import FilePatchInfo
 from pr_agent.algo.utils import Range, process_description
 from pr_agent.config_loader import get_settings
-from pr_agent.algo.types import FilePatchInfo
 from pr_agent.log import get_logger

 MAX_FILES_ALLOWED_FULL = 50


 class GitProvider(ABC):

@@ -62,8 +62,8 @@ class GitProvider(ABC):
         pass

     def get_pr_description(self, full: bool = True, split_changes_walkthrough=False) -> str or tuple:
-        from pr_agent.config_loader import get_settings
         from pr_agent.algo.utils import clip_tokens
+        from pr_agent.config_loader import get_settings
         max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
         description = self.get_pr_description_full() if full else self.get_user_description()
         if split_changes_walkthrough:

View File

@@ -1,22 +1,30 @@
-import itertools
-import time
+import copy
+import difflib
 import hashlib
+import itertools
+import re
+import time
 import traceback
 from datetime import datetime
 from typing import Optional, Tuple
 from urllib.parse import urlparse

 from github import AppAuthentication, Auth, Github
 from retry import retry
 from starlette_context import context

 from ..algo.file_filter import filter_ignored
+from ..algo.git_patch_processing import extract_hunk_headers
 from ..algo.language_handler import is_valid_file
 from ..algo.types import EDIT_TYPE
-from ..algo.utils import PRReviewHeader, load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file, Range
+from ..algo.utils import (PRReviewHeader, Range, clip_tokens,
+                          find_line_number_of_relevant_line_in_file,
+                          load_large_diff, set_file_languages)
 from ..config_loader import get_settings
 from ..log import get_logger
 from ..servers.utils import RateLimitExceeded
-from .git_provider import FilePatchInfo, GitProvider, IncrementalPR, MAX_FILES_ALLOWED_FULL
+from .git_provider import (MAX_FILES_ALLOWED_FULL, FilePatchInfo, GitProvider,
+                           IncrementalPR)


 class GithubProvider(GitProvider):

@@ -166,6 +174,24 @@ class GithubProvider(GitProvider):
         diff_files = []
         invalid_files_names = []
+        is_close_to_rate_limit = False
+
+        # The base.sha will point to the current state of the base branch (including parallel merges), not the original base commit when the PR was created
+        # We can fix this by finding the merge base commit between the PR head and base branches
+        # Note that The pr.head.sha is actually correct as is - it points to the latest commit in your PR branch.
+        # This SHA isn't affected by parallel merges to the base branch since it's specific to your PR's branch.
+        repo = self.repo_obj
+        pr = self.pr
+        try:
+            compare = repo.compare(pr.base.sha, pr.head.sha)  # communication with GitHub
+            merge_base_commit = compare.merge_base_commit
+        except Exception as e:
+            get_logger().error(f"Failed to get merge base commit: {e}")
+            merge_base_commit = pr.base
+        if merge_base_commit.sha != pr.base.sha:
+            get_logger().info(
+                f"Using merge base commit {merge_base_commit.sha} instead of base commit ")
+
         counter_valid = 0
         for file in files:
             if not is_valid_file(file.filename):
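The hunk above uses PyGithub's Repository.compare(), whose result exposes merge_base_commit: the common ancestor of the two SHAs, which later merges into the base branch do not shift. A sketch of the same lookup in isolation; the token, repository name, and PR number are placeholders:

```python
from github import Github

gh = Github("<token>")            # placeholder credentials
repo = gh.get_repo("owner/repo")  # placeholder repository
pr = repo.get_pull(123)           # placeholder PR number

compare = repo.compare(pr.base.sha, pr.head.sha)
merge_base_sha = compare.merge_base_commit.sha
print(f"diffing against {merge_base_sha} instead of {pr.base.sha}")
```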
@ -173,7 +199,10 @@ class GithubProvider(GitProvider):
continue continue
patch = file.patch patch = file.patch
if is_close_to_rate_limit:
new_file_content_str = ""
original_file_content_str = ""
else:
# allow only a limited number of files to be fully loaded. We can manage the rest with diffs only # allow only a limited number of files to be fully loaded. We can manage the rest with diffs only
counter_valid += 1 counter_valid += 1
avoid_load = False avoid_load = False
@ -195,10 +224,12 @@ class GithubProvider(GitProvider):
if avoid_load: if avoid_load:
original_file_content_str = "" original_file_content_str = ""
else: else:
original_file_content_str = self._get_pr_file_content(file, self.pr.base.sha) original_file_content_str = self._get_pr_file_content(file, merge_base_commit.sha)
# original_file_content_str = self._get_pr_file_content(file, self.pr.base.sha)
if not patch: if not patch:
patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str) patch = load_large_diff(file.filename, new_file_content_str, original_file_content_str)
if file.status == 'added': if file.status == 'added':
edit_type = EDIT_TYPE.ADDED edit_type = EDIT_TYPE.ADDED
elif file.status == 'removed': elif file.status == 'removed':
@@ -212,9 +243,14 @@ class GithubProvider(GitProvider):
                edit_type = EDIT_TYPE.UNKNOWN

            # count number of lines added and removed
+            if hasattr(file, 'additions') and hasattr(file, 'deletions'):
+                num_plus_lines = file.additions
+                num_minus_lines = file.deletions
+            else:
                patch_lines = patch.splitlines(keepends=True)
                num_plus_lines = len([line for line in patch_lines if line.startswith('+')])
                num_minus_lines = len([line for line in patch_lines if line.startswith('-')])
            file_patch_canonical_structure = FilePatchInfo(original_file_content_str, new_file_content_str, patch,
                                                           file.filename, edit_type=edit_type,
                                                           num_plus_lines=num_plus_lines,
@@ -279,7 +315,6 @@ class GithubProvider(GitProvider):
                                                                      relevant_line_in_file,
                                                                      absolute_position)
            if position == -1:
-                if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
                subject_type = "FILE"
            else:
@@ -292,11 +327,9 @@ class GithubProvider(GitProvider):
            # publish all comments in a single message
            self.pr.create_review(commit=self.last_commit_id, comments=comments)
        except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().error(f"Failed to publish inline comments")
-            if (getattr(e, "status", None) == 422
-                    and get_settings().github.publish_inline_comments_fallback_with_verification
-                    and not disable_fallback):
+            get_logger().info(f"Initially failed to publish inline comments as committable")
+            if (getattr(e, "status", None) == 422 and not disable_fallback):
                pass  # continue to try _publish_inline_comments_fallback_with_verification
            else:
                raise e  # will end up with publishing the comments one by one
@@ -304,7 +337,6 @@ class GithubProvider(GitProvider):
        try:
            self._publish_inline_comments_fallback_with_verification(comments)
        except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
            get_logger().error(f"Failed to publish inline code comments fallback, error: {e}")
            raise e
@@ -330,10 +362,8 @@ class GithubProvider(GitProvider):
        for comment in fixed_comments_as_one_liner:
            try:
                self.publish_inline_comments([comment], disable_fallback=True)
-                if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"Published invalid comment as a single line comment: {comment}")
            except:
-                if get_settings().config.verbosity_level >= 2:
                get_logger().error(f"Failed to publish invalid comment as a single line comment: {comment}")

    def _verify_code_comment(self, comment: dict):
@@ -392,7 +422,6 @@ class GithubProvider(GitProvider):
                if fixed_comment != comment:
                    fixed_comments.append(fixed_comment)
            except Exception as e:
-                if get_settings().config.verbosity_level >= 2:
                get_logger().error(f"Failed to fix inline comment, error: {e}")
        return fixed_comments
@@ -401,20 +430,21 @@ class GithubProvider(GitProvider):
        Publishes code suggestions as comments on the PR.
        """
        post_parameters_list = []

-        for suggestion in code_suggestions:
+        code_suggestions_validated = self.validate_comments_inside_hunks(code_suggestions)
+        for suggestion in code_suggestions_validated:
            body = suggestion['body']
            relevant_file = suggestion['relevant_file']
            relevant_lines_start = suggestion['relevant_lines_start']
            relevant_lines_end = suggestion['relevant_lines_end']

            if not relevant_lines_start or relevant_lines_start == -1:
-                if get_settings().config.verbosity_level >= 2:
                get_logger().exception(
                    f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
                continue

            if relevant_lines_end < relevant_lines_start:
-                if get_settings().config.verbosity_level >= 2:
                get_logger().exception(f"Failed to publish code suggestion, "
                                       f"relevant_lines_end is {relevant_lines_end} and "
                                       f"relevant_lines_start is {relevant_lines_start}")
@@ -441,7 +471,6 @@ class GithubProvider(GitProvider):
            self.publish_inline_comments(post_parameters_list)
            return True
        except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
            get_logger().error(f"Failed to publish code suggestion, error: {e}")
            return False
@@ -501,6 +530,7 @@ class GithubProvider(GitProvider):
                elif self.deployment_type == 'user':
                    same_comment_creator = self.github_user_id == existing_comment['user']['login']
                if existing_comment['subject_type'] == 'file' and comment['path'] == existing_comment['path'] and same_comment_creator:
+
                    headers, data_patch = self.pr._requester.requestJsonAndCheck(
                        "PATCH", f"{self.base_url}/repos/{self.repo}/pulls/comments/{existing_comment['id']}", input={"body": comment['body']}
                    )
@@ -512,7 +542,6 @@ class GithubProvider(GitProvider):
            )
            return True
        except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
            get_logger().error(f"Failed to publish diffview file summary, error: {e}")
            return False
@@ -801,7 +830,6 @@ class GithubProvider(GitProvider):
            link = f"{self.base_url_html}/{self.repo}/pull/{self.pr_num}/files#diff-{sha_file}R{absolute_position}"
            return link
        except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
            get_logger().info(f"Failed adding line link, error: {e}")
        return ""
@@ -862,3 +890,89 @@ class GithubProvider(GitProvider):
    def calc_pr_statistics(self, pull_request_data: dict):
        return {}
+    def validate_comments_inside_hunks(self, code_suggestions):
+        """
+        Validate that all committable comments are inside PR hunks - this is a must for committable comments in GitHub
+        """
+        code_suggestions_copy = copy.deepcopy(code_suggestions)
+        diff_files = self.get_diff_files()
+        RE_HUNK_HEADER = re.compile(
+            r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
+
+        diff_files = set_file_languages(diff_files)
+
+        for suggestion in code_suggestions_copy:
+            try:
+                relevant_file_path = suggestion['relevant_file']
+                for file in diff_files:
+                    if file.filename == relevant_file_path:
+                        # generate on-demand the patches range for the relevant file
+                        patch_str = file.patch
+                        if not hasattr(file, 'patches_range'):
+                            file.patches_range = []
+                            patch_lines = patch_str.splitlines()
+                            for i, line in enumerate(patch_lines):
+                                if line.startswith('@@'):
+                                    match = RE_HUNK_HEADER.match(line)
+                                    # identify hunk header
+                                    if match:
+                                        section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
+                                        file.patches_range.append({'start': start2, 'end': start2 + size2 - 1})
+
+                        patches_range = file.patches_range
+                        comment_start_line = suggestion.get('relevant_lines_start', None)
+                        comment_end_line = suggestion.get('relevant_lines_end', None)
+                        original_suggestion = suggestion.get('original_suggestion', None)  # needed for diff code
+                        if not comment_start_line or not comment_end_line or not original_suggestion:
+                            continue
+
+                        # check if the comment is inside a valid hunk
+                        is_valid_hunk = False
+                        min_distance = float('inf')
+                        patch_range_min = None
+                        # find the hunk that contains the comment, or the closest one
+                        for i, patch_range in enumerate(patches_range):
+                            d1 = comment_start_line - patch_range['start']
+                            d2 = patch_range['end'] - comment_end_line
+                            if d1 >= 0 and d2 >= 0:  # found a valid hunk
+                                is_valid_hunk = True
+                                min_distance = 0
+                                patch_range_min = patch_range
+                                break
+                            elif d1 * d2 <= 0:  # comment is possibly inside the hunk
+                                d1_clip = abs(min(0, d1))
+                                d2_clip = abs(min(0, d2))
+                                d = max(d1_clip, d2_clip)
+                                if d < min_distance:
+                                    patch_range_min = patch_range
+                                    min_distance = min(min_distance, d)
+                        if not is_valid_hunk:
+                            if min_distance < 10:  # 10 lines - a reasonable distance to consider the comment inside the hunk
+                                # make the suggestion non-committable, yet multi line
+                                suggestion['relevant_lines_start'] = max(suggestion['relevant_lines_start'], patch_range_min['start'])
+                                suggestion['relevant_lines_end'] = min(suggestion['relevant_lines_end'], patch_range_min['end'])
+                                body = suggestion['body'].strip()
+
+                                # present new diff code in collapsible
+                                existing_code = original_suggestion['existing_code'].rstrip() + "\n"
+                                improved_code = original_suggestion['improved_code'].rstrip() + "\n"
+                                diff = difflib.unified_diff(existing_code.split('\n'),
+                                                            improved_code.split('\n'), n=999)
+                                patch_orig = "\n".join(diff)
+                                patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+                                diff_code = f"\n\n<details><summary>New proposed code:</summary>\n\n```diff\n{patch.rstrip()}\n```"
+                                # replace ```suggestion ... ``` with diff_code, using regex:
+                                body = re.sub(r'```suggestion.*?```', diff_code, body, flags=re.DOTALL)
+                                body += "\n\n</details>"
+                                suggestion['body'] = body
+                                get_logger().info(f"Comment was moved to a valid hunk, "
+                                                  f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
+                            else:
+                                get_logger().error(f"Comment is not inside a valid hunk, "
+                                                   f"start_line={suggestion['relevant_lines_start']}, end_line={suggestion['relevant_lines_end']}, file={file.filename}")
+            except Exception as e:
+                get_logger().error(f"Failed to process patch for committable comment, error: {e}")
+        return code_suggestions_copy
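Taken on its own, the new `validate_comments_inside_hunks` flow above is dense. As an illustration only — not the project's code, with an invented sample patch and simplified helper names — the following standalone sketch shows the core idea: collect the new-file line ranges from `@@` hunk headers, then accept a comment span only if it sits entirely inside one range.

```python
import re

# Hypothetical standalone sketch of the hunk-range check used above.
RE_HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")

def hunk_ranges(patch: str) -> list:
    """Collect the new-file line ranges covered by each @@ hunk header."""
    ranges = []
    for line in patch.splitlines():
        match = RE_HUNK_HEADER.match(line)
        if match:
            start2 = int(match.group(3))
            size2 = int(match.group(4) or 1)
            ranges.append({'start': start2, 'end': start2 + size2 - 1})
    return ranges

def comment_inside_hunks(patch: str, start_line: int, end_line: int) -> bool:
    """A comment is committable only if its full span lies inside one hunk."""
    return any(r['start'] <= start_line and end_line <= r['end']
               for r in hunk_ranges(patch))

sample_patch = "@@ -10,4 +12,6 @@ def foo():\n+    x = 1\n+    y = 2\n"
print(hunk_ranges(sample_patch))                   # [{'start': 12, 'end': 17}]
print(comment_inside_hunks(sample_patch, 13, 15))  # True
print(comment_inside_hunks(sample_patch, 5, 6))    # False
```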

View File

@@ -1,3 +1,4 @@
+import difflib
import hashlib
import re
from typing import Optional, Tuple
@@ -7,13 +8,16 @@ import gitlab
import requests
from gitlab import GitlabGetError

+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..algo.file_filter import filter_ignored
from ..algo.language_handler import is_valid_file
-from ..algo.utils import load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file
+from ..algo.utils import (clip_tokens,
+                          find_line_number_of_relevant_line_in_file,
+                          load_large_diff)
from ..config_loader import get_settings
-from .git_provider import GitProvider, MAX_FILES_ALLOWED_FULL
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from ..log import get_logger
+from .git_provider import MAX_FILES_ALLOWED_FULL, GitProvider

class DiffNotFoundError(Exception):
@@ -190,6 +194,9 @@ class GitLabProvider(GitProvider):
        self.publish_persistent_comment_full(pr_comment, initial_header, update_header, name, final_update_message)

    def publish_comment(self, mr_comment: str, is_temporary: bool = False):
+        if is_temporary and not get_settings().config.publish_output_progress:
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {mr_comment}")
+            return None
        mr_comment = self.limit_output_characters(mr_comment, self.max_comment_chars)
        comment = self.mr.notes.create({'body': mr_comment})
        if is_temporary:
@@ -275,20 +282,23 @@ class GitLabProvider(GitProvider):
            new_code_snippet = original_suggestion['improved_code']
            content = original_suggestion['suggestion_content']
            label = original_suggestion['label']
-            if 'score' in original_suggestion:
-                score = original_suggestion['score']
-            else:
-                score = 7
+            score = original_suggestion.get('score', 7)

            if hasattr(self, 'main_language'):
                language = self.main_language
            else:
                language = ''
            link = self.get_line_link(relevant_file, line_start, line_end)
-            body_fallback = f"**Suggestion:** {content} [{label}, importance: {score}]\n___\n"
-            body_fallback += f"\n\nReplace lines ([{line_start}-{line_end}]({link}))\n\n```{language}\n{old_code_snippet}\n````\n\n"
-            body_fallback += f"with\n\n```{language}\n{new_code_snippet}\n````"
-            body_fallback += f"\n\n___\n\n`(Cannot implement this suggestion directly, as gitlab API does not enable committing to a non -+ line in a PR)`"
+            body_fallback = f"**Suggestion:** {content} [{label}, importance: {score}]\n\n"
+            body_fallback += f"\n\n<details><summary>[{target_file.filename} [{line_start}-{line_end}]]({link}):</summary>\n\n"
+            body_fallback += f"\n\n___\n\n`(Cannot implement directly - GitLab API allows committable suggestions strictly on MR diff lines)`"
+            body_fallback += "</details>\n\n"
+            diff_patch = difflib.unified_diff(old_code_snippet.split('\n'),
+                                              new_code_snippet.split('\n'), n=999)
+            patch_orig = "\n".join(diff_patch)
+            patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
+            diff_code = f"\n\n```diff\n{patch.rstrip()}\n```"
+            body_fallback += diff_code

            # Create a general note on the file in the MR
            self.mr.notes.create({
@@ -301,6 +311,7 @@ class GitLabProvider(GitProvider):
                    'file_path': f'{target_file.filename}',
                }
            })
+            get_logger().debug(f"Created fallback comment in MR {self.id_mr} with position {pos_obj}")

            # get_logger().debug(
            #     f"Failed to create comment in MR {self.id_mr} with position {pos_obj} (probably not a '+' line)")

View File

@@ -4,9 +4,9 @@ from typing import List
from git import Repo

+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.config_loader import _find_repository_root, get_settings
from pr_agent.git_providers.git_provider import GitProvider
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from pr_agent.log import get_logger

View File

@@ -3,11 +3,12 @@ import os
import tempfile

from dynaconf import Dynaconf
+from starlette_context import context

from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
+from pr_agent.git_providers import (get_git_provider,
+                                    get_git_provider_with_context)
from pr_agent.log import get_logger
-from starlette_context import context

def apply_repo_settings(pr_url):
@@ -98,5 +99,5 @@ def set_claude_model():
    """
    model_claude = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
    get_settings().set('config.model', model_claude)
-    get_settings().set('config.model_turbo', model_claude)
+    get_settings().set('config.model_weak', model_claude)
    get_settings().set('config.fallback_models', [model_claude])
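`set_claude_model` now pins the weak model as well: `config.model_weak` replaces the retired `config.model_turbo` key. As a hedged illustration of the mechanism — assuming Dynaconf's runtime `set` API with dotted keys, and using a throwaway `Dynaconf()` instance rather than the project's settings object:

```python
from dynaconf import Dynaconf

settings = Dynaconf()  # stand-in for the application's global settings object

model_claude = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
# dotted keys create/overwrite nested settings at runtime
settings.set('config.model', model_claude)
settings.set('config.model_weak', model_claude)
settings.set('config.fallback_models', [model_claude])

print(settings.config.model)  # bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0
```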

View File

@@ -1,5 +1,6 @@
from pr_agent.config_loader import get_settings
-from pr_agent.identity_providers.default_identity_provider import DefaultIdentityProvider
+from pr_agent.identity_providers.default_identity_provider import \
+    DefaultIdentityProvider

_IDENTITY_PROVIDERS = {
    'default': DefaultIdentityProvider

View File

@@ -1,4 +1,5 @@
-from pr_agent.identity_providers.identity_provider import Eligibility, IdentityProvider
+from pr_agent.identity_providers.identity_provider import (Eligibility,
+                                                           IdentityProvider)

class DefaultIdentityProvider(IdentityProvider):

View File

@@ -8,12 +8,10 @@ def get_secret_provider():
    provider_id = get_settings().config.secret_provider
    if provider_id == 'google_cloud_storage':
        try:
-            from pr_agent.secret_providers.google_cloud_storage_secret_provider import GoogleCloudStorageSecretProvider
+            from pr_agent.secret_providers.google_cloud_storage_secret_provider import \
+                GoogleCloudStorageSecretProvider
            return GoogleCloudStorageSecretProvider()
        except Exception as e:
            raise ValueError(f"Failed to initialize google_cloud_storage secret provider {provider_id}") from e
    else:
        raise ValueError("Unknown SECRET_PROVIDER")

View File

@@ -9,9 +9,9 @@ import secrets
from urllib.parse import unquote

import uvicorn
-from fastapi import APIRouter, Depends, FastAPI, HTTPException
-from fastapi.security import HTTPBasic, HTTPBasicCredentials
+from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
from fastapi.encoders import jsonable_encoder
+from fastapi.security import HTTPBasic, HTTPBasicCredentials
from starlette import status
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware
@@ -23,9 +23,6 @@ from pr_agent.agent.pr_agent import PRAgent, command2class
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
-from pr_agent.log import get_logger
-from fastapi import Request, Depends
-from fastapi.security import HTTPBasic, HTTPBasicCredentials
from pr_agent.log import LoggingFormat, get_logger, setup_logger

setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
@@ -67,6 +64,9 @@ def authorize(credentials: HTTPBasicCredentials = Depends(security)):
async def _perform_commands_azure(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
    apply_repo_settings(api_url)
+    if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback:  # auto commands for PR, and auto feedback is disabled
+        get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}", **log_context)
+        return
    commands = get_settings().get(f"azure_devops_server.{commands_conf}")
    get_settings().set("config.is_auto_command", True)
    for command in commands:

View File

@@ -77,6 +77,9 @@ async def handle_manifest(request: Request, response: Response):

async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict, data: dict):
    apply_repo_settings(api_url)
+    if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback:  # auto commands for PR, and auto feedback is disabled
+        get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}")
+        return
    if data.get("event", "") == "pullrequest:created":
        if not should_process_pr_logic(data):
            return
@@ -98,11 +101,14 @@ async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict, data: dict):

def is_bot_user(data) -> bool:
    try:
-        if data["data"]["actor"]["type"] != "user":
-            get_logger().info(f"BitBucket actor type is not 'user': {data['data']['actor']['type']}")
+        actor = data.get("data", {}).get("actor", {})
+        # allow actor type: user . if it's "AppUser" or "team" then it is a bot user
+        allowed_actor_types = {"user"}
+        if actor and actor["type"].lower() not in allowed_actor_types:
+            get_logger().info(f"BitBucket actor type is not 'user', skipping: {actor}")
            return True
    except Exception as e:
-        get_logger().error("Failed 'is_bot_user' logic: {e}")
+        get_logger().error(f"Failed 'is_bot_user' logic: {e}")
    return False
@@ -161,16 +167,18 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Request):
        return "OK"

    # Get the username of the sender
+    actor = data.get("data", {}).get("actor", {})
+    if actor:
        try:
-            username = data["data"]["actor"]["username"]
+            username = actor["username"]
        except KeyError:
            try:
-                username = data["data"]["actor"]["display_name"]
+                username = actor["display_name"]
            except KeyError:
-                username = data["data"]["actor"]["nickname"]
+                username = actor["nickname"]
        log_context["sender"] = username

-    sender_id = data["data"]["actor"]["account_id"]
+    sender_id = data.get("data", {}).get("actor", {}).get("account_id", "")
    log_context["sender_id"] = sender_id

    jwt_parts = input_jwt.split(".")
    claim_part = jwt_parts[1]
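The rewritten `is_bot_user` above replaces `KeyError`-prone indexing with `.get` chains and an allow-set of actor types. A toy sketch (payloads invented, and the function renamed so as not to pass for the project's) of how that formulation behaves:

```python
def is_bot_actor(data: dict) -> bool:
    """Return True when the webhook actor is not a regular user (illustrative sketch)."""
    actor = data.get("data", {}).get("actor", {})
    allowed_actor_types = {"user"}
    # missing/empty actor info is treated as "not a bot" rather than raising
    return bool(actor) and actor.get("type", "").lower() not in allowed_actor_types

print(is_bot_actor({"data": {"actor": {"type": "user"}}}))     # False - a person
print(is_bot_actor({"data": {"actor": {"type": "AppUser"}}}))  # True - a bot account
print(is_bot_actor({}))                                        # False - no actor info
```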

View File

@@ -6,20 +6,20 @@ from typing import List
import uvicorn
from fastapi import APIRouter, FastAPI
from fastapi.encoders import jsonable_encoder
+from fastapi.responses import RedirectResponse
from starlette import status
from starlette.background import BackgroundTasks
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette_context.middleware import RawContextMiddleware

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.servers.utils import verify_signature
-from fastapi.responses import RedirectResponse

setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
router = APIRouter()
@@ -72,6 +72,11 @@ async def handle_webhook(background_tasks: BackgroundTasks, request: Request):
    commands_to_run = []

    if data["eventKey"] == "pr:opened":
+        apply_repo_settings(pr_url)
+        if get_settings().config.disable_auto_feedback:  # auto commands for PR, and auto feedback is disabled
+            get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {pr_url}", **log_context)
+            return
+        get_settings().set("config.is_auto_command", True)
        commands_to_run.extend(_get_commands_list_from_settings('BITBUCKET_SERVER.PR_COMMANDS'))
    elif data["eventKey"] == "pr:comment:added":
        commands_to_run.append(data["comment"]["text"])

View File

@@ -15,7 +15,8 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
+from pr_agent.git_providers import (get_git_provider,
+                                    get_git_provider_with_context)
from pr_agent.git_providers.git_provider import IncrementalPR
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.identity_providers import get_identity_provider
@@ -373,6 +374,9 @@ def _check_pull_request_event(action: str, body: dict, log_context: dict) -> Tup
async def _perform_auto_commands_github(commands_conf: str, agent: PRAgent, body: dict, api_url: str,
                                        log_context: dict):
    apply_repo_settings(api_url)
+    if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback:  # auto commands for PR, and auto feedback is disabled
+        get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}")
+        return
    if not should_process_pr_logic(body):  # Here we already updated the configuration with the repo settings
        return {}
    commands = get_settings().get(f"github_app.{commands_conf}")

View File

@@ -1,11 +1,12 @@
import asyncio
import multiprocessing
-from collections import deque
-import traceback
-from datetime import datetime, timezone
import time
-import requests
+import traceback
+from collections import deque
+from datetime import datetime, timezone

import aiohttp
+import requests

from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings
@@ -83,6 +84,7 @@ async def is_valid_notification(notification, headers, handled_ids, session, user_id):
        return False, handled_ids
    async with session.get(latest_comment, headers=headers) as comment_response:
        check_prev_comments = False
+        user_tag = "@" + user_id
        if comment_response.status == 200:
            comment = await comment_response.json()
            if 'id' in comment:
@@ -100,7 +102,6 @@ async def is_valid_notification(notification, headers, handled_ids, session, user_id):
                get_logger().debug(f"no comment_body")
                check_prev_comments = True
            else:
-                user_tag = "@" + user_id
                if user_tag not in comment_body:
                    get_logger().debug(f"user_tag not in comment_body")
                    check_prev_comments = True

View File

@@ -1,6 +1,6 @@
import copy
-import re
import json
+import re
from datetime import datetime

import uvicorn
@@ -61,6 +61,9 @@ async def handle_request(api_url: str, body: str, log_context: dict, sender_id:
async def _perform_commands_gitlab(commands_conf: str, agent: PRAgent, api_url: str,
                                   log_context: dict, data: dict):
    apply_repo_settings(api_url)
+    if commands_conf == "pr_commands" and get_settings().config.disable_auto_feedback:  # auto commands for PR, and auto feedback is disabled
+        get_logger().info(f"Auto feedback is disabled, skipping auto commands for PR {api_url=}", **log_context)
+        return
    if not should_process_pr_logic(data):  # Here we already updated the configurations
        return
    commands = get_settings().get(f"gitlab.{commands_conf}", {})

View File

@@ -5,7 +5,6 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.servers.github_app import router

middleware = [Middleware(RawContextMiddleware)]
app = FastAPI(middleware=middleware)

app.include_router(router)

View File

@@ -2,7 +2,7 @@ import hashlib
import hmac
import time
from collections import defaultdict
-from typing import Callable, Any
+from typing import Any, Callable

from fastapi import HTTPException

View File

@@ -1,18 +1,20 @@
[config]
# models
-model="gpt-4-turbo-2024-04-09"
-model_turbo="gpt-4o-2024-08-06"
-fallback_models=["gpt-4o-2024-05-13"]
+model="gpt-4o-2024-11-20"
+fallback_models=["gpt-4o-2024-08-06"]
+#model_weak="gpt-4o-mini-2024-07-18" # optional, a weaker model to use for some easier tasks
# CLI
git_provider="github"
publish_output=true
publish_output_progress=true
+publish_output_no_suggestions=true
verbosity_level=0 # 0,1,2
use_extra_bad_extensions=false
# Configurations
use_wiki_settings_file=true
use_repo_settings_file=true
use_global_settings_file=true
+disable_auto_feedback = false
ai_timeout=120 # 2minutes
skip_keys = []
# token limits
@@ -53,10 +55,6 @@ require_can_be_split_review=false
require_security_review=true
require_ticket_analysis_review=true
# general options
-num_code_suggestions=0
-inline_code_comments = false
-ask_and_reflect=false
-#automatic_review=true
persistent_comment=true
extra_instructions = ""
final_update_message = true
@@ -90,6 +88,7 @@ publish_description_as_comment_persistent=true
## changes walkthrough section
enable_semantic_files_types=true
collapsible_file_list='adaptive' # true, false, 'adaptive'
+collapsible_file_list_threshold=8
inline_file_summary=false # false, true, 'table'
# markers
use_description_markers=false
@@ -98,7 +97,6 @@ include_generated_by_header=true
enable_large_pr_handling=true
max_ai_calls=4
async_ai_calls=true
-mention_extra_files=true
#custom_labels = ['Bug fix', 'Tests', 'Bug fix with tests', 'Enhancement', 'Documentation', 'Other']

[pr_questions] # /ask #
@@ -106,13 +104,13 @@ enable_help_text=false

[pr_code_suggestions] # /improve #
-max_context_tokens=14000
+max_context_tokens=16000
#
commitable_code_suggestions = false
dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
+focus_only_on_problems=true
#
extra_instructions = ""
-rank_suggestions = false
enable_help_text=false
enable_chat_text=false
enable_intro_text=true
@@ -127,7 +125,7 @@ auto_extended_mode=true
num_code_suggestions_per_chunk=4
max_number_of_calls = 3
parallel_calls = true
-rank_extended_suggestions = false
final_clip_factor = 0.8
# self-review checkbox
demand_code_suggestions_self_review=false # add a checkbox for the author to self-review the code suggestions
@@ -137,6 +135,7 @@ fold_suggestions_on_self_review=true # Pro feature. if true, the code suggestion
# Suggestion impact 💎
publish_post_process_suggestion_impact=true
wiki_page_accepted_suggestions=true
+allow_thumbs_up_down=false

[pr_custom_prompt] # /custom_prompt #
prompt = """\
@@ -160,6 +159,7 @@ class_name = "" # in case there are several methods with the same name in
[pr_update_changelog] # /update_changelog #
push_changelog_changes=false
extra_instructions = ""
+add_pr_link=true

[pr_analyze] # /analyze #
enable_help_text=true
@@ -216,7 +216,7 @@ override_deployment_type = true
handle_pr_actions = ['opened', 'reopened', 'ready_for_review']
pr_commands = [
    "/describe --pr_description.final_update_message=false",
-    "/review --pr_reviewer.num_code_suggestions=0",
+    "/review",
    "/improve",
]
# settings for "pull_request" event with "synchronize" action - used to detect and handle push triggers for new commits
@@ -228,27 +228,27 @@ push_trigger_pending_tasks_backlog = true
push_trigger_pending_tasks_ttl = 300
push_commands = [
    "/describe",
-    "/review --pr_reviewer.num_code_suggestions=0",
+    "/review",
]

[gitlab]
url = "https://gitlab.com"
pr_commands = [
    "/describe --pr_description.final_update_message=false",
-    "/review --pr_reviewer.num_code_suggestions=0",
+    "/review",
    "/improve",
]
handle_push_trigger = false
push_commands = [
    "/describe",
-    "/review --pr_reviewer.num_code_suggestions=0",
+    "/review",
]

[bitbucket_app]
pr_commands = [
    "/describe --pr_description.final_update_message=false",
-    "/review --pr_reviewer.num_code_suggestions=0",
-    "/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
+    "/review",
+    "/improve --pr_code_suggestions.commitable_code_suggestions=true",
]
avoid_full_files = false
@@ -273,8 +273,8 @@ avoid_full_files = false
url = ""
pr_commands = [
    "/describe --pr_description.final_update_message=false",
-    "/review --pr_reviewer.num_code_suggestions=0",
-    "/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
+    "/review",
+    "/improve --pr_code_suggestions.commitable_code_suggestions=true",
]

[litellm]
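Many of the defaults above can be overridden per run with `--section.key=value` arguments, as the `pr_commands` lists in this file already demonstrate. A simplified, hypothetical parser for that argument shape — the real project resolves such overrides through its settings layer, so treat this only as a sketch of the convention:

```python
def parse_overrides(args: list) -> dict:
    """Turn ['--pr_code_suggestions.commitable_code_suggestions=true'] into a nested dict."""
    overrides = {}
    for arg in args:
        if not arg.startswith("--") or "=" not in arg:
            continue  # not an override-style argument
        key, value = arg[2:].split("=", 1)
        section, _, option = key.partition(".")
        overrides.setdefault(section, {})[option] = value
    return overrides

print(parse_overrides(["/improve", "--pr_code_suggestions.commitable_code_suggestions=true"]))
# {'pr_code_suggestions': {'commitable_code_suggestions': 'true'}}
```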

View File

@@ -1,7 +1,10 @@
[pr_code_suggestions_prompt]
system="""You are PR-Reviewer, an AI specializing in Pull Request (PR) code analysis and suggestions.
-Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality, readability, and performance.
+{%- if not focus_only_on_problems %}
+Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality and performance.
+{%- else %}
+Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix critical bugs and problems.
+{%- endif %}

The PR code diff will be in the following structured format:
======
@@ -42,9 +45,17 @@ __new hunk__

Specific guidelines for generating code suggestions:
+{%- if not focus_only_on_problems %}
- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions.
+{%- else %}
+- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions. Return less suggestions if no pertinent ones are applicable.
+{%- endif %}
- Focus solely on enhancing new code introduced in the PR, identified by '+' prefixes in '__new hunk__' sections.
+{%- if not focus_only_on_problems %}
- Prioritize suggestions that address potential issues, critical problems, and bugs in the PR code. Avoid repeating changes already implemented in the PR. If no pertinent suggestions are applicable, return an empty list.
+{%- else %}
+- Only give suggestions that address critical problems and bugs in the PR code. If no relevant suggestions are applicable, return an empty list.
+{%- endif %}
- Don't suggest to add docstring, type hints, or comments, to remove unused imports, or to use more specific exception types.
- When referencing variables or names from the code, enclose them in backticks (`). Example: "ensure that `variable_name` is..."
- Be mindful you are viewing a partial PR code diff, not the full codebase. Avoid suggestions that might conflict with unseen code or alerting variables not declared in the visible scope, as the context is incomplete.
@@ -69,7 +80,11 @@ class CodeSuggestion(BaseModel):
    existing_code: str = Field(description="A short code snippet from a '__new hunk__' section that the suggestion aims to enhance or fix. Include only complete code lines. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
    improved_code: str = Field(description="A refined code snippet that replaces the 'existing_code' snippet after implementing the suggestion.")
    one_sentence_summary: str = Field(description="A concise, single-sentence overview of the suggested improvement. Focus on the 'what'. Be general, and avoid method or variable names.")
+{%- if not focus_only_on_problems %}
    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', 'typo'. Other relevant labels are also acceptable.")
+{%- else %}
+    label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'critical bug', 'general'. The 'general' section should be used for suggestions that address a major issue, but are not necessarily on a critical level.")
+{%- endif %}

class PRCodeSuggestions(BaseModel):

View File

@@ -9,7 +9,7 @@ Your task is to provide a full description for the PR content - files walkthrough
- Keep in mind that the 'Previous title', 'Previous description' and 'Commit messages' sections may be partial, simplistic, non-informative or out of date. Hence, compare them to the PR diff code, and use them only as a reference.
- The generated title and description should prioritize the most significant changes.
- If needed, each YAML output should be in block scalar indicator ('|-')
-- When quoting variables or names from the code, use backticks (`) instead of single quote (').
+- When quoting variables, names or file paths from the code, use backticks (`) instead of single quote (').

{%- if extra_instructions %}
@@ -38,23 +38,22 @@ class PRType(str, Enum):

{%- if enable_semantic_files_types %}
class FileDescription(BaseModel):
-    filename: str = Field(description="The full file path of the relevant file.")
-    language: str = Field(description="The programming language of the relevant file.")
+    filename: str = Field(description="The full file path of the relevant file")
+    language: str = Field(description="The programming language of the relevant file")
+{%- if include_file_summary_changes %}
    changes_summary: str = Field(description="concise summary of the changes in the relevant file, in bullet points (1-4 bullet points).")
-    changes_title: str = Field(description="an informative title for the changes in the files, describing its main theme (5-10 words).")
+{%- endif %}
+    changes_title: str = Field(description="one-line summary (5-10 words) capturing the main theme of changes in the file")
    label: str = Field(description="a single semantic label that represents a type of code changes that occurred in the File. Possible values (partial list): 'bug fix', 'tests', 'enhancement', 'documentation', 'error handling', 'configuration changes', 'dependencies', 'formatting', 'miscellaneous', ...")
{%- endif %}

class PRDescription(BaseModel):
    type: List[PRType] = Field(description="one or more types that describe the PR content. Return the label member value (e.g. 'Bug fix', not 'bug_fix')")
{%- if enable_semantic_files_types %}
-    pr_files: List[FileDescription] = Field(max_items=15, description="a list of the files in the PR, and summary of their changes")
+    pr_files: List[FileDescription] = Field(max_items=20, description="a list of all the files that were changed in the PR, and summary of their changes. Each file must be analyzed regardless of change size.")
{%- endif %}
    description: str = Field(description="an informative and concise description of the PR. Use bullet points. Display first the most significant changes.")
    title: str = Field(description="an informative title for the PR, describing its main theme")
-{%- if enable_custom_labels %}
-    labels: List[Label] = Field(min_items=0, description="choose the relevant custom labels that describe the PR content, and return their keys. Use the value field of the Label object to better understand the label meaning.")
-{%- endif %}
=====
@@ -70,25 +69,20 @@ pr_files:
  ...
  language: |
  ...
+{%- if include_file_summary_changes %}
  changes_summary: |
  ...
+{%- endif %}
  changes_title: |
  ...
  label: |
-  ...
+  label_key_1
...
{%- endif %}
description: |
  ...
title: |
  ...
-{%- if enable_custom_labels %}
-labels:
-- |
-  ...
-- |
-  ...
-{%- endif %}
```

Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
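Downstream, the model's YAML answer is validated against Pydantic models like the ones above. A minimal sketch of that round trip, assuming `pydantic` and `PyYAML` are installed and using trimmed-down models (the field sets here are reduced from the full prompt):

```python
from typing import List

import yaml
from pydantic import BaseModel, Field

class FileDescription(BaseModel):
    filename: str
    language: str
    changes_title: str
    label: str

class PRDescription(BaseModel):
    type: List[str]
    pr_files: List[FileDescription] = Field(default_factory=list)
    description: str
    title: str

# a sample answer in the block-scalar style the prompt asks for
answer = """\
type:
- Enhancement
pr_files:
- filename: |
    src/app.py
  language: |
    python
  changes_title: |
    Add retry logic to API client
  label: |
    enhancement
description: |
  Adds retries with backoff.
title: |
  Add retry logic
"""

parsed = PRDescription(**yaml.safe_load(answer))
print(parsed.pr_files[0].filename.strip())  # src/app.py
```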

View File

@@ -1,10 +1,6 @@
[pr_review_prompt]
system="""You are PR-Reviewer, a language model designed to review a Git Pull Request (PR).
-{%- if num_code_suggestions > 0 %}
-Your task is to provide constructive and concise feedback for the PR, and also provide meaningful code suggestions.
-{%- else %}
Your task is to provide constructive and concise feedback for the PR.
-{%- endif %}

The review should focus on new code added in the PR code diff (lines starting with '+')
@@ -49,16 +45,6 @@ __new hunk__
{%- endif %}
- When quoting variables or names from the code, use backticks (`) instead of single quote (').

-{%- if num_code_suggestions > 0 %}
-
-Code suggestions guidelines:
-- Provide up to {{ num_code_suggestions }} code suggestions. Try to provide diverse and insightful suggestions.
-- Focus on important suggestions like fixing code problems, issues and bugs. As a second priority, provide suggestions for meaningful code improvements, like performance, vulnerability, modularity, and best practices.
-- Avoid making suggestions that have already been implemented in the PR code. For example, if you want to add logs, or change a variable to const, or anything else, make sure it isn't already in the PR code.
-- Don't suggest to add docstring, type hints, or comments.
-- Suggestions should address the new code added in the PR diff (lines starting with '+')
-{%- endif %}

{%- if extra_instructions %}
@@ -80,8 +66,8 @@ class SubPR(BaseModel):

class KeyIssuesComponentLink(BaseModel):
    relevant_file: str = Field(description="The full file path of the relevant file")
-    issue_header: str = Field(description="One or two word title for the the issue. For example: 'Possible Bug', 'Performance Issue', 'Code Smell', etc.")
-    issue_content: str = Field(description="A short and concise summary of what should be further inspected and validated during the PR review process for this issue. Don't state line numbers here")
+    issue_header: str = Field(description="One or two word title for the the issue. For example: 'Possible Bug', etc.")
+    issue_content: str = Field(description="A short and concise summary of what should be further inspected and validated during the PR review process for this issue. Do not reference line numbers in this field.")
    start_line: int = Field(description="The start line that corresponds to this issue in the relevant file")
    end_line: int = Field(description="The end line that corresponds to this issue in the relevant file")
@@ -111,32 +97,16 @@ class Review(BaseModel):
{%- if question_str %}
    insights_from_user_answers: str = Field(description="shortly summarize the insights you gained from the user's answers to the questions")
{%- endif %}
-    key_issues_to_review: List[KeyIssuesComponentLink] = Field("A diverse list of bugs, issue or major performance concerns introduced in this PR, which the PR reviewer should further investigate")
+    key_issues_to_review: List[KeyIssuesComponentLink] = Field("A short and diverse list (0-3 issues) of high-priority bugs, problems or performance concerns introduced in the PR code, which the PR reviewer should further focus on and validate during the review process.")
{%- if require_security_review %}
    security_concerns: str = Field(description="Does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? Answer 'No' (without explaining why) if there are no possible issues. If there are security concerns or issues, start your answer with a short header, such as: 'Sensitive information exposure: ...', 'SQL injection: ...' etc. Explain your answer. Be specific and give examples if possible")
{%- endif %}
{%- if require_can_be_split_review %}
    can_be_split: List[SubPR] = Field(min_items=0, max_items=3, description="Can this PR, which contains {{ num_pr_files }} changed files in total, be divided into smaller sub-PRs with distinct tasks that can be reviewed and merged independently, regardless of the order ? Make sure that the sub-PRs are indeed independent, with no code dependencies between them, and that each sub-PR represent a meaningful independent task. Output an empty list if the PR code does not need to be split.")
{%- endif %}
-{%- if num_code_suggestions > 0 %}
-
-class CodeSuggestion(BaseModel):
-    relevant_file: str = Field(description="The full file path of the relevant file")
-    language: str = Field(description="The programming language of the relevant file")
-    suggestion: str = Field(description="a concrete suggestion for meaningfully improving the new PR code. Also describe how, specifically, the suggestion can be applied to new PR code. Add tags with importance measure that matches each suggestion ('important' or 'medium'). Do not make suggestions for updating or adding docstrings, renaming PR title and description, or linter like.")
-    relevant_line: str = Field(description="a single code line taken from the relevant file, to which the suggestion applies. The code line should start with a '+'. Make sure to output the line exactly as it appears in the relevant file")
-{%- endif %}

-{%- if num_code_suggestions > 0 %}
class PRReview(BaseModel):
    review: Review
-    code_feedback: List[CodeSuggestion]
-{%- else %}
-class PRReview(BaseModel):
-    review: Review
-{%- endif %}

=====
@@ -185,18 +155,6 @@ review:
      title: ...
    - ...
{%- endif %}
-{%- if num_code_suggestions > 0 %}
-code_feedback:
-- relevant_file: |
-    directory/xxx.py
-  language: |
-    python
-  suggestion: |
-    xxx [important]
-  relevant_line: |
-    xxx
-{%- endif %}
```

Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')

View File

@@ -1,9 +1,14 @@
[pr_update_changelog_prompt]
system="""You are a language model called PR-Changelog-Updater.
-Your task is to update the CHANGELOG.md file of the project, to shortly summarize important changes introduced in this PR (the '+' lines).
-- The output should match the existing CHANGELOG.md format, style and conventions, so it will look like a natural part of the file. For example, if previous changes were summarized in a single line, you should do the same.
-- Don't repeat previous changes. Generate only new content, that is not already in the CHANGELOG.md file.
-- Be general, and avoid specific details, files, etc. The output should be minimal, no more than 3-4 short lines. Ignore non-relevant subsections.
+Your task is to add a brief summary of this PR's changes to CHANGELOG.md file of the project:
+- Follow the file's existing format and style conventions like dates, section titles, etc.
+- Only add new changes (don't repeat existing entries)
+- Be general, and avoid specific details, files, etc. The output should be minimal, no more than 3-4 short lines.
+- Write only the new content to be added to CHANGELOG.md, without any introduction or summary. The content should appear as if it's a natural part of the existing file.
+{%- if pr_link %}
+- If relevant, convert the changelog main header into a clickable link using the PR URL '{{ pr_link }}'. Format: header [*][pr_link]
+{%- endif %}

{%- if extra_instructions %}
@@ -47,16 +52,19 @@ The PR Git Diff:
{{ diff|trim }}
======

Current date:
```
{{today}}
```

-The current CHANGELOG.md:
+The current 'CHANGELOG.md' file
======
{{ changelog_file_str }}
======

Response:
+```markdown
"""

View File

@@ -1,26 +1,30 @@
import asyncio
import copy
+import difflib
+import re
import textwrap
import traceback
from functools import partial
from typing import Dict, List
from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models, \
-add_ai_metadata_to_diff_files
+from pr_agent.algo.pr_processing import (add_ai_metadata_to_diff_files,
+get_pr_diff, get_pr_multi_diffs,
+retry_with_fallback_models)
from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, replace_code_tags, ModelType, show_relevant_configurations
+from pr_agent.algo.utils import (ModelType, load_yaml, replace_code_tags,
+show_relevant_configurations)
from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context, GithubProvider, GitLabProvider, \
-AzureDevopsProvider
+from pr_agent.git_providers import (AzureDevopsProvider, GithubProvider,
+GitLabProvider, get_git_provider,
+get_git_provider_with_context)
-from pr_agent.git_providers.git_provider import get_main_pr_language
+from pr_agent.git_providers.git_provider import get_main_pr_language, GitProvider
from pr_agent.log import get_logger
from pr_agent.servers.help import HelpMessage
from pr_agent.tools.pr_description import insert_br_after_x_chars
-import difflib
-import re
class PRCodeSuggestions:
@@ -76,6 +80,7 @@ class PRCodeSuggestions:
"commit_messages_str": self.git_provider.get_commit_messages(),
"relevant_best_practices": "",
"is_ai_metadata": get_settings().get("config.enable_ai_metadata", False),
+"focus_only_on_problems": get_settings().get("pr_code_suggestions.focus_only_on_problems", False),
}
self.pr_code_suggestions_prompt_system = get_settings().pr_code_suggestions_prompt.system
@@ -98,6 +103,8 @@ class PRCodeSuggestions:
relevant_configs = {'pr_code_suggestions': dict(get_settings().pr_code_suggestions),
'config': dict(get_settings().config)}
get_logger().debug("Relevant configs", artifacts=relevant_configs)
+# publish "Preparing suggestions..." comments
if (get_settings().config.publish_output and get_settings().config.publish_output_progress and
not get_settings().config.get('is_auto_command', False)):
if self.git_provider.is_supported("gfm_markdown"):
@@ -105,33 +112,26 @@ class PRCodeSuggestions:
else:
self.git_provider.publish_comment("Preparing suggestions...", is_temporary=True)
+# call the model to get the suggestions, and self-reflect on them
if not self.is_extended:
-data = await retry_with_fallback_models(self._prepare_prediction)
+data = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)
else:
-data = await retry_with_fallback_models(self._prepare_prediction_extended)
+data = await retry_with_fallback_models(self._prepare_prediction_extended, model_type=ModelType.REGULAR)
if not data:
data = {"code_suggestions": []}
+self.data = data
+# Handle the case where the PR has no suggestions
if (data is None or 'code_suggestions' not in data or not data['code_suggestions']):
-pr_body = "## PR Code Suggestions ✨\n\nNo code suggestions found for the PR."
-if get_settings().config.publish_output:
-get_logger().warning('No code suggestions found for the PR.')
-get_logger().debug(f"PR output", artifact=pr_body)
-if self.progress_response:
-self.git_provider.edit_comment(self.progress_response, body=pr_body)
-else:
-self.git_provider.publish_comment(pr_body)
-else:
-get_settings().data = {"artifact": ""}
+await self.publish_no_suggestions()
return
-if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
-(self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions):
-get_logger().info('Ranking Suggestions...')
-data['code_suggestions'] = await self.rank_suggestions(data['code_suggestions'])
+# publish the suggestions
if get_settings().config.publish_output:
+# If a temporary comment was published, remove it
self.git_provider.remove_initial_comment()
+# Publish table summarized suggestions
if ((not get_settings().pr_code_suggestions.commitable_code_suggestions) and
self.git_provider.is_supported("gfm_markdown")):
@@ -141,10 +141,7 @@ class PRCodeSuggestions:
# require self-review
if get_settings().pr_code_suggestions.demand_code_suggestions_self_review:
-text = get_settings().pr_code_suggestions.code_suggestions_self_review_text
-pr_body += f"\n\n- [ ] {text}"
-if get_settings().pr_code_suggestions.approve_pr_on_self_review:
-pr_body += ' <!-- approve pr self-review -->'
+pr_body = await self.add_self_review_text(pr_body)
# add usage guide
if (get_settings().pr_code_suggestions.enable_chat_text and get_settings().config.is_auto_command
@@ -160,13 +157,13 @@ class PRCodeSuggestions:
pr_body += show_relevant_configurations(relevant_section='pr_code_suggestions')
# publish the PR comment
-if get_settings().pr_code_suggestions.persistent_comment:
-final_update_message = False
-self.publish_persistent_comment_with_history(pr_body,
+if get_settings().pr_code_suggestions.persistent_comment:  # true by default
+self.publish_persistent_comment_with_history(self.git_provider,
+pr_body,
initial_header="## PR Code Suggestions ✨",
update_header=True,
name="suggestions",
-final_update_message=final_update_message,
+final_update_message=False,
max_previous_comments=get_settings().pr_code_suggestions.max_history_len,
progress_response=self.progress_response)
else:
@@ -177,29 +174,15 @@ class PRCodeSuggestions:
# dual publishing mode
if int(get_settings().pr_code_suggestions.dual_publishing_score_threshold) > 0:
-data_above_threshold = {'code_suggestions': []}
-try:
-for suggestion in data['code_suggestions']:
-if int(suggestion.get('score', 0)) >= int(get_settings().pr_code_suggestions.dual_publishing_score_threshold) \
-and suggestion.get('improved_code'):
-data_above_threshold['code_suggestions'].append(suggestion)
-if not data_above_threshold['code_suggestions'][-1]['existing_code']:
-get_logger().info(f'Identical existing and improved code for dual publishing found')
-data_above_threshold['code_suggestions'][-1]['existing_code'] = suggestion[
-'improved_code']
-if data_above_threshold['code_suggestions']:
-get_logger().info(
-f"Publishing {len(data_above_threshold['code_suggestions'])} suggestions in dual publishing mode")
-self.push_inline_code_suggestions(data_above_threshold)
-except Exception as e:
-get_logger().error(f"Failed to publish dual publishing suggestions, error: {e}")
+await self.dual_publishing(data)
else:
-self.push_inline_code_suggestions(data)
+await self.push_inline_code_suggestions(data)
if self.progress_response:
self.git_provider.remove_comment(self.progress_response)
else:
get_logger().info('Code suggestions generated for PR, but not published since publish_output is False.')
-get_settings().data = {"artifact": data}
+pr_body = self.generate_summarized_suggestions(data)
+get_settings().data = {"artifact": pr_body}
return
except Exception as e:
get_logger().error(f"Failed to generate code suggestions for PR, error: {e}",
@@ -212,47 +195,108 @@ class PRCodeSuggestions:
self.git_provider.remove_initial_comment()
self.git_provider.publish_comment(f"Failed to generate code suggestions for PR")
except Exception as e:
-pass
+get_logger().exception(f"Failed to update persistent review, error: {e}")
-def publish_persistent_comment_with_history(self, pr_comment: str,
+async def add_self_review_text(self, pr_body):
+text = get_settings().pr_code_suggestions.code_suggestions_self_review_text
+pr_body += f"\n\n- [ ] {text}"
+approve_pr_on_self_review = get_settings().pr_code_suggestions.approve_pr_on_self_review
+fold_suggestions_on_self_review = get_settings().pr_code_suggestions.fold_suggestions_on_self_review
+if approve_pr_on_self_review and not fold_suggestions_on_self_review:
+pr_body += ' <!-- approve pr self-review -->'
+elif fold_suggestions_on_self_review and not approve_pr_on_self_review:
+pr_body += ' <!-- fold suggestions self-review -->'
+else:
+pr_body += ' <!-- approve and fold suggestions self-review -->'
+return pr_body
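Editor's note: `add_self_review_text` encodes intent in hidden HTML comment markers. A small sketch of how a webhook handler might branch on them when a user ticks the self-review checkbox; `on_self_review_checked` is hypothetical, and only the three marker strings come from the hunk above:

```python
# Marker strings taken verbatim from add_self_review_text above
APPROVE = '<!-- approve pr self-review -->'
FOLD = '<!-- fold suggestions self-review -->'
APPROVE_AND_FOLD = '<!-- approve and fold suggestions self-review -->'

def on_self_review_checked(comment_body: str) -> tuple[bool, bool]:
    """Return (approve, fold) according to which marker the comment carries."""
    if APPROVE_AND_FOLD in comment_body:
        return True, True
    if APPROVE in comment_body:
        return True, False
    if FOLD in comment_body:
        return False, True
    return False, False

print(on_self_review_checked("- [x] I reviewed the suggestions " + FOLD))  # (False, True)
```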
+async def publish_no_suggestions(self):
+pr_body = "## PR Code Suggestions ✨\n\nNo code suggestions found for the PR."
+if get_settings().config.publish_output and get_settings().config.publish_output_no_suggestions:
+get_logger().warning('No code suggestions found for the PR.')
+get_logger().debug(f"PR output", artifact=pr_body)
+if self.progress_response:
+self.git_provider.edit_comment(self.progress_response, body=pr_body)
+else:
+self.git_provider.publish_comment(pr_body)
+else:
+get_settings().data = {"artifact": ""}
+async def dual_publishing(self, data):
+data_above_threshold = {'code_suggestions': []}
+try:
+for suggestion in data['code_suggestions']:
+if int(suggestion.get('score', 0)) >= int(
+get_settings().pr_code_suggestions.dual_publishing_score_threshold) \
+and suggestion.get('improved_code'):
+data_above_threshold['code_suggestions'].append(suggestion)
+if not data_above_threshold['code_suggestions'][-1]['existing_code']:
+get_logger().info(f'Identical existing and improved code for dual publishing found')
+data_above_threshold['code_suggestions'][-1]['existing_code'] = suggestion[
+'improved_code']
+if data_above_threshold['code_suggestions']:
+get_logger().info(
+f"Publishing {len(data_above_threshold['code_suggestions'])} suggestions in dual publishing mode")
+await self.push_inline_code_suggestions(data_above_threshold)
+except Exception as e:
+get_logger().error(f"Failed to publish dual publishing suggestions, error: {e}")
+@staticmethod
+def publish_persistent_comment_with_history(git_provider: GitProvider,
+pr_comment: str,
initial_header: str,
update_header: bool = True,
name='review',
final_update_message=True,
max_previous_comments=4,
-progress_response=None):
+progress_response=None,
+only_fold=False):
+def _extract_link(comment_text: str):
+r = re.compile(r"<!--.*?-->")
+match = r.search(comment_text)
+up_to_commit_txt = ""
+if match:
+up_to_commit_txt = f" up to commit {match.group(0)[4:-3].strip()}"
+return up_to_commit_txt
-if isinstance(self.git_provider, AzureDevopsProvider): # get_latest_commit_url is not supported yet
+if isinstance(git_provider, AzureDevopsProvider): # get_latest_commit_url is not supported yet
if progress_response:
-self.git_provider.edit_comment(progress_response, pr_comment)
+git_provider.edit_comment(progress_response, pr_comment)
+new_comment = progress_response
else:
-self.git_provider.publish_comment(pr_comment)
+new_comment = git_provider.publish_comment(pr_comment)
-return
+return new_comment
history_header = f"#### Previous suggestions\n"
-last_commit_num = self.git_provider.get_latest_commit_url().split('/')[-1][:7]
+last_commit_num = git_provider.get_latest_commit_url().split('/')[-1][:7]
+if only_fold: # A user clicked on the 'self-review' checkbox
+text = get_settings().pr_code_suggestions.code_suggestions_self_review_text
+latest_suggestion_header = f"\n\n- [x] {text}"
+else:
latest_suggestion_header = f"Latest suggestions up to {last_commit_num}"
latest_commit_html_comment = f"<!-- {last_commit_num} -->" latest_commit_html_comment = f"<!-- {last_commit_num} -->"
found_comment = None found_comment = None
if max_previous_comments > 0: if max_previous_comments > 0:
try: try:
prev_comments = list(self.git_provider.get_issue_comments()) prev_comments = list(git_provider.get_issue_comments())
for comment in prev_comments: for comment in prev_comments:
if comment.body.startswith(initial_header): if comment.body.startswith(initial_header):
prev_suggestions = comment.body prev_suggestions = comment.body
found_comment = comment found_comment = comment
comment_url = self.git_provider.get_comment_url(comment) comment_url = git_provider.get_comment_url(comment)
if history_header.strip() not in comment.body: if history_header.strip() not in comment.body:
# no history section # no history section
# extract everything between <table> and </table> in comment.body including <table> and </table> # extract everything between <table> and </table> in comment.body including <table> and </table>
table_index = comment.body.find("<table>") table_index = comment.body.find("<table>")
if table_index == -1: if table_index == -1:
self.git_provider.edit_comment(comment, pr_comment) git_provider.edit_comment(comment, pr_comment)
continue continue
# find http link from comment.body[:table_index] # find http link from comment.body[:table_index]
up_to_commit_txt = self.extract_link(comment.body[:table_index]) up_to_commit_txt = _extract_link(comment.body[:table_index])
prev_suggestion_table = comment.body[ prev_suggestion_table = comment.body[
table_index:comment.body.rfind("</table>") + len("</table>")] table_index:comment.body.rfind("</table>") + len("</table>")]
@@ -273,7 +317,7 @@ class PRCodeSuggestions:
# get text after the latest_suggestion_header in comment.body
table_ind = latest_table.find("<table>")
-up_to_commit_txt = self.extract_link(latest_table[:table_ind])
+up_to_commit_txt = _extract_link(latest_table[:table_ind])
latest_table = latest_table[table_ind:latest_table.rfind("</table>") + len("</table>")]
# enforce max_previous_comments
@@ -300,11 +344,12 @@ class PRCodeSuggestions:
get_logger().info(f"Persistent mode - updating comment {comment_url} to latest {name} message")
if progress_response:  # publish to 'progress_response' comment, because it refreshes immediately
-self.git_provider.edit_comment(progress_response, pr_comment_updated)
-self.git_provider.remove_comment(comment)
+git_provider.edit_comment(progress_response, pr_comment_updated)
+git_provider.remove_comment(comment)
+comment = progress_response
else:
-self.git_provider.edit_comment(comment, pr_comment_updated)
+git_provider.edit_comment(comment, pr_comment_updated)
-return
+return comment
except Exception as e:
get_logger().exception(f"Failed to update persistent review, error: {e}")
pass
@@ -313,9 +358,12 @@ class PRCodeSuggestions:
body = pr_comment.replace(initial_header, "").strip()
pr_comment = f"{initial_header}\n\n{latest_commit_html_comment}\n\n{body}\n\n"
if progress_response:
-self.git_provider.edit_comment(progress_response, pr_comment)
+git_provider.edit_comment(progress_response, pr_comment)
+new_comment = progress_response
else:
-self.git_provider.publish_comment(pr_comment)
+new_comment = git_provider.publish_comment(pr_comment)
+return new_comment
def extract_link(self, s):
r = re.compile(r"<!--.*?-->")
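Editor's note: both `extract_link` and the new nested `_extract_link` recover the short commit hash that the tool hides inside an HTML comment such as `<!-- abc1234 -->`. A standalone sketch of the same regex logic:

```python
import re

def extract_up_to_commit(comment_text: str) -> str:
    match = re.search(r"<!--.*?-->", comment_text)
    if match:
        # strip the leading '<!--' (4 chars) and trailing '-->' (3 chars)
        return f" up to commit {match.group(0)[4:-3].strip()}"
    return ""

print(extract_up_to_commit("## PR Code Suggestions ✨\n\n<!-- abc1234 -->"))
# -> " up to commit abc1234"
```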
@@ -332,6 +380,8 @@ class PRCodeSuggestions:
model,
add_line_numbers_to_hunks=True,
disable_extra_lines=False)
+self.patches_diff_list = [self.patches_diff]
+self.patches_diff_no_line_number = self.remove_line_numbers([self.patches_diff])[0]
if self.patches_diff:
get_logger().debug(f"PR diff", artifact=self.patches_diff)
@@ -364,9 +414,19 @@ class PRCodeSuggestions:
response_reflect = await self.self_reflect_on_suggestions(data["code_suggestions"],
patches_diff, model=model_reflection)
if response_reflect:
+await self.analyze_self_reflection_response(data, response_reflect)
+else:
+# get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
+for i, suggestion in enumerate(data["code_suggestions"]):
+suggestion["score"] = 7
+suggestion["score_why"] = ""
+return data
+async def analyze_self_reflection_response(self, data, response_reflect):
response_reflect_yaml = load_yaml(response_reflect)
-code_suggestions_feedback = response_reflect_yaml["code_suggestions"]
-if len(code_suggestions_feedback) == len(data["code_suggestions"]):
+code_suggestions_feedback = response_reflect_yaml.get("code_suggestions", [])
+if code_suggestions_feedback and len(code_suggestions_feedback) == len(data["code_suggestions"]):
for i, suggestion in enumerate(data["code_suggestions"]):
try:
suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
@@ -382,8 +442,14 @@ class PRCodeSuggestions:
try:
if get_settings().config.publish_output:
-suggestion_statistics_dict = {'score': int(suggestion["score"]),
-'label': suggestion["label"].lower().strip()}
+if not suggestion["score"]:
+score = -1
+else:
+score = int(suggestion["score"])
+label = suggestion["label"].lower().strip()
+label = label.replace('<br>', ' ')
+suggestion_statistics_dict = {'score': score,
+'label': label}
get_logger().info(f"PR-Agent suggestions statistics",
statistics=suggestion_statistics_dict, analytics=True)
except Exception as e:
@@ -408,13 +474,6 @@ class PRCodeSuggestions:
suggestion['existing_code'] = ""
except Exception as e:
get_logger().error(f"Error processing suggestion {i + 1}, error: {e}")
-else:
-# get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
-for i, suggestion in enumerate(data["code_suggestions"]):
-suggestion["score"] = 7
-suggestion["score_why"] = ""
-return data
@staticmethod
def _truncate_if_needed(suggestion):
@@ -451,6 +510,11 @@ class PRCodeSuggestions:
if not is_valid_keys:
continue
+if get_settings().get("pr_code_suggestions.focus_only_on_problems", False):
+CRITICAL_LABEL = 'critical'
+if CRITICAL_LABEL in suggestion['label'].lower(): # we want the published labels to be less declarative
+suggestion['label'] = 'possible issue'
if suggestion['one_sentence_summary'] in one_sentence_summary_list:
get_logger().debug(f"Skipping suggestion {i + 1}, because it is a duplicate: {suggestion}")
continue
@@ -474,7 +538,7 @@ class PRCodeSuggestions:
return data
-def push_inline_code_suggestions(self, data):
+async def push_inline_code_suggestions(self, data):
code_suggestions = []
if not data['code_suggestions']:
@@ -572,7 +636,9 @@ class PRCodeSuggestions:
patches_diff_lines = patches_diff.splitlines()
for i, line in enumerate(patches_diff_lines):
if line.strip():
-if line[0].isdigit():
+if line.isnumeric():
+patches_diff_lines[i] = ''
+elif line[0].isdigit():
# find the first letter in the line that starts with a valid letter
for j, char in enumerate(line):
if not char.isdigit():
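Editor's note: the hunk above tightens the line-number cleanup: a line that is only digits is now blanked, while a line that merely starts with digits keeps losing its numeric prefix. A sketch mirroring the shown logic, not the full method:

```python
def strip_line_numbers(patches_diff: str) -> str:
    lines = patches_diff.splitlines()
    for i, line in enumerate(lines):
        if not line.strip():
            continue
        if line.isnumeric():          # a bare line number, e.g. "42"
            lines[i] = ''
        elif line[0].isdigit():       # "42 +some code" -> " +some code"
            for j, char in enumerate(line):
                if not char.isdigit():
                    lines[i] = line[j:]
                    break
    return '\n'.join(lines)

print(strip_line_numbers("12\n34 +def foo():\n+    pass"))
```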
@@ -630,62 +696,6 @@ class PRCodeSuggestions:
self.data = data = None
return data
-async def rank_suggestions(self, data: List) -> List:
-"""
-Call a model to rank (sort) code suggestions based on their importance order.
-Args:
-data (List): A list of code suggestions to be ranked.
-Returns:
-List: The ranked list of code suggestions.
-"""
-suggestion_list = []
-if not data:
-return suggestion_list
-for suggestion in data:
-suggestion_list.append(suggestion)
-data_sorted = [[]] * len(suggestion_list)
-if len(suggestion_list) == 1:
-return suggestion_list
-try:
-suggestion_str = ""
-for i, suggestion in enumerate(suggestion_list):
-suggestion_str += f"suggestion {i + 1}: " + str(suggestion) + '\n\n'
-variables = {'suggestion_list': suggestion_list, 'suggestion_str': suggestion_str}
-model = get_settings().config.model
-environment = Environment(undefined=StrictUndefined)
-system_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.system).render(
-variables)
-user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
-response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
-user=user_prompt)
-sort_order = load_yaml(response)
-for s in sort_order['Sort Order']:
-suggestion_number = s['suggestion number']
-importance_order = s['importance order']
-data_sorted[importance_order - 1] = suggestion_list[suggestion_number - 1]
-if get_settings().pr_code_suggestions.final_clip_factor != 1:
-max_len = max(
-len(data_sorted),
-int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk),
-)
-new_len = int(0.5 + max_len * get_settings().pr_code_suggestions.final_clip_factor)
-if new_len < len(data_sorted):
-data_sorted = data_sorted[:new_len]
-except Exception as e:
-if get_settings().config.verbosity_level >= 1:
-get_logger().info(f"Could not sort suggestions, error: {e}")
-data_sorted = suggestion_list
-return data_sorted
def generate_summarized_suggestions(self, data: Dict) -> str:
try:
pr_body = "## PR Code Suggestions ✨\n\n"
@@ -801,7 +811,12 @@ class PRCodeSuggestions:
get_logger().info(f"Failed to publish summarized code suggestions, error: {e}")
return ""
-async def self_reflect_on_suggestions(self, suggestion_list: List, patches_diff: str, model: str) -> str:
+async def self_reflect_on_suggestions(self,
+suggestion_list: List,
+patches_diff: str,
+model: str,
+prev_suggestions_str: str = "",
+dedicated_prompt: str = "") -> str:
if not suggestion_list:
return ""
@@ -814,13 +829,21 @@ class PRCodeSuggestions:
'suggestion_str': suggestion_str,
"diff": patches_diff,
'num_code_suggestions': len(suggestion_list),
+'prev_suggestions_str': prev_suggestions_str,
"is_ai_metadata": get_settings().get("config.enable_ai_metadata", False)}
environment = Environment(undefined=StrictUndefined)
+if dedicated_prompt:
+system_prompt_reflect = environment.from_string(
+get_settings().get(dedicated_prompt).system).render(variables)
+user_prompt_reflect = environment.from_string(
+get_settings().get(dedicated_prompt).user).render(variables)
+else:
-system_prompt_reflect = environment.from_string(
-get_settings().pr_code_suggestions_reflect_prompt.system).render(
-variables)
+system_prompt_reflect = environment.from_string(
+get_settings().pr_code_suggestions_reflect_prompt.system).render(variables)
-user_prompt_reflect = environment.from_string(
-get_settings().pr_code_suggestions_reflect_prompt.user).render(variables)
+user_prompt_reflect = environment.from_string(
+get_settings().pr_code_suggestions_reflect_prompt.user).render(variables)
with get_logger().contextualize(command="self_reflect_on_suggestions"):
response_reflect, finish_reason_reflect = await self.ai_handler.chat_completion(model=model,
system=system_prompt_reflect,
@@ -829,4 +852,3 @@ class PRCodeSuggestions:
get_logger().info(f"Could not reflect on suggestions, error: {e}")
return ""
return response_reflect

View File

@@ -1,6 +1,7 @@
import asyncio
import copy
import re
+import traceback
from functools import partial
from typing import List, Tuple
@@ -9,19 +10,24 @@ from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_diff_multiple_patchs, \
-OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD
+from pr_agent.algo.pr_processing import (OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD,
+get_pr_diff,
+get_pr_diff_multiple_patchs,
+retry_with_fallback_models)
from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import set_custom_labels, PRDescriptionHeader
-from pr_agent.algo.utils import load_yaml, get_user_labels, ModelType, show_relevant_configurations, get_max_tokens, \
-clip_tokens
+from pr_agent.algo.utils import (ModelType, PRDescriptionHeader, clip_tokens,
+get_max_tokens, get_user_labels, load_yaml,
+set_custom_labels,
+show_relevant_configurations)
from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, GithubProvider, get_git_provider_with_context
+from pr_agent.git_providers import (GithubProvider, get_git_provider,
+get_git_provider_with_context)
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
from pr_agent.servers.help import HelpMessage
-from pr_agent.tools.ticket_pr_compliance_check import extract_ticket_links_from_pr_description, extract_tickets, \
-extract_and_cache_pr_tickets
+from pr_agent.tools.ticket_pr_compliance_check import (
+extract_and_cache_pr_tickets, extract_ticket_links_from_pr_description,
+extract_tickets)
class PRDescription:
@@ -52,6 +58,7 @@ class PRDescription:
self.ai_handler.main_pr_language = self.main_pr_language
# Initialize the variables dictionary
+self.COLLAPSIBLE_FILE_LIST_THRESHOLD = get_settings().pr_description.get("collapsible_file_list_threshold", 8)
self.vars = {
"title": self.git_provider.pr.title,
"branch": self.git_provider.get_pr_branch(),
@@ -64,6 +71,7 @@ class PRDescription:
"custom_labels_class": "",  # will be filled if necessary in 'set_custom_labels' function
"enable_semantic_files_types": get_settings().pr_description.enable_semantic_files_types,
"related_tickets": "",
+"include_file_summary_changes": len(self.git_provider.get_diff_files()) <= self.COLLAPSIBLE_FILE_LIST_THRESHOLD
}
self.user_description = self.git_provider.get_user_description()
@@ -80,7 +88,6 @@ class PRDescription:
self.patches_diff = None
self.prediction = None
self.file_label_dict = None
-self.COLLAPSIBLE_FILE_LIST_THRESHOLD = 8
async def run(self):
try:
@@ -94,7 +101,7 @@ class PRDescription:
# ticket extraction if exists
await extract_and_cache_pr_tickets(self.git_provider, self.vars)
-await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
+await retry_with_fallback_models(self._prepare_prediction, ModelType.WEAK)
if self.prediction:
self._prepare_data()
@@ -109,6 +116,8 @@ class PRDescription:
pr_labels, pr_file_changes = [], []
if get_settings().pr_description.publish_labels:
pr_labels = self._prepare_labels()
+else:
+get_logger().debug(f"Publishing labels disabled")
if get_settings().pr_description.use_description_markers:
pr_title, pr_body, changes_walkthrough, pr_file_changes = self._prepare_pr_answer_with_markers()
@@ -132,6 +141,7 @@ class PRDescription:
pr_body += show_relevant_configurations(relevant_section='pr_description')
if get_settings().config.publish_output:
+# publish labels
if get_settings().pr_description.publish_labels and pr_labels and self.git_provider.is_supported("get_labels"):
original_labels = self.git_provider.get_pr_labels(update=True)
@@ -159,43 +169,49 @@ class PRDescription:
self.git_provider.publish_description(pr_title, pr_body)
# publish final update message
-if (get_settings().pr_description.final_update_message):
+if (get_settings().pr_description.final_update_message and not get_settings().config.get('is_auto_command', False)):
latest_commit_url = self.git_provider.get_latest_commit_url()
if latest_commit_url:
pr_url = self.git_provider.get_pr_url()
update_comment = f"**[PR Description]({pr_url})** updated to latest commit ({latest_commit_url})"
self.git_provider.publish_comment(update_comment)
self.git_provider.remove_initial_comment()
+else:
+get_logger().info('PR description, but not published since publish_output is False.')
+get_settings().data = {"artifact": pr_body}
+return
except Exception as e:
-get_logger().error(f"Error generating PR description {self.pr_id}: {e}")
+get_logger().error(f"Error generating PR description {self.pr_id}: {e}",
+artifact={"traceback": traceback.format_exc()})
return ""
async def _prepare_prediction(self, model: str) -> None:
if get_settings().pr_description.use_description_markers and 'pr_agent:' not in self.user_description:
-get_logger().info(
-"Markers were enabled, but user description does not contain markers. skipping AI prediction")
+get_logger().info("Markers were enabled, but user description does not contain markers. skipping AI prediction")
return None
large_pr_handling = get_settings().pr_description.enable_large_pr_handling and "pr_description_only_files_prompts" in get_settings()
-output = get_pr_diff(self.git_provider, self.token_handler, model, large_pr_handling=large_pr_handling,
-return_remaining_files=True)
+output = get_pr_diff(self.git_provider, self.token_handler, model, large_pr_handling=large_pr_handling, return_remaining_files=True)
if isinstance(output, tuple):
patches_diff, remaining_files_list = output
else:
patches_diff = output
remaining_files_list = []
if not large_pr_handling or patches_diff:
self.patches_diff = patches_diff
if patches_diff:
+# generate the prediction
get_logger().debug(f"PR diff", artifact=self.patches_diff)
self.prediction = await self._get_prediction(model, patches_diff, prompt="pr_description_prompt")
-if (remaining_files_list and 'pr_files' in self.prediction and 'label:' in self.prediction and
-get_settings().pr_description.mention_extra_files):
-get_logger().debug(f"Extending additional files, {len(remaining_files_list)} files")
-self.prediction = await self.extend_additional_files(remaining_files_list)
+# extend the prediction with additional files not shown
+if get_settings().pr_description.enable_semantic_files_types:
+self.prediction = await self.extend_uncovered_files(self.prediction)
else:
-get_logger().error(f"Error getting PR diff {self.pr_id}")
+get_logger().error(f"Error getting PR diff {self.pr_id}",
+artifact={"traceback": traceback.format_exc()})
self.prediction = None
else:
# get the diff in multiple patches, with the token handler only for the files prompt
@@ -280,43 +296,81 @@ class PRDescription:
prompt="pr_description_only_description_prompts")
prediction_headers = prediction_headers.strip().removeprefix('```yaml').strip('`').strip()
-# manually add extra files to final prediction
-MAX_EXTRA_FILES_TO_OUTPUT = 100
-if get_settings().pr_description.mention_extra_files:
-for i, file in enumerate(remaining_files_list):
-extra_file_yaml = f"""\
-- filename: |
-{file}
-changes_summary: |
-...
-changes_title: |
-...
-label: |
-additional files (token-limit)
-"""
-files_walkthrough = files_walkthrough.strip() + "\n" + extra_file_yaml.strip()
-if i >= MAX_EXTRA_FILES_TO_OUTPUT:
-files_walkthrough += f"""\
-extra_file_yaml =
-- filename: |
-Additional {len(remaining_files_list) - MAX_EXTRA_FILES_TO_OUTPUT} files not shown
-changes_summary: |
-...
-changes_title: |
-...
-label: |
-additional files (token-limit)
-"""
-break
+# extend the tables with the files not shown
+files_walkthrough_extended = await self.extend_uncovered_files(files_walkthrough)
# final processing
-self.prediction = prediction_headers + "\n" + "pr_files:\n" + files_walkthrough
+self.prediction = prediction_headers + "\n" + "pr_files:\n" + files_walkthrough_extended
if not load_yaml(self.prediction, keys_fix_yaml=self.keys_fix):
get_logger().error(f"Error getting valid YAML in large PR handling for describe {self.pr_id}")
if load_yaml(prediction_headers, keys_fix_yaml=self.keys_fix):
get_logger().debug(f"Using only headers for describe {self.pr_id}")
self.prediction = prediction_headers
+async def extend_uncovered_files(self, original_prediction: str) -> str:
+try:
+prediction = original_prediction
+# get the original prediction filenames
+original_prediction_loaded = load_yaml(original_prediction, keys_fix_yaml=self.keys_fix)
+if isinstance(original_prediction_loaded, list):
+original_prediction_dict = {"pr_files": original_prediction_loaded}
+else:
+original_prediction_dict = original_prediction_loaded
+filenames_predicted = [file['filename'].strip() for file in original_prediction_dict.get('pr_files', [])]
+# extend the prediction with additional files not included in the original prediction
+pr_files = self.git_provider.get_diff_files()
+prediction_extra = "pr_files:"
+MAX_EXTRA_FILES_TO_OUTPUT = 100
+counter_extra_files = 0
+for file in pr_files:
+if file.filename in filenames_predicted:
+continue
+# add up to MAX_EXTRA_FILES_TO_OUTPUT files
+counter_extra_files += 1
+if counter_extra_files > MAX_EXTRA_FILES_TO_OUTPUT:
+extra_file_yaml = f"""\
+- filename: |
+Additional files not shown
+changes_title: |
+...
+label: |
+additional files
+"""
+prediction_extra = prediction_extra + "\n" + extra_file_yaml.strip()
+get_logger().debug(f"Too many remaining files, clipping to {MAX_EXTRA_FILES_TO_OUTPUT}")
+break
+extra_file_yaml = f"""\
+- filename: |
+{file.filename}
+changes_title: |
+...
+label: |
+additional files
+"""
+prediction_extra = prediction_extra + "\n" + extra_file_yaml.strip()
+# merge the two dictionaries
+if counter_extra_files > 0:
+get_logger().info(f"Adding {counter_extra_files} unprocessed extra files to table prediction")
+prediction_extra_dict = load_yaml(prediction_extra, keys_fix_yaml=self.keys_fix)
+if isinstance(original_prediction_dict, dict) and isinstance(prediction_extra_dict, dict):
+original_prediction_dict["pr_files"].extend(prediction_extra_dict["pr_files"])
+new_yaml = yaml.dump(original_prediction_dict)
+if load_yaml(new_yaml, keys_fix_yaml=self.keys_fix):
+prediction = new_yaml
+if isinstance(original_prediction, list):
+prediction = yaml.dump(original_prediction_dict["pr_files"])
+return prediction
+except Exception as e:
+get_logger().error(f"Error extending uncovered files {self.pr_id}: {e}")
+return original_prediction
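Editor's note: `extend_uncovered_files` appends stub `pr_files` entries for diff files the model never mentioned, then re-serializes the merged YAML. A condensed sketch of that merge with plain PyYAML; the filenames are illustrative:

```python
import yaml

original_prediction = """
pr_files:
- filename: |
    pr_agent/tools/pr_description.py
  changes_title: |
    Refactor file handling
  label: |
    enhancement
"""
all_diff_files = ["pr_agent/tools/pr_description.py", "pr_agent/algo/utils.py"]

data = yaml.safe_load(original_prediction)
predicted = {f['filename'].strip() for f in data['pr_files']}
for filename in all_diff_files:
    if filename not in predicted:
        # stub entry for a file the model did not cover
        data['pr_files'].append({'filename': filename,
                                 'changes_title': '...',
                                 'label': 'additional files'})
print(yaml.dump(data))
```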
async def extend_additional_files(self, remaining_files_list) -> str:
prediction = self.prediction
try:
@@ -388,31 +442,31 @@ extra_file_yaml =
self.data['pr_files'] = self.data.pop('pr_files')
def _prepare_labels(self) -> List[str]:
-pr_types = []
+pr_labels = []
# If the 'PR Type' key is present in the dictionary, split its value by comma and assign it to 'pr_types'
-if 'labels' in self.data:
+if 'labels' in self.data and self.data['labels']:
if type(self.data['labels']) == list:
-pr_types = self.data['labels']
+pr_labels = self.data['labels']
elif type(self.data['labels']) == str:
-pr_types = self.data['labels'].split(',')
+pr_labels = self.data['labels'].split(',')
-elif 'type' in self.data:
+elif 'type' in self.data and self.data['type'] and get_settings().pr_description.publish_labels:
if type(self.data['type']) == list:
-pr_types = self.data['type']
+pr_labels = self.data['type']
elif type(self.data['type']) == str:
-pr_types = self.data['type'].split(',')
+pr_labels = self.data['type'].split(',')
-pr_types = [label.strip() for label in pr_types]
+pr_labels = [label.strip() for label in pr_labels]
# convert lowercase labels to original case
try:
if "labels_minimal_to_labels_dict" in self.variables:
d: dict = self.variables["labels_minimal_to_labels_dict"]
-for i, label_i in enumerate(pr_types):
+for i, label_i in enumerate(pr_labels):
if label_i in d:
-pr_types[i] = d[label_i]
+pr_labels[i] = d[label_i]
except Exception as e:
get_logger().error(f"Error converting labels to original case {self.pr_id}: {e}")
-return pr_types
+return pr_labels
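Editor's note: the rename from `pr_types` to `pr_labels` does not change the normalization contract: accept a list or a comma-separated string, strip whitespace, and map lowercase labels back to their original case. A compact sketch; the mapping dict is an illustrative stand-in:

```python
def normalize_labels(raw, labels_minimal_to_labels_dict=None):
    # accept either a list or a comma-separated string
    if isinstance(raw, str):
        labels = raw.split(',')
    else:
        labels = list(raw or [])
    labels = [label.strip() for label in labels]
    # convert lowercase labels back to their original case
    for i, label in enumerate(labels):
        if labels_minimal_to_labels_dict and label in labels_minimal_to_labels_dict:
            labels[i] = labels_minimal_to_labels_dict[label]
    return labels

print(normalize_labels("bug fix, enhancement", {"bug fix": "Bug fix"}))
# -> ['Bug fix', 'enhancement']
```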
def _prepare_pr_answer_with_markers(self) -> Tuple[str, str, str, List[dict]]:
get_logger().info(f"Using description marker replacements {self.pr_id}")
@@ -519,14 +573,18 @@ extra_file_yaml =
return file_label_dict
for file in self.data['pr_files']:
try:
-required_fields = ['changes_summary', 'changes_title', 'filename', 'label']
+required_fields = ['changes_title', 'filename', 'label']
if not all(field in file for field in required_fields):
# can happen for example if a YAML generation was interrupted in the middle (no more tokens)
get_logger().warning(f"Missing required fields in file label dict {self.pr_id}, skipping file",
artifact={"file": file})
continue
+if not file.get('changes_title'):
+get_logger().warning(f"Empty changes title or summary in file label dict {self.pr_id}, skipping file",
+artifact={"file": file})
+continue
filename = file['filename'].replace("'", "`").replace('"', '`')
-changes_summary = file['changes_summary']
+changes_summary = file.get('changes_summary', "").strip()
changes_title = file['changes_title'].strip()
label = file.get('label').strip().lower()
if label not in file_label_dict:
@@ -569,12 +627,14 @@ extra_file_yaml =
for filename, file_changes_title, file_change_description in list_tuples:
filename = filename.replace("'", "`").rstrip()
filename_publish = filename.split("/")[-1]
+if file_changes_title and file_changes_title.strip() != "...":
file_changes_title_code = f"<code>{file_changes_title}</code>"
file_changes_title_code_br = insert_br_after_x_chars(file_changes_title_code, x=(delta - 5)).strip()
if len(file_changes_title_code_br) < (delta - 5):
file_changes_title_code_br += "&nbsp; " * ((delta - 5) - len(file_changes_title_code_br))
filename_publish = f"<strong>{filename_publish}</strong><dd>{file_changes_title_code_br}</dd>"
+else:
+filename_publish = f"<strong>{filename_publish}</strong>"
diff_plus_minus = ""
delta_nbsp = ""
diff_files = self.git_provider.get_diff_files()
@@ -583,6 +643,8 @@ extra_file_yaml =
num_plus_lines = f.num_plus_lines
num_minus_lines = f.num_minus_lines
diff_plus_minus += f"+{num_plus_lines}/-{num_minus_lines}"
+if len(diff_plus_minus) > 12 or diff_plus_minus == "+0/-0":
+diff_plus_minus = "[link]"
delta_nbsp = "&nbsp; " * max(0, (8 - len(diff_plus_minus)))
break
@@ -591,8 +653,39 @@ extra_file_yaml =
if hasattr(self.git_provider, 'get_line_link'):
filename = filename.strip()
link = self.git_provider.get_line_link(filename, relevant_line_start=-1)
+if (not link or not diff_plus_minus) and ('additional files' not in filename.lower()):
+get_logger().warning(f"Error getting line link for '{filename}'")
+continue
+# Add file data to the PR body
file_change_description_br = insert_br_after_x_chars(file_change_description, x=(delta - 5))
+pr_body = self.add_file_data(delta_nbsp, diff_plus_minus, file_change_description_br, filename,
+filename_publish, link, pr_body)
+# Close the collapsible file list
+if use_collapsible_file_list:
+pr_body += """</table></details></td></tr>"""
+else:
+pr_body += """</table></td></tr>"""
+pr_body += """</tr></tbody></table>"""
+except Exception as e:
+get_logger().error(f"Error processing pr files to markdown {self.pr_id}: {str(e)}")
+pass
+return pr_body, pr_comments
+def add_file_data(self, delta_nbsp, diff_plus_minus, file_change_description_br, filename, filename_publish, link,
+pr_body) -> str:
+if not file_change_description_br:
+pr_body += f"""
+<tr>
+<td>{filename_publish}</td>
+<td><a href="{link}">{diff_plus_minus}</a>{delta_nbsp}</td>
+</tr>
+"""
+else:
pr_body += f"""
<tr>
<td>
@@ -613,17 +706,7 @@ extra_file_yaml =
</tr>
"""
-if use_collapsible_file_list:
-pr_body += """</table></details></td></tr>"""
-else:
-pr_body += """</table></td></tr>"""
-pr_body += """</tr></tbody></table>"""
-except Exception as e:
-get_logger().error(f"Error processing pr files to markdown {self.pr_id}: {e}")
-pass
-return pr_body, pr_comments
+return pr_body
def count_chars_without_html(string):
if '<' not in string:
@@ -632,11 +715,14 @@ def count_chars_without_html(string):
return len(no_html_string)
-def insert_br_after_x_chars(text, x=70):
+def insert_br_after_x_chars(text: str, x=70):
"""
Insert <br> into a string after a word that increases its length above x characters.
Use proper HTML tags for code and new lines.
"""
+if not text:
+return ""
if count_chars_without_html(text) < x:
return text
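Editor's note: the new guard returns early for empty input before the length check. A sketch of the gating helpers; the regex-based tag stripping is an assumption, and the wrapping logic itself is elided:

```python
import re

def count_chars_without_html(string: str) -> int:
    # HTML tags are excluded from the visible-length check
    if '<' not in string:
        return len(string)
    return len(re.sub(r'<[^>]*>', '', string))

def insert_br_after_x_chars(text: str, x: int = 70) -> str:
    if not text:
        return ""
    if count_chars_without_html(text) < x:
        return text  # short enough, nothing to wrap
    ...  # wrapping logic elided
    return text

print(count_chars_without_html("<code>short title</code>"))  # 11
```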

View File

@@ -9,7 +9,7 @@ from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import load_yaml, set_custom_labels, get_user_labels
+from pr_agent.algo.utils import get_user_labels, load_yaml, set_custom_labels
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language

View File

@@ -9,10 +9,10 @@ from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import ModelType, load_yaml, clip_tokens
+from pr_agent.algo.utils import ModelType, clip_tokens, load_yaml
from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import GithubProvider, BitbucketServerProvider, \
-get_git_provider_with_context
+from pr_agent.git_providers import (BitbucketServerProvider, GithubProvider,
+get_git_provider_with_context)
from pr_agent.log import get_logger
@@ -114,7 +114,7 @@ class PRHelpMessage:
self.vars['snippets'] = docs_prompt.strip()
# run the AI model
-response = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)
+response = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.WEAK)
response_yaml = load_yaml(response)
response_str = response_yaml.get('response')
relevant_sections = response_yaml.get('relevant_sections')

View File

@@ -1,79 +0,0 @@
-import copy
-from functools import partial
-from jinja2 import Environment, StrictUndefined
-from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
-from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
-from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider
-from pr_agent.git_providers.git_provider import get_main_pr_language
-from pr_agent.log import get_logger
-class PRInformationFromUser:
-def __init__(self, pr_url: str, args: list = None,
-ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
-self.git_provider = get_git_provider()(pr_url)
-self.main_pr_language = get_main_pr_language(
-self.git_provider.get_languages(), self.git_provider.get_files()
-)
-self.ai_handler = ai_handler()
-self.ai_handler.main_pr_language = self.main_pr_language
-self.vars = {
-"title": self.git_provider.pr.title,
-"branch": self.git_provider.get_pr_branch(),
-"description": self.git_provider.get_pr_description(),
-"language": self.main_pr_language,
-"diff": "",  # empty diff for initial calculation
-"commit_messages_str": self.git_provider.get_commit_messages(),
-}
-self.token_handler = TokenHandler(self.git_provider.pr,
-self.vars,
-get_settings().pr_information_from_user_prompt.system,
-get_settings().pr_information_from_user_prompt.user)
-self.patches_diff = None
-self.prediction = None
-async def run(self):
-get_logger().info('Generating question to the user...')
-if get_settings().config.publish_output:
-self.git_provider.publish_comment("Preparing questions...", is_temporary=True)
-await retry_with_fallback_models(self._prepare_prediction)
-get_logger().info('Preparing questions...')
-pr_comment = self._prepare_pr_answer()
-if get_settings().config.publish_output:
-get_logger().info('Pushing questions...')
-self.git_provider.publish_comment(pr_comment)
-self.git_provider.remove_initial_comment()
-return ""
-async def _prepare_prediction(self, model):
-get_logger().info('Getting PR diff...')
-self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-get_logger().info('Getting AI prediction...')
-self.prediction = await self._get_prediction(model)
-async def _get_prediction(self, model: str):
-variables = copy.deepcopy(self.vars)
-variables["diff"] = self.patches_diff  # update diff
-environment = Environment(undefined=StrictUndefined)
-system_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.system).render(variables)
-user_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.user).render(variables)
-if get_settings().config.verbosity_level >= 2:
-get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-get_logger().info(f"\nUser prompt:\n{user_prompt}")
-response, finish_reason = await self.ai_handler.chat_completion(
-model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
-return response
-def _prepare_pr_answer(self) -> str:
-model_output = self.prediction.strip()
-if get_settings().config.verbosity_level >= 2:
-get_logger().info(f"answer_str:\n{model_output}")
-answer_str = f"{model_output}\n\n Please respond to the questions above in the following format:\n\n" +\
-"\n>/answer\n>1) ...\n>2) ...\n>...\n"
-return answer_str

View File

@@ -6,8 +6,8 @@ from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, \
-    extract_hunk_lines_from_patch
+from pr_agent.algo.git_patch_processing import (
+    convert_to_hunks_with_lines_numbers, extract_hunk_lines_from_patch)
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.algo.utils import ModelType
@@ -79,13 +79,17 @@ class PR_LineQuestions:
                                                              line_end=line_end,
                                                              side=side)
        if self.patch_with_lines:
-           response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.TURBO)
+           model_answer = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.WEAK)
+           # sanitize the answer so that no line will start with "/"
+           model_answer_sanitized = model_answer.strip().replace("\n/", "\n /")
+           if model_answer_sanitized.startswith("/"):
+               model_answer_sanitized = " " + model_answer_sanitized
        get_logger().info('Preparing answer...')
        if comment_id:
-           self.git_provider.reply_to_comment_from_comment_id(comment_id, response)
+           self.git_provider.reply_to_comment_from_comment_id(comment_id, model_answer_sanitized)
        else:
-           self.git_provider.publish_comment(response)
+           self.git_provider.publish_comment(model_answer_sanitized)
        return ""
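The sanitization above (and its twin in the Ask tool below) guards against model answers that begin with "/", which could otherwise be re-parsed as a PR-Agent slash-command or break markdown rendering. A standalone sketch of the same logic, pure Python with no pr-agent imports:

def sanitize_answer(model_answer: str) -> str:
    # prefix a space before any line starting with "/" so it is not
    # interpreted as a command (e.g. "/ask", "/improve")
    sanitized = model_answer.strip().replace("\n/", "\n /")
    if sanitized.startswith("/"):
        sanitized = " " + sanitized
    return sanitized

assert sanitize_answer("/improve looks good") == " /improve looks good"
assert sanitize_answer("ok\n/ask why?") == "ok\n /ask why?"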

View File

@@ -63,7 +63,7 @@ class PRQuestions:
         if img_path:
             get_logger().debug(f"Image path identified", artifact=img_path)
-        await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO)
+        await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.WEAK)
         pr_comment = self._prepare_pr_answer()
         get_logger().debug(f"PR output", artifact=pr_comment)
@@ -117,6 +117,16 @@ class PRQuestions:
         return response
     def _prepare_pr_answer(self) -> str:
+        model_answer = self.prediction.strip()
+        # sanitize the answer so that no line will start with "/"
+        model_answer_sanitized = model_answer.replace("\n/", "\n /")
+        if model_answer_sanitized.startswith("/"):
+            model_answer_sanitized = " " + model_answer_sanitized
+        if model_answer_sanitized != model_answer:
+            get_logger().debug(f"Sanitized model answer",
+                               artifact={"model_answer": model_answer, "sanitized_answer": model_answer_sanitized})
         answer_str = f"### **Ask**❓\n{self.question_str}\n\n"
-        answer_str += f"### **Answer:**\n{self.prediction.strip()}\n\n"
+        answer_str += f"### **Answer:**\n{model_answer_sanitized}\n\n"
         return answer_str

View File

@@ -4,19 +4,27 @@ import traceback
 from collections import OrderedDict
 from functools import partial
 from typing import List, Tuple
 from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
-from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, add_ai_metadata_to_diff_files
+from pr_agent.algo.pr_processing import (add_ai_metadata_to_diff_files,
+                                         get_pr_diff,
+                                         retry_with_fallback_models)
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import github_action_output, load_yaml, ModelType, \
-    show_relevant_configurations, convert_to_markdown_v2, PRReviewHeader
+from pr_agent.algo.utils import (ModelType, PRReviewHeader,
+                                 convert_to_markdown_v2, github_action_output,
+                                 load_yaml, show_relevant_configurations)
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, get_git_provider_with_context
-from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
+from pr_agent.git_providers import (get_git_provider,
+                                    get_git_provider_with_context)
+from pr_agent.git_providers.git_provider import (IncrementalPR,
+                                                 get_main_pr_language)
 from pr_agent.log import get_logger
 from pr_agent.servers.help import HelpMessage
-from pr_agent.tools.ticket_pr_compliance_check import extract_tickets, extract_and_cache_pr_tickets
+from pr_agent.tools.ticket_pr_compliance_check import (
+    extract_and_cache_pr_tickets, extract_tickets)
 class PRReviewer:
@@ -78,7 +86,6 @@ class PRReviewer:
             "require_estimate_effort_to_review": get_settings().pr_reviewer.require_estimate_effort_to_review,
             'require_can_be_split_review': get_settings().pr_reviewer.require_can_be_split_review,
             'require_security_review': get_settings().pr_reviewer.require_security_review,
-            'num_code_suggestions': get_settings().pr_reviewer.num_code_suggestions,
             'question_str': question_str,
             'answer_str': answer_str,
             "extra_instructions": get_settings().pr_reviewer.extra_instructions,
@@ -140,7 +147,7 @@ class PRReviewer:
             if get_settings().config.publish_output and not get_settings().config.get('is_auto_command', False):
                 self.git_provider.publish_comment("Preparing review...", is_temporary=True)
-            await retry_with_fallback_models(self._prepare_prediction)
+            await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)
             if not self.prediction:
                 self.git_provider.remove_initial_comment()
                 return None
@@ -160,8 +167,10 @@ class PRReviewer:
                 self.git_provider.publish_comment(pr_review)
                 self.git_provider.remove_initial_comment()
-                if get_settings().pr_reviewer.inline_code_comments:
-                    self._publish_inline_code_comments()
+            else:
+                get_logger().info("Review output is not published")
+                get_settings().data = {"artifact": pr_review}
+                return
         except Exception as e:
             get_logger().error(f"Failed to review PR: {e}")
@@ -223,33 +232,6 @@ class PRReviewer:
             key_issues_to_review = data['review'].pop('key_issues_to_review')
             data['review']['key_issues_to_review'] = key_issues_to_review
-        if 'code_feedback' in data:
-            code_feedback = data['code_feedback']
-            # Filter out code suggestions that can be submitted as inline comments
-            if get_settings().pr_reviewer.inline_code_comments:
-                del data['code_feedback']
-            else:
-                for suggestion in code_feedback:
-                    if ('relevant_file' in suggestion) and (not suggestion['relevant_file'].startswith('``')):
-                        suggestion['relevant_file'] = f"``{suggestion['relevant_file']}``"
-                    if 'relevant_line' not in suggestion:
-                        suggestion['relevant_line'] = ''
-                    relevant_line_str = suggestion['relevant_line'].split('\n')[0]
-                    # removing '+'
-                    suggestion['relevant_line'] = relevant_line_str.lstrip('+').strip()
-                    # try to add line numbers link to code suggestions
-                    if hasattr(self.git_provider, 'generate_link_to_relevant_line_number'):
-                        link = self.git_provider.generate_link_to_relevant_line_number(suggestion)
-                        if link:
-                            suggestion['relevant_line'] = f"[{suggestion['relevant_line']}]({link})"
-                    else:
-                        pass
         incremental_review_markdown_text = None
         # Add incremental review section
         if self.incremental.is_incremental:
@@ -258,7 +240,9 @@ class PRReviewer:
             incremental_review_markdown_text = f"Starting from commit {last_commit_url}"
-        markdown_text = convert_to_markdown_v2(data, self.git_provider.is_supported("gfm_markdown"),
-                                               incremental_review_markdown_text, git_provider=self.git_provider)
+        markdown_text = convert_to_markdown_v2(data, self.git_provider.is_supported("gfm_markdown"),
+                                               incremental_review_markdown_text,
+                                               git_provider=self.git_provider,
+                                               files=self.git_provider.get_diff_files())
         # Add help text if gfm_markdown is supported
         if self.git_provider.is_supported("gfm_markdown") and get_settings().pr_reviewer.enable_help_text:
@@ -278,38 +262,6 @@ class PRReviewer:
         return markdown_text
-    def _publish_inline_code_comments(self) -> None:
-        """
-        Publishes inline comments on a pull request with code suggestions generated by the AI model.
-        """
-        if get_settings().pr_reviewer.num_code_suggestions == 0:
-            return
-        first_key = 'review'
-        last_key = 'security_concerns'
-        data = load_yaml(self.prediction.strip(),
-                         keys_fix_yaml=["ticket_compliance_check", "estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
-                                        "relevant_file:", "relevant_line:", "suggestion:"],
-                         first_key=first_key, last_key=last_key)
-        comments: List[str] = []
-        for suggestion in data.get('code_feedback', []):
-            relevant_file = suggestion.get('relevant_file', '').strip()
-            relevant_line_in_file = suggestion.get('relevant_line', '').strip()
-            content = suggestion.get('suggestion', '')
-            if not relevant_file or not relevant_line_in_file or not content:
-                get_logger().info("Skipping inline comment with missing file/line/content")
-                continue
-            if self.git_provider.is_supported("create_inline_comment"):
-                comment = self.git_provider.create_inline_comment(content, relevant_file, relevant_line_in_file)
-                if comment:
-                    comments.append(comment)
-            else:
-                self.git_provider.publish_inline_comment(content, relevant_file, relevant_line_in_file, suggestion)
-        if comments:
-            self.git_provider.publish_inline_comments(comments)
     def _get_user_answers(self) -> Tuple[str, str]:
         """
         Retrieves the question and answer strings from the discussion messages related to a pull request.

View File

@@ -34,9 +34,9 @@ class PRSimilarIssue:
         if get_settings().pr_similar_issue.vectordb == "pinecone":
             try:
+                import pandas as pd
                 import pinecone
                 from pinecone_datasets import Dataset, DatasetMetadata
-                import pandas as pd
             except:
                 raise Exception("Please install 'pinecone' and 'pinecone_datasets' to use pinecone as vectordb")
             # assuming pinecone api key and environment are set in secrets file

View File

@@ -3,14 +3,16 @@ from datetime import date
 from functools import partial
 from time import sleep
 from typing import Tuple
 from jinja2 import Environment, StrictUndefined
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.algo.utils import ModelType, show_relevant_configurations
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider, GithubProvider
+from pr_agent.git_providers import GithubProvider, get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
 from pr_agent.log import get_logger
@@ -39,6 +41,7 @@ class PRUpdateChangelog:
             "description": self.git_provider.get_pr_description(),
             "language": self.main_language,
             "diff": "",  # empty diff for initial calculation
+            "pr_link": "",
             "changelog_file_str": self.changelog_file_str,
             "today": date.today(),
             "extra_instructions": get_settings().pr_update_changelog.extra_instructions,
@@ -71,7 +74,7 @@ class PRUpdateChangelog:
         if get_settings().config.publish_output:
             self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)
-        await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.TURBO)
+        await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.WEAK)
         new_file_content, answer = self._prepare_changelog_update()
@@ -100,12 +103,23 @@ class PRUpdateChangelog:
     async def _get_prediction(self, model: str):
         variables = copy.deepcopy(self.vars)
         variables["diff"] = self.patches_diff  # update diff
+        if get_settings().pr_update_changelog.add_pr_link:
+            variables["pr_link"] = self.git_provider.get_pr_url()
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.user).render(variables)
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model, system=system_prompt, user=user_prompt, temperature=get_settings().config.temperature)
+        # post-process the response
+        response = response.strip()
+        if not response:
+            return ""
+        if response.startswith("```"):
+            response_lines = response.splitlines()
+            response_lines = response_lines[1:]
+            response = "\n".join(response_lines)
+        response = response.strip("`")
         return response
     def _prepare_changelog_update(self) -> Tuple[str, str]:
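The post-processing added above strips a markdown code fence that models sometimes wrap around the changelog text. A standalone sketch of the same idea, with no pr-agent imports:

def strip_code_fence(response: str) -> str:
    response = response.strip()
    if not response:
        return ""
    if response.startswith("```"):
        # drop the opening fence line (e.g. "```markdown")
        response = "\n".join(response.splitlines()[1:])
    return response.strip("`")  # drop a trailing closing fence

assert strip_code_fence("```markdown\n## v0.26\n- fix\n```") == "## v0.26\n- fix\n"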

View File

@@ -108,7 +108,7 @@ async def extract_tickets(git_provider):
 async def extract_and_cache_pr_tickets(git_provider, vars):
-    if get_settings().get('config.require_ticket_analysis_review', False):
+    if not get_settings().get('pr_reviewer.require_ticket_analysis_review', False):
         return
     related_tickets = get_settings().get('related_tickets', [])
     if not related_tickets:
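Besides moving the setting under the pr_reviewer namespace, this one-liner appears to fix an inverted guard: the old code returned early when ticket analysis was enabled, i.e. skipped the work exactly when it was requested. A toy illustration of the corrected gate, with settings stubbed as a plain dict:

def extract_and_cache_pr_tickets(settings: dict) -> str:
    # early-exit when the feature is DISABLED (the old code exited when enabled)
    if not settings.get("pr_reviewer.require_ticket_analysis_review", False):
        return "skipped"
    return "analyzed"  # stand-in for the real ticket extraction

assert extract_and_cache_pr_tickets({"pr_reviewer.require_ticket_analysis_review": True}) == "analyzed"
assert extract_and_cache_pr_tickets({}) == "skipped"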

View File

@@ -4,21 +4,21 @@ build-backend = "setuptools.build_meta"
 [project]
 name = "pr-agent"
-version = "0.2.4"
+version = "0.2.5"
-authors = [{name= "CodiumAI", email = "tal.r@codium.ai"}]
+authors = [{ name = "CodiumAI", email = "tal.r@codium.ai" }]
 maintainers = [
-    {name = "Tal Ridnik", email = "tal.r@codium.ai"},
-    {name = "Ori Kotek", email = "ori.k@codium.ai"},
-    {name = "Hussam Lawen", email = "hussam.l@codium.ai"},
+    { name = "Tal Ridnik", email = "tal.r@codium.ai" },
+    { name = "Ori Kotek", email = "ori.k@codium.ai" },
+    { name = "Hussam Lawen", email = "hussam.l@codium.ai" },
 ]
 description = "CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedbacks and suggestions."
 readme = "README.md"
 requires-python = ">=3.10"
 keywords = ["AI", "Agents", "Pull Request", "Automation", "Code Review"]
-license = {name = "Apache 2.0", file = "LICENSE"}
+license = { name = "Apache 2.0", file = "LICENSE" }
 classifiers = [
     "Intended Audience :: Developers",
@@ -28,7 +28,7 @@ dynamic = ["dependencies"]
 [tool.setuptools.dynamic]
-dependencies = {file = ["requirements.txt"]}
+dependencies = { file = ["requirements.txt"] }
 [project.urls]
 "Homepage" = "https://github.com/Codium-ai/pr-agent"
@@ -40,41 +40,43 @@ license-files = ["LICENSE"]
 [tool.setuptools.packages.find]
 where = ["."]
-include = ["pr_agent*"] # include pr_agent and any sub-packages it finds under it.
+include = [
+    "pr_agent*",
+] # include pr_agent and any sub-packages it finds under it.
 [project.scripts]
 pr-agent = "pr_agent.cli:run"
 [tool.ruff]
 line-length = 120
-select = [
+lint.select = [
     "E", # Pyflakes
     "F", # Pyflakes
     "B", # flake8-bugbear
     "I001", # isort basic checks
     "I002", # isort missing-required-import
 ]
 # First commit - only fixing isort
-fixable = [
+lint.fixable = [
     "I001", # isort basic checks
 ]
-unfixable = [
+lint.unfixable = [
     "B", # Avoid trying to fix flake8-bugbear (`B`) violations.
 ]
-exclude = [
-    "api/code_completions",
-]
-ignore = [
-    "E999", "B008"
-]
+lint.exclude = ["api/code_completions"]
+lint.ignore = ["E999", "B008"]
-[tool.ruff.per-file-ignores]
-"__init__.py" = ["E402"] # Ignore `E402` (import violations) in all `__init__.py` files, and in `path/to/file.py`.
-# TODO: should decide if maybe not to ignore these.
+[tool.ruff.lint.per-file-ignores]
+"__init__.py" = [
+    "E402",
+] # Ignore `E402` (import violations) in all `__init__.py` files, and in `path/to/file.py`.
+[tool.bandit]
+exclude_dirs = ["tests"]
+skips = ["B101"]
+tests = []

View File

@@ -1,3 +1,4 @@
 pytest==7.4.0
 poetry
 twine
+pre-commit>=4,<5

View File

@@ -1,5 +1,5 @@
 aiohttp==3.9.5
-anthropic[vertex]==0.37.1
+anthropic[vertex]==0.39.0
 atlassian-python-api==3.41.4
 azure-devops==7.1.0b3
 azure-identity==1.15.0
@@ -12,17 +12,17 @@ google-cloud-aiplatform==1.38.0
 google-generativeai==0.8.3
 google-cloud-storage==2.10.0
 Jinja2==3.1.2
-litellm==1.50.2
+litellm==1.52.12
 loguru==0.7.2
 msrest==0.7.1
-openai==1.52.1
+openai==1.55.3
 pytest==7.4.0
 PyGithub==1.59.*
 PyYAML==6.0.1
 python-gitlab==3.15.0
 retry==0.9.2
 starlette-context==0.3.6
-tiktoken==0.7.0
+tiktoken==0.8.0
 ujson==5.8.0
 uvicorn==0.22.0
 tenacity==8.2.3

View File

@@ -32,4 +32,3 @@ def main():
 if __name__ == '__main__':
     main()
 """

View File

@@ -5,16 +5,16 @@ import time
 from datetime import datetime
 import jwt
-from atlassian.bitbucket import Cloud
 import requests
+from atlassian.bitbucket import Cloud
 from requests.auth import HTTPBasicAuth
 from pr_agent.config_loader import get_settings
-from pr_agent.log import setup_logger, get_logger
+from pr_agent.log import get_logger, setup_logger
-from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
-    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES
+from tests.e2e_tests.e2e_utils import (FILE_PATH,
+                                       IMPROVE_START_WITH_REGEX_PATTERN,
+                                       NEW_FILE_CONTENT, NUM_MINUTES,
+                                       PR_HEADER_START_WITH, REVIEW_START_WITH)
 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)

View File

@@ -5,9 +5,11 @@ from datetime import datetime
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
-from pr_agent.log import setup_logger, get_logger
+from pr_agent.log import get_logger, setup_logger
-from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
-    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES
+from tests.e2e_tests.e2e_utils import (FILE_PATH,
+                                       IMPROVE_START_WITH_REGEX_PATTERN,
+                                       NEW_FILE_CONTENT, NUM_MINUTES,
+                                       PR_HEADER_START_WITH, REVIEW_START_WITH)
 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)

View File

@@ -7,9 +7,11 @@ import gitlab
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
-from pr_agent.log import setup_logger, get_logger
+from pr_agent.log import get_logger, setup_logger
-from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
-    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES
+from tests.e2e_tests.e2e_utils import (FILE_PATH,
+                                       IMPROVE_START_WITH_REGEX_PATTERN,
+                                       NEW_FILE_CONTENT, NUM_MINUTES,
+                                       PR_HEADER_START_WITH, REVIEW_START_WITH)
 log_level = os.environ.get("LOG_LEVEL", "INFO")
 setup_logger(log_level)

tests/health_test/main.py (new file, 70 lines)
View File

@@ -0,0 +1,70 @@
import argparse
import asyncio
import copy
import os
from pathlib import Path
from starlette_context import request_cycle_context, context
from pr_agent.cli import run_command
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.agent.pr_agent import PRAgent, commands
from pr_agent.log import get_logger, setup_logger
from tests.e2e_tests import e2e_utils
log_level = os.environ.get("LOG_LEVEL", "INFO")
setup_logger(log_level)
async def run_async():
pr_url = os.getenv('TEST_PR_URL', 'https://github.com/Codium-ai/pr-agent/pull/1385')
get_settings().set("config.git_provider", "github")
get_settings().set("config.publish_output", False)
get_settings().set("config.fallback_models", [])
agent = PRAgent()
try:
# Run the 'describe' command
get_logger().info(f"\nSanity check for the 'describe' command...")
original_settings = copy.deepcopy(get_settings())
await agent.handle_request(pr_url, ['describe'])
pr_header_body = dict(get_settings().data)['artifact']
assert pr_header_body.startswith('###') and 'PR Type' in pr_header_body and 'Description' in pr_header_body
context['settings'] = copy.deepcopy(original_settings) # Restore settings state after each test to prevent test interference
get_logger().info("PR description generated successfully\n")
# Run the 'review' command
get_logger().info(f"\nSanity check for the 'review' command...")
original_settings = copy.deepcopy(get_settings())
await agent.handle_request(pr_url, ['review'])
pr_review_body = dict(get_settings().data)['artifact']
assert pr_review_body.startswith('##') and 'PR Reviewer Guide' in pr_review_body
context['settings'] = copy.deepcopy(original_settings) # Restore settings state after each test to prevent test interference
get_logger().info("PR review generated successfully\n")
# Run the 'improve' command
get_logger().info(f"\nSanity check for the 'improve' command...")
original_settings = copy.deepcopy(get_settings())
await agent.handle_request(pr_url, ['improve'])
pr_improve_body = dict(get_settings().data)['artifact']
assert pr_improve_body.startswith('##') and 'PR Code Suggestions' in pr_improve_body
context['settings'] = copy.deepcopy(original_settings) # Restore settings state after each test to prevent test interference
get_logger().info("PR improvements generated successfully\n")
get_logger().info(f"\n\n========\nHealth test passed successfully\n========")
except Exception as e:
get_logger().exception(f"\n\n========\nHealth test failed\n========")
raise e
def run():
with request_cycle_context({}):
context['settings'] = copy.deepcopy(global_settings)
asyncio.run(run_async())
if __name__ == '__main__':
run()
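The health test reads each tool's output from get_settings().data['artifact'] — the hand-off added to the review tool above, which stores the rendered markdown instead of posting a comment when config.publish_output is false. A minimal sketch of that pattern; the Settings stub here is an assumption (the real settings object comes from Dynaconf):

class Settings:
    def __init__(self):
        self.data = {}
        self.publish_output = False

def produce_review(settings: "Settings", pr_review: str) -> None:
    if settings.publish_output:
        print(pr_review)  # stand-in for git_provider.publish_comment(...)
    else:
        settings.data = {"artifact": pr_review}  # silent hand-off to the caller

settings = Settings()
produce_review(settings, "## PR Reviewer Guide 🔍\n...")
assert dict(settings.data)["artifact"].startswith("##")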

View File

@@ -1,8 +1,10 @@
+from unittest.mock import MagicMock
+from atlassian.bitbucket import Bitbucket
+from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 from pr_agent.git_providers import BitbucketServerProvider
 from pr_agent.git_providers.bitbucket_provider import BitbucketProvider
-from unittest.mock import MagicMock
-from atlassian.bitbucket import Bitbucket
-from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
 class TestBitbucketProvider:

View File

@@ -1,4 +1,5 @@
 from unittest.mock import MagicMock
 from pr_agent.git_providers.codecommit_client import CodeCommitClient

View File

@@ -1,9 +1,11 @@
-import pytest
 from unittest.mock import patch
+import pytest
-from pr_agent.git_providers.codecommit_provider import CodeCommitFile
-from pr_agent.git_providers.codecommit_provider import CodeCommitProvider
-from pr_agent.git_providers.codecommit_provider import PullRequestCCMimic
 from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
+from pr_agent.git_providers.codecommit_provider import (CodeCommitFile,
+                                                        CodeCommitProvider,
+                                                        PullRequestCCMimic)
 class TestCodeCommitFile:

View File

@@ -47,13 +47,10 @@ class TestConvertToMarkdown:
     def test_simple_dictionary_input(self):
         input_data = {'review': {
             'estimated_effort_to_review_[1-5]': '1, because the changes are minimal and straightforward, focusing on a single functionality addition.\n',
-            'relevant_tests': 'No\n', 'possible_issues': 'No\n', 'security_concerns': 'No\n'}, 'code_feedback': [
-            {'relevant_file': '``pr_agent/git_providers/git_provider.py\n``', 'language': 'python\n',
-             'suggestion': "Consider raising an exception or logging a warning when 'pr_url' attribute is not found. This can help in debugging issues related to the absence of 'pr_url' in instances where it's expected. [important]\n",
-             'relevant_line': '[return ""](https://github.com/Codium-ai/pr-agent-pro/pull/102/files#diff-52d45f12b836f77ed1aef86e972e65404634ea4e2a6083fb71a9b0f9bb9e062fR199)'}]}
+            'relevant_tests': 'No\n', 'possible_issues': 'No\n', 'security_concerns': 'No\n'}}
-        expected_output = f'{PRReviewHeader.REGULAR.value} 🔍\n\nHere are some key observations to aid the review process:\n\n<table>\n<tr><td>⏱️&nbsp;<strong>Estimated effort to review</strong>: 1 🔵⚪⚪⚪⚪</td></tr>\n<tr><td>🧪&nbsp;<strong>No relevant tests</strong></td></tr>\n<tr><td>&nbsp;<strong>Possible issues</strong>: No\n</td></tr>\n<tr><td>🔒&nbsp;<strong>No security concerns identified</strong></td></tr>\n</table>\n\n\n<details><summary> <strong>Code feedback:</strong></summary>\n\n<hr><table><tr><td>relevant file</td><td>pr_agent/git_providers/git_provider.py\n</td></tr><tr><td>suggestion &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>\n\n<strong>\n\nConsider raising an exception or logging a warning when \'pr_url\' attribute is not found. This can help in debugging issues related to the absence of \'pr_url\' in instances where it\'s expected. [important]\n\n</strong>\n</td></tr><tr><td>relevant line</td><td><a href=\'https://github.com/Codium-ai/pr-agent-pro/pull/102/files#diff-52d45f12b836f77ed1aef86e972e65404634ea4e2a6083fb71a9b0f9bb9e062fR199\'>return ""</a></td></tr></table><hr>\n\n</details>'
+        expected_output = f'{PRReviewHeader.REGULAR.value} 🔍\n\nHere are some key observations to aid the review process:\n\n<table>\n<tr><td>⏱️&nbsp;<strong>Estimated effort to review</strong>: 1 🔵⚪⚪⚪⚪</td></tr>\n<tr><td>🧪&nbsp;<strong>No relevant tests</strong></td></tr>\n<tr><td>&nbsp;<strong>Possible issues</strong>: No\n</td></tr>\n<tr><td>🔒&nbsp;<strong>No security concerns identified</strong></td></tr>\n</table>'
         assert convert_to_markdown_v2(input_data).strip() == expected_output.strip()
@@ -67,7 +64,7 @@ class TestConvertToMarkdown:
         assert convert_to_markdown_v2(input_data).strip() == expected_output.strip()
     def test_dictionary_with_empty_dictionaries(self):
-        input_data = {'review': {}, 'code_feedback': [{}]}
+        input_data = {'review': {}}
         expected_output = ''

View File

@@ -1,4 +1,5 @@
 import pytest
 from pr_agent.algo.git_patch_processing import extend_patch
 from pr_agent.algo.pr_processing import pr_generate_extended_diff
 from pr_agent.algo.token_handler import TokenHandler

View File

@@ -1,7 +1,9 @@
 import pytest
 from pr_agent.algo.file_filter import filter_ignored
 from pr_agent.config_loader import global_settings
 class TestIgnoreFilter:
     def test_no_ignores(self):
         """

View File

@@ -1,9 +1,10 @@
 # Generated by CodiumAI
+import pytest
 from pr_agent.algo.types import FilePatchInfo
 from pr_agent.algo.utils import find_line_number_of_relevant_line_in_file
-import pytest
 class TestFindLineNumberOfRelevantLineInFile:
     # Tests that the function returns the correct line number and absolute position when the relevant line is found in the patch

View File

@@ -1,7 +1,9 @@
-import os
 import json
+import os
 from pr_agent.algo.utils import get_settings, github_action_output
 class TestGitHubOutput:
     def test_github_action_output_enabled(self, monkeypatch, tmp_path):
         get_settings().set('GITHUB_ACTION_CONFIG.ENABLE_OUTPUT', True)

View File

@@ -47,7 +47,3 @@ PR Feedback:
     expected_output = [{'relevant file': 'src/app.py:\n', 'suggestion content': 'The print statement is outside inside the if __name__ ==:'}]
     assert load_yaml(yaml_str) == expected_output

View File

@@ -1,10 +1,10 @@
 # Generated by CodiumAI
+import pytest
 from pr_agent.algo.utils import try_fix_yaml
-import pytest
 class TestTryFixYaml:
     # The function successfully parses a valid YAML string.