Compare commits

...

299 Commits
v0.24 ... 1331

Author SHA1 Message Date
ee4f5c765a Add conditional logging for no code suggestions message based on new config option 2024-11-04 07:48:13 +02:00
23cc968331 Merge remote-tracking branch 'origin/main' into 1331
# Conflicts:
#	pr_agent/tools/pr_code_suggestions.py
2024-11-04 07:46:02 +02:00
Tal
16029e66ad Update README.md 2024-11-03 18:15:09 +02:00
Tal
7bd6713335 Merge pull request #1338 from Codium-ai/tr/no_line_numbers
Add support for processing diffs without line numbers in code suggest…
2024-11-03 18:13:12 +02:00
ef3241285d Add support for processing diffs without line numbers in code suggestions tool 2024-11-03 17:34:30 +02:00
Tal
d9ef26dc1c Merge pull request #1337 from Codium-ai/mrT23-patch-5
Update additional_configurations.md
2024-11-03 14:15:53 +02:00
Tal
02949b2b96 Update additional_configurations.md 2024-11-03 14:14:14 +02:00
Tal
d301c76b65 Merge pull request #1332 from ryanzll/main
Merge branch 'main' of https://github.com/ryanzll/pr-agent
2024-11-03 09:53:46 +02:00
dacb45dd8a Merge branch 'main' of https://github.com/ryanzll/pr-agent
update openai api
2024-11-02 09:47:14 +08:00
443d06df06 Add configuration option to control publish of no code suggestions message 2024-11-01 15:38:16 -04:00
Tal
15e8c988a4 Merge pull request #1326 from Codium-ai/tr/certifi
Add certifi==2024.8.30 to requirements.txt
2024-10-31 11:17:09 +02:00
Tal
60fab1b301 Merge pull request #1325 from nomi3/fix-installation-docs-for-bitbucket
update bitbucket-pipelines.yml filename in installation
2024-10-31 11:14:22 +02:00
84c1c1b1ca Add certifi==2024.8.30 to requirements.txt 2024-10-31 11:12:35 +02:00
7419a6d51a chore: update bitbucket-pipelines.yml filename in installation documentation 2024-10-31 17:40:31 +09:00
Tal
ee58a92fb3 Merge pull request #1321 from Codium-ai/hl/fix_reflection
Self-Reflection small fix
2024-10-30 20:33:32 +02:00
6b64924355 switch the order of when to disable the existing code, to make sure reflection see's the full suggestion (before and after) 2024-10-30 17:09:44 +02:00
2f5e8472b9 Add PRDescriptionHeader enum for consistent "Changes walkthrough" usage across modules 2024-10-30 08:48:08 +02:00
Tal
7186bf4bb3 Merge pull request #1313 from yu-iskw/support-google-ai-studio
Support Google AI Studio
2024-10-29 19:22:28 +02:00
Tal
115fca58a3 Merge pull request #1307 from s1moe2/docs/gitlab
Gitlab docs improved; gitlab webhook secret config standardization
2024-10-29 19:20:15 +02:00
cbf60ca636 revert gitlab webhook_secret; docs adjustments 2024-10-29 16:02:06 +00:00
64ac45d03b fix typos 2024-10-29 10:49:53 +00:00
db062e3e35 Support Google AI Studio
Signed-off-by: Yu Ishikawa <yu-iskw@users.noreply.github.com>
2024-10-29 08:00:16 +09:00
Tal
e85472f367 Merge pull request #1311 from Codium-ai/tr/header
Add PRDescriptionHeader enum for consistent "Changes walkthrough" usa…
2024-10-28 08:15:21 +02:00
597f1c6f83 Add PRDescriptionHeader enum for consistent "Changes walkthrough" usage across modules 2024-10-28 08:12:56 +02:00
Tal
66d4f56777 Merge pull request #1309 from Codium-ai/tr/suggestion_tracking
Tr/suggestion tracking
2024-10-27 16:12:56 +02:00
fbfb9e0881 Add suggestion tracking feature with wiki documentation support 2024-10-27 16:07:07 +02:00
Tal
223b5408d7 Merge pull request #1308 from Codium-ai/update-docs-favicon
update docs favicon to fix proportion
2024-10-27 15:56:55 +02:00
509135a8d4 Add suggestion tracking feature with wiki documentation support 2024-10-27 15:56:26 +02:00
8db7151bf0 Add suggestion tracking feature with wiki documentation support 2024-10-27 15:49:10 +02:00
b8cfcdbc12 Improve markdown header sanitization in PR help message tool 2024-10-27 14:58:21 +02:00
Tal
a3cd433184 Update README.md 2024-10-27 10:13:20 +02:00
0f284711e6 update docs favicon to fix proportion 2024-10-26 11:36:28 +03:00
67b46e7f30 fixed secrets file comment typo 2024-10-25 16:20:35 +01:00
68f2cec077 Gitlab docs improved; gitlab webhook secret config standadization 2024-10-25 16:17:33 +01:00
8e94c8b2f5 chore: update Dockerfiles to use Python 3.12 and adjust docs directory addition 2024-10-25 17:35:28 +03:00
Tal
a221f8edd0 Merge pull request #1306 from Codium-ai/tr/help
Tr/help
2024-10-25 17:02:24 +03:00
3b47c75c32 Improve handling of empty markdown headers in PR help message tool and update prompt instructions in pr_help_prompts.toml 2024-10-25 12:27:59 +03:00
2e34d7a05a s 2024-10-24 22:05:25 +03:00
204a0a7912 s 2024-10-24 22:03:44 +03:00
9786499fa6 Refactor PR help message tool to use full documentation content for answering questions and update relevant section handling in prompts 2024-10-24 22:01:40 +03:00
4f14742233 Refactor PR help message tool to use full documentation content for answering questions and update relevant section handling in prompts 2024-10-24 21:38:31 +03:00
Tal
c077c71fdb Merge pull request #1302 from KennyDizi/main
Add support for new Claude models and update dependencies
2024-10-24 08:40:58 +03:00
Tal
7b5a3d45bd Merge pull request #1305 from dheerajsir/main
fix typos in documents
2024-10-24 08:37:31 +03:00
c6c6a9b4f0 Update custom_labels.md 2024-10-23 20:13:45 +05:30
a5e7c37fcc Update pr_agent_pro.md 2024-10-23 20:12:14 +05:30
12a9e13509 Update index.md 2024-10-23 20:10:14 +05:30
Tal
0b4b6b1589 Update configuration_options.md 2024-10-23 17:19:16 +03:00
Tal
bf049381bd Merge pull request #1303 from Codium-ai/mrT23-patch-2
Update atlassian-connect.json
2024-10-23 09:21:30 +03:00
Tal
65c917b84b Update atlassian-connect.json 2024-10-23 09:04:27 +03:00
b4700bd7c0 Add support bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 model 2024-10-23 08:08:04 +07:00
a957554262 Update anthropic to v0.37.1 2024-10-23 07:44:58 +07:00
d491a942cc Update openai to v1.52.1 2024-10-23 07:20:35 +07:00
6c55a2720a Update litellm to v1.50.2 to support all new added models 2024-10-23 07:12:39 +07:00
f1d0401f82 Add model vertex_ai/claude-3-5-sonnet-v2@20241022 2024-10-23 07:11:33 +07:00
c5bd09e2c9 Add model anthropic/claude-3-5-sonnet-20241022 2024-10-23 07:10:22 +07:00
Tal
c70acdc7cd Merge pull request #1299 from Codium-ai/tr/disable_default_publish_labels
disable publishing labels by default
2024-10-22 08:13:52 +03:00
Tal
1c6f7f9c06 Update README.md 2024-10-22 08:12:15 +03:00
0b32b253ca docs: update default setting for publish_labels to false and adjust related documentation 2024-10-21 17:56:15 +03:00
Tal
3efd2213f2 Merge pull request #1298 from Codium-ai/tr/help
docs: update pr_help_prompts.toml with project name change and format…
2024-10-21 07:41:19 +03:00
0705bd03c4 docs: update pr_help_prompts.toml with project name change and formatting adjustments 2024-10-21 07:37:57 +03:00
927d005e99 docs: update pr_help_prompts.toml with project name change and formatting adjustments 2024-10-21 07:33:26 +03:00
Tal
0dccfdbbf0 Merge pull request #1295 from Codium-ai/tr/azure_o1
fix: correct model type extraction for O1 model handling
2024-10-19 11:35:31 +03:00
Tal
dcb7b66fd7 Update pr_agent/algo/ai_handlers/litellm_ai_handler.py
Co-authored-by: codiumai-pr-agent-pro[bot] <151058649+codiumai-pr-agent-pro[bot]@users.noreply.github.com>
2024-10-19 11:34:57 +03:00
b7437147af fix: correct model type extraction for O1 model handling in litellm_ai_handler.py 2024-10-19 11:32:45 +03:00
e82afdd2cb Merge pull request #1290 from Codium-ai/hl/tickets_support
support more types of github ticket url / references
2024-10-14 14:34:45 +03:00
0946da3810 support github enterprise 2024-10-14 14:31:34 +03:00
d1f4069d3f docs: add news update for improved GitHub Actions support on enterprise server 2024-10-14 13:16:11 +03:00
d45a892fd2 Update pr_agent/tools/ticket_pr_compliance_check.py
Co-authored-by: codiumai-pr-agent-pro[bot] <151058649+codiumai-pr-agent-pro[bot]@users.noreply.github.com>
2024-10-14 10:59:58 +03:00
Tal
4a91b8ed8d Merge pull request #1292 from Codium-ai/tr/server
Tr/server
2024-10-14 10:31:41 +03:00
fb85cb721a docs: fix typo in installation instructions for GitHub Enterprise Server in pr_agent_pro.md 2024-10-14 10:31:06 +03:00
3a52122677 improve and combine regex 2024-10-14 10:30:07 +03:00
13c6ed9098 docs: update installation instructions for GitHub Enterprise Server in pr_agent_pro.md 2024-10-14 10:27:52 +03:00
9dd1995464 docs: update installation guides with GitHub enterprise server instructions and formatting improvements 2024-10-14 10:25:51 +03:00
eb804d0b34 clean 2024-10-14 10:08:36 +03:00
Tal
e0ee878e84 Merge pull request #1291 from Codium-ai/tr/server
Tr/server
2024-10-14 09:44:38 +03:00
27abe48a34 feat: import Range utility in GitHubProvider for enhanced functionality 2024-10-14 09:43:58 +03:00
8fe504a7ec feat: import Range utility in GitHubProvider for enhanced functionality 2024-10-14 09:40:57 +03:00
f6ba49819a feat: enhance GitHubProvider with improved error handling and URL parsing
- Add traceback logging for exceptions in diff file retrieval
- Improve URL parsing to handle '/api/v3' paths and validate GitHub URLs
- Modify `publish_comment` to return None for temporary comments
- Update constructor to accept an optional GitHub client parameter
2024-10-14 09:18:06 +03:00
22bf7af9ba refactor regex 2024-10-14 08:44:01 +03:00
840e8c4d6b support more types of github ticket url / references 2024-10-13 22:41:33 +03:00
Tal
49f8d86c77 Merge pull request #1289 from Codium-ai/tr/ticket_review
fix: handle missing issue body and improve error logging in ticket co…
2024-10-13 08:20:08 +03:00
05827d125b fix: handle missing issue body and improve error logging in ticket compliance check 2024-10-13 08:19:14 +03:00
74ee9a333e fix: handle missing issue body and improve error logging in ticket compliance check 2024-10-13 08:15:04 +03:00
Tal
e9769fa602 Merge pull request #1283 from Codium-ai/mrT23-patch-2
Update README.md
2024-10-10 10:12:24 +03:00
Tal
3adff8cf4c Update README.md 2024-10-10 10:11:31 +03:00
Tal
d249b47ce9 Merge pull request #1282 from Codium-ai/tr/ticket_review
Tr/ticket review
2024-10-10 10:08:05 +03:00
892d1ad15c Merge remote-tracking branch 'origin/tr/ticket_review' into tr/ticket_review 2024-10-10 10:02:43 +03:00
76d95bb6d7 feat: add ticket compliance check
- Implement ticket compliance check logic in `utils.py` and `ticket_pr_compliance_check.py`
- Add functions to extract and cache PR tickets, and check ticket relevancy
2024-10-10 10:01:48 +03:00
7db9a03805 feat: integrate ticket extraction and enhance YAML handling in PR tools
- Add ticket extraction and caching functionality in `pr_description.py` and `pr_reviewer.py`.
- Introduce `keys_fix` parameter to improve YAML loading robustness.
- Enhance error handling for estimated effort parsing in `pr_reviewer.py`.
2024-10-10 08:53:07 +03:00
4eef3e9190 feat: add ticket compliance check and rate limit validation
- Implement ticket compliance check logic in `utils.py` and `ticket_pr_compliance_check.py`
- Add functions to extract and cache PR tickets, and check ticket relevancy
- Introduce rate limit validation for GitHub API requests
- Update `pr_reviewer_prompts.toml` and `pr_description_prompts.toml` to include ticket compliance fields
- Modify configuration to require ticket analysis review
2024-10-10 08:48:37 +03:00
Tal
014ea884d2 Merge pull request #1280 from Codium-ai/update-qodo-brand
Update footer links and branding from CodiumAI to Qodo
2024-10-10 07:41:39 +03:00
c1c5ee7cfb Update footer links and branding from CodiumAI to Qodo 2024-10-09 11:46:10 +03:00
Tal
3ac1ddc5d7 Merge pull request #1279 from Codium-ai/tr/o1_system
feat: add support for O1 model by combining system and user prompt
2024-10-09 08:59:40 +03:00
Tal
e6c56c7355 Update pr_agent/algo/ai_handlers/litellm_ai_handler.py
Co-authored-by: codiumai-pr-agent-pro[bot] <151058649+codiumai-pr-agent-pro[bot]@users.noreply.github.com>
2024-10-09 08:56:31 +03:00
727b08fde3 feat: add support for O1 model by combining system and user prompts in litellm_ai_handler 2024-10-09 08:53:34 +03:00
Tal
5d9d48dc82 Update .pr_agent.toml 2024-10-09 08:46:57 +03:00
Tal
8e8062fefc Merge pull request #1276 from Codium-ai/tr/fold_suggestions_on_review
docs: update improve.md with folding suggestions feature and add conf…
2024-10-08 20:53:26 +03:00
23a3e208a5 docs: update improve.md with folding suggestions feature and add config option 2024-10-08 20:51:45 +03:00
Tal
bb84063ef2 Merge pull request #1274 from CoryBall/azure-devops-pr-new-file-error-fix
bug-fix_azuredevops-new-file
2024-10-08 16:05:21 +03:00
a476e85fa7 bug-fix_azuredevops-new-file 2024-10-08 01:13:44 -05:00
4b05a3e858 refactor: streamline hunk processing logic in git_patch_processing.py
- Simplified logic for handling new and old hunks to ensure consistent presentation of changes.
- Updated documentation in TOML files to reflect changes in hunk section handling and line number references.
2024-10-07 20:32:11 +03:00
cd158f24f6 fix: move settings initialization outside inner function in gitlab_webhook 2024-10-07 20:24:21 +03:00
Tal
ada0a3d10f Merge pull request #1272 from Codium-ai/tr/warning_on_bad_config
Tr/warning on bad config
2024-10-07 09:24:43 +03:00
ddf1afb23f chore: update prompt guidelines to include exception type suggestions 2024-10-07 09:17:26 +03:00
e2b5489495 feat: add error handling for invalid repo settings configuration
- Implement error handling for invalid TOML configurations in repo settings.
- Log warnings and send comments to PRs when configuration errors occur.
- Introduce `handle_configurations_errors` function to manage error reporting.
- Ensure compatibility with different markdown formats for error messages.
2024-10-07 09:13:17 +03:00
Tal
6459535e39 Merge pull request #1271 from Codium-ai/tr/blogs
docs: add links to technical blogs on LLMs and coding tasks in core a…
2024-10-06 18:20:02 +03:00
Tal
5a719c1904 Update docs/docs/core-abilities/index.md
Co-authored-by: codiumai-pr-agent-pro[bot] <151058649+codiumai-pr-agent-pro[bot]@users.noreply.github.com>
2024-10-06 18:19:26 +03:00
1a2ea2c87d docs: add links to technical blogs on LLMs and coding tasks in core abilities index 2024-10-06 18:16:24 +03:00
ca79bafab3 fixed link 2024-10-04 08:06:57 +03:00
618224beef s 2024-10-02 17:15:44 +03:00
Tal
481c2a5985 Merge pull request #1267 from Codium-ai/tr/should_process
feat: enhance PR processing logic across GitLab, GitHub, and Bitbucket
2024-10-02 17:10:49 +03:00
e21d9dc9e3 s 2024-10-02 17:08:52 +03:00
6872a7076b s 2024-10-02 17:07:09 +03:00
c2ae429805 feat: enhance PR processing logic across GitLab, GitHub, and Bitbucket
- Refactor `should_process_pr_logic` to improve PR filtering based on data attributes.
- Update `_perform_commands_*` functions to incorporate new PR processing checks.
- Ensure consistent handling of PRs by checking configurations before executing commands.
2024-10-02 17:02:33 +03:00
Tal
af5a50ac6a Merge pull request #1264 from Codium-ai/tr/commitable_alongside_table
dual publishing mode
2024-10-01 08:38:34 +03:00
bccc2844b9 dual publishing mode 2024-10-01 08:32:29 +03:00
Tal
d80103751e Merge pull request #1263 from Codium-ai/tr/commitable_alongside_table
feat: add dual publishing mode for PR code suggestions
2024-10-01 08:25:00 +03:00
8ff8b1d48e default 2024-10-01 08:22:28 +03:00
da0bd84746 dual 2024-10-01 08:20:16 +03:00
b42ded61f8 docs: add guidelines for implementing proposed code suggestions in improve.md 2024-10-01 08:16:51 +03:00
dfa4f22be2 feat: add dual publishing mode for PR code suggestions
- Introduced dual publishing mode to present high-scoring suggestions as both table entries and commitable PR comments.
- Updated documentation to include configuration options for dual publishing mode.
- Enhanced `pr_code_suggestions.py` to handle dual publishing logic and error handling.
- Modified `configuration.toml` to include `duel_publishing_score_threshold` setting.
2024-10-01 08:01:27 +03:00
Tal
7d55fc174b Merge pull request #1262 from Codium-ai/tr/prompt
style: refine field descriptions in KeyIssuesComponentLink model
2024-09-30 13:06:11 +03:00
968fb71577 refactor: update terminology for issue review recommendations in utils.py 2024-09-30 13:03:42 +03:00
454365913f refactor: update terminology for issue review recommendations in utils.py 2024-09-30 13:00:01 +03:00
bbaba2dbda refactor: update terminology for issue review recommendations in utils.py
style: refine field descriptions in KeyIssuesComponentLink model
2024-09-30 08:58:32 +03:00
e4c6792866 Merge remote-tracking branch 'origin/main' 2024-09-30 07:52:54 +03:00
183dd5d2fc diverse 2024-09-30 07:52:43 +03:00
Tal
2e79392a5f Merge pull request #1256 from Codium-ai/mrT23-patch-2
disable chat message for github action
2024-09-30 07:40:04 +03:00
da4148f336 docs: update links and references to Qodo Merge in documentation 2024-09-29 17:38:02 +03:00
070ed21103 update documentation links to qodo-merge-docs.qodo.ai 2024-09-29 17:32:16 +03:00
Tal
640bc1e28a Merge pull request #1261 from Codium-ai/tr/qodo_merge
Qodo Merge rename
2024-09-29 17:26:13 +03:00
6c4aa468a9 formerly 2024-09-29 17:24:37 +03:00
da20efd050 Qodo Merge rename 2024-09-29 17:19:46 +03:00
c25aaad176 Qodo Merge rename 2024-09-29 17:15:49 +03:00
Tal
b4c20d683c Merge pull request #1260 from Codium-ai/mrT23-patch-3
Update CNAME
2024-09-29 12:26:50 +03:00
Tal
7bc20d6f16 Update CNAME 2024-09-29 12:25:48 +03:00
f894e8831b revert unauthorized merge 2024-09-29 08:37:26 +03:00
25b7e1e777 update docs URL 2024-09-29 08:19:26 +03:00
Tal
6ba2fb212d Merge pull request #1259 from Codium-ai/tr/intro_review
Add intro text option for PR reviews in configuration and utils
2024-09-29 07:32:27 +03:00
4a60046f7c update tests 2024-09-29 07:28:02 +03:00
35b1f5e747 key 2024-09-29 07:23:34 +03:00
d77a819d92 Add intro text option for PR reviews in configuration and utils 2024-09-29 07:06:48 +03:00
f3fd439d47 docs 2024-09-27 16:24:07 +03:00
Tal
6188afff48 Merge pull request #1257 from Codium-ai/tr/code_suggestion_message
docs: update PR-Agent documentation with PR Chat search instructions
2024-09-27 16:15:09 +03:00
57cfdcf274 docs: update PR-Agent documentation with PR Chat search instructions 2024-09-27 16:13:54 +03:00
Tal
1333ac47bc Update configuration.toml 2024-09-26 18:24:11 +03:00
Tal
267e01409b Merge pull request #1255 from Codium-ai/tr/code_suggestion_message
Enable intro and chat text for PR code suggestions in configuration
2024-09-26 17:15:10 +03:00
8bdebcb99f Enable intro and chat text for PR code suggestions in configuration 2024-09-26 17:11:00 +03:00
Tal
a13400b9b8 Merge pull request #1254 from Codium-ai/tr/code_suggestion_message
Add intro and chat text options for PR code suggestions in configuration
2024-09-26 16:31:10 +03:00
89f9cf5adc Add intro and chat text options for PR code suggestions in configuration 2024-09-26 09:07:51 +03:00
Tal
6b653dbe48 Merge pull request #1253 from Codium-ai/tr/code_suggestion_message
Add configuration for auto actions in GitHub Action runner
2024-09-26 08:50:56 +03:00
109b965407 Add configuration for auto actions in GitHub Action runner 2024-09-26 08:03:39 +03:00
Tal
511c5a31db Merge pull request #1252 from Codium-ai/tr/prompts_refactor
improve code suggestion prompt
2024-09-25 21:23:00 +03:00
3dd8050004 improve code suggestion prompt 2024-09-25 21:22:41 +03:00
4b7d01972c improve code suggestion prompt 2024-09-25 21:15:14 +03:00
05ec944a8b improve code suggestion prompt 2024-09-25 17:52:54 +03:00
4713ae74b7 improve code suggestion prompt 2024-09-25 17:42:59 +03:00
c828cdde62 improve code suggestion prompt 2024-09-25 17:41:21 +03:00
6f14f9c8e1 improve code suggestion prompt 2024-09-25 16:22:16 +03:00
Tal
9f8cc75bd3 Merge pull request #1250 from chapeupreto/improve-pipeline-gitlab
docs: improve GitLab installation instruction
2024-09-25 07:11:54 +03:00
0668ccbb9e docs: improve GitLab installation instruction
This commit adds a missing `export` instruction that is required mainly
for self-hosted GitLab installations.
2024-09-24 23:34:40 -03:00
Tal
8a287f8ed6 Merge pull request #1248 from Codium-ai/tr/help_fixes
DocHelper
2024-09-22 16:17:30 +03:00
d5625db3c8 DocHelper 2024-09-22 16:16:59 +03:00
d6b779eef8 DocHelper 2024-09-22 16:14:32 +03:00
804cb9ec1d DocHelper 2024-09-22 15:55:18 +03:00
Tal
47d32283ca Merge pull request #1247 from Codium-ai/tr/help_fixes
Tr/help fixes
2024-09-22 10:23:29 +03:00
397963257d DocHelper 2024-09-22 10:21:47 +03:00
a3fd15bb92 Merge remote-tracking branch 'origin/main' into tr/help_fixes 2024-09-22 09:27:10 +03:00
ded7d96649 DocHelper 2024-09-22 09:23:34 +03:00
Tal
bbf06e25ef Merge pull request #1246 from Codium-ai/tr/docs_update
update docs
2024-09-22 09:20:38 +03:00
7e5ddf7e37 Update improve.md with enhanced self-review configuration details 2024-09-22 09:19:39 +03:00
0198c61cf7 update docs 2024-09-22 09:00:56 +03:00
20d8e76a7f logs 2024-09-22 08:31:56 +03:00
be8052251a Update pr_help_prompts.toml and fix snippet indexing in pr_help_message.py 2024-09-22 08:13:23 +03:00
ba08b13446 update PR-Agent usage instructions in pr_description.py 2024-09-21 21:10:51 +03:00
835684b92a Merge remote-tracking branch 'origin/main'
# Conflicts:
#	pr_agent/tools/pr_description.py
2024-09-21 21:10:38 +03:00
90295b6429 update PR-Agent usage instructions in pr_description.py 2024-09-21 21:10:06 +03:00
Tal
08d6bbc94c Merge pull request #1245 from Codium-ai/tr/help_rag
docs pr help
2024-09-21 20:58:06 +03:00
8229d98842 docs pr help 2024-09-21 20:55:05 +03:00
Tal
9e28aca919 Merge pull request #1244 from Codium-ai/tr/help_rag
Refactor S3 file handling and update Dockerfile to include local Chroma DB file
2024-09-21 19:28:04 +03:00
3e780783cc Add docs db 2024-09-21 19:24:23 +03:00
5c7b65810c Refactor S3 file handling and update Dockerfile to include local Chroma DB file 2024-09-21 19:11:46 +03:00
Tal
f2f82e8805 Merge pull request #1243 from Codium-ai/tr/help_rag
get protection
2024-09-21 17:08:23 +03:00
1e51acff22 get protection 2024-09-21 17:07:46 +03:00
Tal
81e847e477 Merge pull request #1242 from Codium-ai/tr/help_rag
prompts
2024-09-21 16:59:23 +03:00
a70fe27d94 prompts 2024-09-21 16:58:37 +03:00
Tal
1a5835a947 Merge pull request #1241 from Codium-ai/tr/help_rag
Add PR help message functionality and update dependencies
2024-09-21 16:28:42 +03:00
4b74506107 Add PR help message functionality and update dependencies
- Implement PRHelpMessage class to provide AI-powered assistance for pull requests.
- Add methods for similarity search using local, S3, and Pinecone databases.
- Update `requirements.txt` to include new dependencies for langchain and chromadb.
- Modify `configuration.toml` to include `force_local_db` setting for PR help.
- Update `aiohttp` and `openai` package versions.
2024-09-21 16:22:51 +03:00
08319f8492 fixed azure remove comment bug 2024-09-19 12:54:26 +03:00
Tal
b447080777 Merge pull request #1236 from Codium-ai/tr/tldr
TLDR
2024-09-16 09:23:49 +03:00
da398ce56f TLDR 2024-09-16 09:21:52 +03:00
Tal
16763d81b4 Merge pull request #1232 from yzongyue/feature/compatible_with_old_gitlab
func get_line_link compatible with old gitlab versions
2024-09-15 17:06:43 +03:00
Tal
80fe297bc9 Merge pull request #1231 from matanbaruch/main
Update PR Action Handling Logic in GitHub Action Runner
2024-09-15 15:30:34 +03:00
5d68b0c492 Update automations_and_usage.md 2024-09-15 15:19:40 +03:00
Tal
8d5f015e5c Merge pull request #1234 from Codium-ai/tr/dynamic
self_reflection
2024-09-15 14:52:06 +03:00
be03f83318 self_reflection 2024-09-15 14:50:24 +03:00
cbfd250c0c self_reflection 2024-09-15 14:47:27 +03:00
Tal
7ce46e65a1 Merge pull request #1233 from Codium-ai/tr/dynamic
collapsible
2024-09-15 13:45:39 +03:00
600f230ba7 collapsible 2024-09-15 13:44:27 +03:00
Tal
4f4f13b8b2 Update improve.md 2024-09-15 12:44:11 +03:00
Tal
146b8823a9 Merge pull request #1230 from Codium-ai/tr/dynamic
Tr/dynamic
2024-09-15 12:39:00 +03:00
fdb1ff8057 update best practics 2024-09-15 12:36:00 +03:00
ce8e637800 get_line_link compatible with old gitlab versions 2024-09-15 14:56:08 +08:00
306af02d22 Update github_action_runner.py 2024-09-15 09:42:02 +03:00
a23541912b Update github_action_runner.py 2024-09-15 09:40:05 +03:00
0851767774 Update configuration.toml 2024-09-15 09:35:58 +03:00
585a7f1c69 Update github_action_runner.py 2024-09-15 09:33:47 +03:00
8d82cb2e04 f string 2024-09-15 08:50:24 +03:00
7586514abf docs: clarify guidelines on file length and specificity in improve.md 2024-09-15 08:28:01 +03:00
480a025877 Merge remote-tracking branch 'origin/main' into tr/dynamic 2024-09-15 08:08:11 +03:00
8f943a0d44 fix: update error logging messages and system prompt handling in litellm_ai_handler.py 2024-09-15 08:07:59 +03:00
Tal
2102c51422 Merge pull request #1228 from eddielu/eddie/azurecommentstatus
Set azure devops default status to ByDesign
2024-09-14 20:34:13 +03:00
29028d43cf Set azure devops default status to ByDesign 2024-09-13 18:01:08 -07:00
Tal
95d1b0d0c5 Merge pull request #1227 from Codium-ai/tr/dynamic
refactor logic
2024-09-13 22:22:47 +03:00
cc0e432247 refactor logic 2024-09-13 22:17:24 +03:00
0fb158fd47 fix push gitlab 2024-09-13 20:59:45 +03:00
Tal
867a430a38 Merge pull request #1226 from KennyDizi/main
Add new o1-mini and o1-preview models
2024-09-13 17:17:26 +03:00
a94496285f Add o1-preview models 2024-09-13 08:23:18 +07:00
567c144176 Add o1-mini models 2024-09-13 08:22:09 +07:00
Tal
c08b59a74d Merge pull request #1224 from Codium-ai/tr/dynamic_context_
Tr/dynamic context
2024-09-12 11:43:15 +03:00
0ba81e1ac7 docs: add dynamic context strategy documentation and update configuration settings 2024-09-12 11:42:27 +03:00
2cb0dd2496 docs: add dynamic context strategy documentation and update configuration settings 2024-09-12 11:38:54 +03:00
a8367d1a22 docs: add dynamic context strategy documentation and update configuration settings
- Added detailed documentation on the dynamic context strategy in `dynamic_context.md`.
- Updated configuration settings in `configuration.toml` to enable dynamic context by default.
- Adjusted context line parameters in `additional_configurations.md` to reflect new defaults.
- Announced dynamic context as the default option in the `README.md` news section.
2024-09-12 11:37:52 +03:00
Tal
1a3345c6e6 Merge pull request #1223 from Codium-ai/mrT23-patch-12
Update additional_configurations.md
2024-09-12 09:29:02 +03:00
Tal
564845adff Update additional_configurations.md 2024-09-12 09:27:45 +03:00
Tal
3ea691e70a Merge pull request #1216 from Codium-ai/tr/azure_parsing
fix: improve Azure DevOps PR URL parsing and add unit tests
2024-09-12 09:09:24 +03:00
Tal
5047d076f8 Merge pull request #1222 from Codium-ai/tr/docs_and_fixes
enhance: cap patch extra lines and update documentation with separato…
2024-09-12 09:07:06 +03:00
7de6bb0150 enhance: cap patch extra lines and update documentation with separators and context adjustments 2024-09-12 09:05:26 +03:00
a1582b5338 enhance: cap patch extra lines and update documentation with separators and context adjustments 2024-09-12 09:01:36 +03:00
Tal
dd8d78e7d8 Merge pull request #1221 from Codium-ai/tr/updates_and_fixes
FAQ
2024-09-11 16:53:41 +03:00
5af6cc7538 FAQ 2024-09-11 16:52:21 +03:00
Tal
6cc562d6a2 Merge pull request #1220 from Codium-ai/tr/updates_and_fixes
FAQ
2024-09-11 16:41:07 +03:00
19b051b992 FAQ 2024-09-11 16:40:34 +03:00
be68ee89f3 FAQ 2024-09-11 16:36:27 +03:00
db6c75a130 FAQ 2024-09-11 16:28:09 +03:00
Tal
74688846e0 Merge pull request #1219 from Codium-ai/tr/updates_and_fixes
docs and fixes
2024-09-10 20:10:53 +03:00
09b0a04a47 docs and fixes 2024-09-10 20:06:48 +03:00
Tal
cc1b65f886 Merge pull request #1218 from Codium-ai/tr/updates_and_fixes
feat: enhance error handling and logging, update AI metadata terminology
2024-09-10 17:59:48 +03:00
Tal
1451d82d6b Update pr_agent/algo/pr_processing.py
Co-authored-by: codiumai-pr-agent-pro[bot] <151058649+codiumai-pr-agent-pro[bot]@users.noreply.github.com>
2024-09-10 17:50:32 +03:00
01ba6fe63d feat: enhance error handling and logging, update AI metadata terminology
- Improved error handling and logging in `pr_processing.py` and `github_polling.py` to provide more detailed error information.
- Updated AI metadata terminology from "AI-generated file summary" to "AI-generated changes summary" across multiple files for consistency.
- Added a placeholder method `publish_file_comments` in `azuredevops_provider.py`.
- Refined logging messages in `azuredevops_provider.py` for better clarity.
2024-09-10 17:44:26 +03:00
Tal
74f9da1135 Merge pull request #1217 from Codium-ai/mrT23-patch-12
Update index.md
2024-09-10 09:06:55 +03:00
Tal
f80c2ae2c8 Update index.md 2024-09-10 09:05:50 +03:00
e444da8378 fix: improve Azure DevOps PR URL parsing and add unit tests 2024-09-10 08:19:22 +03:00
Tal
25ad8a09ce Merge pull request #1215 from Codium-ai/tr/docs
docs
2024-09-10 08:00:36 +03:00
897e791b1a docs 2024-09-10 08:00:10 +03:00
7f94dda54e docs 2024-09-10 07:58:18 +03:00
Tal
538a592882 Merge pull request #1214 from Codium-ai/tr/docs
docs
2024-09-10 07:53:35 +03:00
a3cb7277a7 docs 2024-09-10 07:51:47 +03:00
Tal
b5cd560402 Merge pull request #1212 from Codium-ai/tr/configurations
docs
2024-09-09 19:47:47 +03:00
fd38c33fcb docs 2024-09-09 19:45:29 +03:00
Tal
f767a3dfde Merge pull request #1211 from Codium-ai/hl/impact_docs
impact docs
2024-09-09 19:34:42 +03:00
9f8b619858 small fix 2024-09-09 18:47:53 +03:00
8de16939ba impact docs 2024-09-09 18:15:56 +03:00
Tal
6ed5537065 Merge pull request #1210 from Codium-ai/tr/configurations
bug fix: remove unused get_pr_description method from Azure DevOps
2024-09-09 16:26:46 +03:00
1a9638cf87 bug fix: remove unused get_pr_description method from Azure DevOps provider 2024-09-09 16:25:34 +03:00
Tal
49521aafff Merge pull request #1208 from Codium-ai/tr/configurations
config docs
2024-09-09 08:34:46 +03:00
c8e8ed89d2 feat: integrate Dynaconf for configuration management and enhance config display 2024-09-09 08:31:20 +03:00
ebc5cafb2b protection 2024-09-08 17:46:21 +03:00
Tal
52e8d7bc6a Merge pull request #1207 from Codium-ai/tr/fixes_metadata
Tr/fixes metadata
2024-09-08 17:32:58 +03:00
f7344fd787 metadata.md docs 2024-09-08 17:31:17 +03:00
86103c65e8 pattern_back 2024-09-08 17:24:13 +03:00
a4658b9960 docs 2024-09-08 16:52:20 +03:00
5fd831c448 impact_evaluation 2024-09-08 16:47:04 +03:00
332d3a0c5e markdown update 2024-09-08 16:43:44 +03:00
Tal
edef712b6a Merge pull request #1206 from Codium-ai/tr/ai_metadata
ai_metadat_injection
2024-09-08 16:33:00 +03:00
1831f2cec4 markdown 2024-09-08 16:32:46 +03:00
8706f643ef enable ai_metadata 2024-09-08 16:26:26 +03:00
Tal
35a75095ea Merge pull request #1205 from ScArLeXiA/fix-typo-in-readme
fix: Typo in README (`Chrom` -> `Chrome`)
2024-09-07 19:20:35 +03:00
0aa296d03e fix: Typo in README (Chrom -> Chrome) 2024-09-08 01:14:41 +09:00
Tal
24f7e8622f Merge pull request #1202 from Codium-ai/tr/ignore_titile_adjustments
avoid_full_files
2024-09-07 11:49:21 +03:00
d01cfe443c avoid_full_files 2024-09-07 11:44:24 +03:00
Tal
6150256040 Merge pull request #1201 from Codium-ai/tr/ignore_titile_adjustments
Tr/ignore titile adjustments
2024-09-07 11:27:28 +03:00
147a8e0ef3 refactor: consolidate PR ignore logic into a single function and update documentation 2024-09-07 11:26:13 +03:00
Tal
9199d84796 Merge pull request #1192 from paolomainardi/feature/1190_exclude_branches_tags
feat: gitlab skip source, target and labels
2024-09-07 10:20:31 +03:00
39913ef12a fix: remove specific configurations 2024-09-06 20:23:33 +02:00
d2a744e70c fix: remove line 2024-09-06 18:42:47 +02:00
be93c52380 fix: remove line 2024-09-06 18:42:28 +02:00
7ccefca35e fix: remove comment 2024-09-06 18:41:36 +02:00
14b4723734 feat: move configuration to a common config section, add documentation 2024-09-06 18:32:46 +02:00
c8f1c03061 fix: correct tuple unpacking in GitHub polling task queue loop 2024-09-05 20:31:17 +03:00
Tal
b02fa22948 Merge pull request #1198 from Codium-ai/tr/polling
Tr/polling
2024-09-05 19:58:51 +03:00
85754d2d79 feat: enhance GitHub polling with synchronous comment processing and improved logging 2024-09-05 16:57:10 +03:00
f0d780c7ec feat: enhance GitHub polling with synchronous comment processing and improved logging 2024-09-05 16:55:10 +03:00
19048ee705 feat: enhance GitHub polling with synchronous comment processing and improved logging 2024-09-05 16:53:31 +03:00
b8d2b263b9 feat: enhance GitHub polling with synchronous comment processing and improved logging and bug fixing 2024-09-05 16:52:47 +03:00
Tal
6f17c08f72 Merge pull request #1197 from proDOOMman/onec_enterprice
Add 1C Enterprise language
2024-09-04 15:20:16 +03:00
65c0bc414f docs: add supported browsers section to Chrome extension documentation 2024-09-04 13:36:44 +03:00
015719134f docs: enhance PR chat section with context-aware explanation in features.md 2024-09-04 12:01:23 +03:00
1ed6b7a54a docs: enhance PR chat section with context-aware explanation in features.md 2024-09-04 11:57:24 +03:00
14067a02db docs: update Chrome extension documentation with installation video link and image update 2024-09-04 11:01:04 +03:00
be75bb6a16 Add 1C Enterprise language 2024-09-04 09:04:39 +03:00
883d945687 Merge remote-tracking branch 'origin/main' 2024-09-04 08:52:32 +03:00
8090115f30 docs: update Chrome extension description and bump version to 0.2.4 2024-09-04 08:52:22 +03:00
Tal
6fa226dee7 Merge pull request #1196 from Codium-ai/mrT23-patch-11
Update README.md
2024-09-03 18:50:47 +03:00
Tal
13c1cdbf90 Update README.md 2024-09-03 18:49:04 +03:00
2f7f60a469 fix: review standardize regex checking 2024-09-02 16:31:19 +02:00
adce35765b feat: implement skip branches for github, add ignore title to gitlab 2024-09-02 16:26:50 +02:00
23af1afa03 feat: gitlab skip source, target and labels 2024-08-30 17:01:18 +02:00
fdcbdfce98 feat: gitlab skip source, target and labels 2024-08-30 16:40:23 +02:00
95 changed files with 3017 additions and 1369 deletions

View File

@ -1,6 +1,3 @@
[pr_reviewer]
enable_review_labels_effort = true
enable_auto_approval = true
[config]
model="claude-3-5-sonnet"

View File

@ -1,10 +1,11 @@
FROM python:3.10 as base
FROM python:3.12 as base
WORKDIR /app
ADD pyproject.toml .
ADD requirements.txt .
RUN pip install . && rm pyproject.toml requirements.txt
ENV PYTHONPATH=/app
ADD docs docs
ADD pr_agent pr_agent
ADD github_action/entrypoint.sh /
RUN chmod +x /entrypoint.sh

View File

@ -10,7 +10,7 @@
</picture>
<br/>
CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
Qode Merge PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
</div>
[![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
@ -25,11 +25,11 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
</div>
### [Documentation](https://pr-agent-docs.codium.ai/)
- See the [Installation Guide](https://pr-agent-docs.codium.ai/installation/) for instructions on installing PR-Agent on different platforms.
- See the [Installation Guide](https://qodo-merge-docs.qodo.ai/installation/) for instructions on installing Qode Merge PR-Agent on different platforms.
- See the [Usage Guide](https://pr-agent-docs.codium.ai/usage-guide/) for instructions on running PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.
- See the [Usage Guide](https://qodo-merge-docs.qodo.ai/usage-guide/) for instructions on running Qode Merge PR-Agent tools via different interfaces, such as CLI, PR Comments, or by automatically triggering them when a new PR is opened.
- See the [Tools Guide](https://pr-agent-docs.codium.ai/tools/) for a detailed description of the different tools, and the available configurations for each tool.
- See the [Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) for a detailed description of the different tools, and the available configurations for each tool.
## Table of Contents
@ -43,31 +43,50 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
## News and Updates
### August 26, 2024
### November 3, 2024
New version of [PR Agent Chrom Extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) was released, with full support of context-aware **PR Chat**. This novel feature is free to use for any open-source repository. See more details in [here](https://pr-agent-docs.codium.ai/chrome-extension/#pr-chat).
Meaningful improvement to the quality of code suggestions by separating the code suggestion generation from [line number detection](https://github.com/Codium-ai/pr-agent/pull/1338)
<kbd><img src="https://www.codium.ai/images/pr_agent/pr_chat_1.png" width="768"></kbd>
<kbd><img src="https://www.codium.ai/images/pr_agent/pr_chat_2.png" width="768"></kbd>
<kbd>![image](https://github.com/user-attachments/assets/093c185c-31ca-47a1-a4fe-be7d9335ea66)</kbd>
### August 11, 2024
Increased PR context size for improved results, and enabled [asymmetric context](https://github.com/Codium-ai/pr-agent/pull/1114/files#diff-9290a3ad9a86690b31f0450b77acd37ef1914b41fabc8a08682d4da433a77f90R69-R70)
### October 27, 2024
### August 10, 2024
Added support for [Azure devops pipeline](https://pr-agent-docs.codium.ai/installation/azure/) - you can now easily run PR-Agent as an Azure devops pipeline, without needing to set up your own server.
Qodo Merge PR Agent will now automatically document accepted code suggestions in a dedicated wiki page (`.pr_agent_accepted_suggestions`), enabling users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.
This dedicated wiki page will also serve as a foundation for future AI model improvements, allowing it to learn from historically implemented suggestions and generate more targeted, contextually relevant recommendations.
Read more about this novel feature [here](https://qodo-merge-docs.qodo.ai/tools/improve/#suggestion-tracking).
<kbd><img href="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" src="https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png" width="768"></kbd>
### August 5, 2024
Added support for [GitLab pipeline](https://pr-agent-docs.codium.ai/installation/gitlab/#run-as-a-gitlab-pipeline) - you can now run easily PR-Agent as a GitLab pipeline, without needing to set up your own server.
### July 28, 2024
### October 21, 2024
**Disable publishing labels by default:**
(1) improved support for bitbucket server - [auto commands](https://github.com/Codium-ai/pr-agent/pull/1059) and [direct links](https://github.com/Codium-ai/pr-agent/pull/1061)
The default setting for `pr_description.publish_labels` has been updated to `false`. This means that labels generated by the `/describe` tool will no longer be published, unless this configuration is explicitly set to `true`.
(2) custom models are now [supported](https://pr-agent-docs.codium.ai/usage-guide/changing_a_model/#custom-models)
We constantly strive to balance informative AI analysis with reducing unnecessary noise. User feedback indicated that in many cases, the original PR title alone provides sufficient information, making the generated labels (`enhancement`, `documentation`, `bug fix`, ...) redundant.
The [`review_effort`](https://qodo-merge-docs.qodo.ai/tools/review/#configuration-options) label, generated by the `review` tool, will still be published by default, as it provides valuable information enabling reviewers to prioritize small PRs first.
However, every user has different preferences. To still publish the `describe` labels, set `pr_description.publish_labels=true` in the [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/).
For more tailored and relevant labeling, we recommend using the [`custom_labels 💎`](https://qodo-merge-docs.qodo.ai/tools/custom_labels/) tool, that allows generating labels specific to your project's needs.
<kbd>![image](https://github.com/user-attachments/assets/8f38d222-53b1-4742-b2ec-7ea0a30c9076)</kbd>
<kbd>![image](https://github.com/user-attachments/assets/8285bd90-0dda-4c7e-9237-bbfde5e21880)</kbd>
### October 14, 2024
Improved support for GitHub enterprise server with [GitHub Actions](https://qodo-merge-docs.qodo.ai/installation/github/#action-for-github-enterprise-server)
### October 10, 2024
New ability for the `review` tool - **ticket compliance feedback**. If the PR contains a ticket number, PR-Agent will check if the PR code actually [complies](https://github.com/Codium-ai/pr-agent/pull/1279#issuecomment-2404042130) with the ticket requirements.
<kbd><img src="https://github.com/user-attachments/assets/4a2a728b-5f47-40fa-80cc-16efd296938c" width="768"></kbd>
## Overview
@ -79,7 +98,6 @@ Supported commands per platform:
|-------|---------------------------------------------------------------------------------------------------------|:--------------------:|:--------------------:|:--------------------:|:------------:|
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | ⮑ [SOC2 Compliance](https://pr-agent-docs.codium.ai/tools/review/#soc2-ticket-compliance) 💎 | ✅ | ✅ | ✅ | |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline File Summary](https://pr-agent-docs.codium.ai/tools/describe#inline-file-summary) 💎 | ✅ | | | |
| | Improve | ✅ | ✅ | ✅ | ✅ |
@ -248,7 +266,7 @@ Note that when you set your own PR-Agent or use CodiumAI hosted PR-Agent, there
2. **Improved privacy** - No data will be stored or used to train models. PR-Agent Pro will employ zero data retention, and will use an OpenAI account with zero data retention.
3. **Improved support** - PR-Agent Pro users will receive priority support, and will be able to request new features and capabilities.
4. **Extra features** -In addition to the benefits listed above, PR-Agent Pro will emphasize more customization, and the usage of static code analysis, in addition to LLM logic, to improve results.
See [here](https://pr-agent-docs.codium.ai/#pr-agent-pro) for a list of features available in PR-Agent Pro.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for a list of features available in PR-Agent Pro.

View File

@ -3,6 +3,7 @@ FROM python:3.12.3 AS base
WORKDIR /app
ADD pyproject.toml .
ADD requirements.txt .
ADD docs docs
RUN pip install . && rm pyproject.toml requirements.txt
ENV PYTHONPATH=/app

View File

@ -1 +1 @@
# [Visit Our Docs Portal](https://pr-agent-docs.codium.ai/)
# [Visit Our Docs Portal](https://qodo-merge-docs.qodo.ai/)

View File

@ -1 +1 @@
pr-agent-docs.codium.ai
qodo-merge-docs.qodo.ai

Binary file not shown. Before: 15 KiB | After: 4.2 KiB

View File

@ -1,140 +1 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 28.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="64px" height="64px" viewBox="0 0 64 64" enable-background="new 0 0 64 64" xml:space="preserve">
<g>
<defs>
<rect id="SVGID_1_" x="0.4" y="0.1" width="63.4" height="63.4"/>
</defs>
<clipPath id="SVGID_00000008836131916906499950000015813697852011234749_">
<use xlink:href="#SVGID_1_" overflow="visible"/>
</clipPath>
<g clip-path="url(#SVGID_00000008836131916906499950000015813697852011234749_)">
<path fill="#05E5AD" d="M21.4,9.8c3,0,5.9,0.7,8.5,1.9c-5.7,3.4-9.8,11.1-9.8,20.1c0,9,4,16.7,9.8,20.1c-2.6,1.2-5.5,1.9-8.5,1.9
c-11.6,0-21-9.8-21-22S9.8,9.8,21.4,9.8z"/>
<radialGradient id="SVGID_00000150822754378345238340000008985053211526864828_" cx="-140.0905" cy="350.1757" r="4.8781" gradientTransform="matrix(-4.7708 -6.961580e-02 -0.1061 7.2704 -601.3099 -2523.8489)" gradientUnits="userSpaceOnUse">
<stop offset="0" style="stop-color:#6447FF"/>
<stop offset="6.666670e-02" style="stop-color:#6348FE"/>
<stop offset="0.1333" style="stop-color:#614DFC"/>
<stop offset="0.2" style="stop-color:#5C54F8"/>
<stop offset="0.2667" style="stop-color:#565EF3"/>
<stop offset="0.3333" style="stop-color:#4E6CEC"/>
<stop offset="0.4" style="stop-color:#447BE4"/>
<stop offset="0.4667" style="stop-color:#3A8DDB"/>
<stop offset="0.5333" style="stop-color:#2F9FD1"/>
<stop offset="0.6" style="stop-color:#25B1C8"/>
<stop offset="0.6667" style="stop-color:#1BC0C0"/>
<stop offset="0.7333" style="stop-color:#13CEB9"/>
<stop offset="0.8" style="stop-color:#0DD8B4"/>
<stop offset="0.8667" style="stop-color:#08DFB0"/>
<stop offset="0.9333" style="stop-color:#06E4AE"/>
<stop offset="1" style="stop-color:#05E5AD"/>
</radialGradient>
<path fill="url(#SVGID_00000150822754378345238340000008985053211526864828_)" d="M21.4,9.8c3,0,5.9,0.7,8.5,1.9
c-5.7,3.4-9.8,11.1-9.8,20.1c0,9,4,16.7,9.8,20.1c-2.6,1.2-5.5,1.9-8.5,1.9c-11.6,0-21-9.8-21-22S9.8,9.8,21.4,9.8z"/>
<radialGradient id="SVGID_00000022560571240417802950000012439139323268113305_" cx="-191.7649" cy="385.7387" r="4.8781" gradientTransform="matrix(-2.5514 -0.7616 -0.8125 2.7217 -130.733 -1180.2209)" gradientUnits="userSpaceOnUse">
<stop offset="0" style="stop-color:#6447FF"/>
<stop offset="6.666670e-02" style="stop-color:#6348FE"/>
<stop offset="0.1333" style="stop-color:#614DFC"/>
<stop offset="0.2" style="stop-color:#5C54F8"/>
<stop offset="0.2667" style="stop-color:#565EF3"/>
<stop offset="0.3333" style="stop-color:#4E6CEC"/>
<stop offset="0.4" style="stop-color:#447BE4"/>
<stop offset="0.4667" style="stop-color:#3A8DDB"/>
<stop offset="0.5333" style="stop-color:#2F9FD1"/>
<stop offset="0.6" style="stop-color:#25B1C8"/>
<stop offset="0.6667" style="stop-color:#1BC0C0"/>
<stop offset="0.7333" style="stop-color:#13CEB9"/>
<stop offset="0.8" style="stop-color:#0DD8B4"/>
<stop offset="0.8667" style="stop-color:#08DFB0"/>
<stop offset="0.9333" style="stop-color:#06E4AE"/>
<stop offset="1" style="stop-color:#05E5AD"/>
</radialGradient>
<path fill="url(#SVGID_00000022560571240417802950000012439139323268113305_)" d="M38,18.3c-2.1-2.8-4.9-5.1-8.1-6.6
c2-1.2,4.2-1.9,6.6-1.9c2.2,0,4.3,0.6,6.2,1.7C40.8,12.9,39.2,15.3,38,18.3L38,18.3z"/>
<radialGradient id="SVGID_00000143611122169386473660000017673587931016751800_" cx="-194.7918" cy="395.2442" r="4.8781" gradientTransform="matrix(-2.5514 -0.7616 -0.8125 2.7217 -130.733 -1172.9556)" gradientUnits="userSpaceOnUse">
<stop offset="0" style="stop-color:#6447FF"/>
<stop offset="6.666670e-02" style="stop-color:#6348FE"/>
<stop offset="0.1333" style="stop-color:#614DFC"/>
<stop offset="0.2" style="stop-color:#5C54F8"/>
<stop offset="0.2667" style="stop-color:#565EF3"/>
<stop offset="0.3333" style="stop-color:#4E6CEC"/>
<stop offset="0.4" style="stop-color:#447BE4"/>
<stop offset="0.4667" style="stop-color:#3A8DDB"/>
<stop offset="0.5333" style="stop-color:#2F9FD1"/>
<stop offset="0.6" style="stop-color:#25B1C8"/>
<stop offset="0.6667" style="stop-color:#1BC0C0"/>
<stop offset="0.7333" style="stop-color:#13CEB9"/>
<stop offset="0.8" style="stop-color:#0DD8B4"/>
<stop offset="0.8667" style="stop-color:#08DFB0"/>
<stop offset="0.9333" style="stop-color:#06E4AE"/>
<stop offset="1" style="stop-color:#05E5AD"/>
</radialGradient>
<path fill="url(#SVGID_00000143611122169386473660000017673587931016751800_)" d="M38,45.2c1.2,3,2.9,5.3,4.7,6.8
c-1.9,1.1-4,1.7-6.2,1.7c-2.3,0-4.6-0.7-6.6-1.9C33.1,50.4,35.8,48.1,38,45.2L38,45.2z"/>
<path fill="#684BFE" d="M20.1,31.8c0-9,4-16.7,9.8-20.1c3.2,1.5,6,3.8,8.1,6.6c-1.5,3.7-2.5,8.4-2.5,13.5s0.9,9.8,2.5,13.5
c-2.1,2.8-4.9,5.1-8.1,6.6C24.1,48.4,20.1,40.7,20.1,31.8z"/>
<radialGradient id="SVGID_00000147942998054305738810000004710078864578628519_" cx="-212.7358" cy="363.2475" r="4.8781" gradientTransform="matrix(-2.3342 -1.063 -1.623 3.5638 149.3813 -1470.1027)" gradientUnits="userSpaceOnUse">
<stop offset="0" style="stop-color:#6447FF"/>
<stop offset="6.666670e-02" style="stop-color:#6348FE"/>
<stop offset="0.1333" style="stop-color:#614DFC"/>
<stop offset="0.2" style="stop-color:#5C54F8"/>
<stop offset="0.2667" style="stop-color:#565EF3"/>
<stop offset="0.3333" style="stop-color:#4E6CEC"/>
<stop offset="0.4" style="stop-color:#447BE4"/>
<stop offset="0.4667" style="stop-color:#3A8DDB"/>
<stop offset="0.5333" style="stop-color:#2F9FD1"/>
<stop offset="0.6" style="stop-color:#25B1C8"/>
<stop offset="0.6667" style="stop-color:#1BC0C0"/>
<stop offset="0.7333" style="stop-color:#13CEB9"/>
<stop offset="0.8" style="stop-color:#0DD8B4"/>
<stop offset="0.8667" style="stop-color:#08DFB0"/>
<stop offset="0.9333" style="stop-color:#06E4AE"/>
<stop offset="1" style="stop-color:#05E5AD"/>
</radialGradient>
<path fill="url(#SVGID_00000147942998054305738810000004710078864578628519_)" d="M50.7,42.5c0.6,3.3,1.5,6.1,2.5,8
c-1.8,2-3.8,3.1-6,3.1c-1.6,0-3.1-0.6-4.5-1.7C46.1,50.2,48.9,46.8,50.7,42.5L50.7,42.5z"/>
<radialGradient id="SVGID_00000083770737908230256670000016126156495859285174_" cx="-208.5327" cy="357.2025" r="4.8781" gradientTransform="matrix(-2.3342 -1.063 -1.623 3.5638 149.3813 -1476.8097)" gradientUnits="userSpaceOnUse">
<stop offset="0" style="stop-color:#6447FF"/>
<stop offset="6.666670e-02" style="stop-color:#6348FE"/>
<stop offset="0.1333" style="stop-color:#614DFC"/>
<stop offset="0.2" style="stop-color:#5C54F8"/>
<stop offset="0.2667" style="stop-color:#565EF3"/>
<stop offset="0.3333" style="stop-color:#4E6CEC"/>
<stop offset="0.4" style="stop-color:#447BE4"/>
<stop offset="0.4667" style="stop-color:#3A8DDB"/>
<stop offset="0.5333" style="stop-color:#2F9FD1"/>
<stop offset="0.6" style="stop-color:#25B1C8"/>
<stop offset="0.6667" style="stop-color:#1BC0C0"/>
<stop offset="0.7333" style="stop-color:#13CEB9"/>
<stop offset="0.8" style="stop-color:#0DD8B4"/>
<stop offset="0.8667" style="stop-color:#08DFB0"/>
<stop offset="0.9333" style="stop-color:#06E4AE"/>
<stop offset="1" style="stop-color:#05E5AD"/>
</radialGradient>
<path fill="url(#SVGID_00000083770737908230256670000016126156495859285174_)" d="M42.7,11.5c1.4-1.1,2.9-1.7,4.5-1.7
c2.2,0,4.3,1.1,6,3.1c-1,2-1.9,4.7-2.5,8C48.9,16.7,46.1,13.4,42.7,11.5L42.7,11.5z"/>
<path fill="#684BFE" d="M38,45.2c2.8-3.7,4.4-8.4,4.4-13.5c0-5.1-1.7-9.8-4.4-13.5c1.2-3,2.9-5.3,4.7-6.8c3.4,1.9,6.2,5.3,8,9.5
c-0.6,3.2-0.9,6.9-0.9,10.8s0.3,7.6,0.9,10.8c-1.8,4.3-4.6,7.6-8,9.5C40.8,50.6,39.2,48.2,38,45.2L38,45.2z"/>
<path fill="#321BB2" d="M38,45.2c-1.5-3.7-2.5-8.4-2.5-13.5S36.4,22,38,18.3c2.8,3.7,4.4,8.4,4.4,13.5S40.8,41.5,38,45.2z"/>
<path fill="#05E6AD" d="M53.2,12.9c1.1-2,2.3-3.1,3.6-3.1c3.9,0,7,9.8,7,22s-3.1,22-7,22c-1.3,0-2.6-1.1-3.6-3.1
c3.4-3.8,5.7-10.8,5.7-18.8C58.8,23.8,56.6,16.8,53.2,12.9z"/>
<radialGradient id="SVGID_00000009565123575973598080000009335550354766300606_" cx="-7.8671" cy="278.2442" r="4.8781" gradientTransform="matrix(1.5187 0 0 -7.8271 69.237 2209.3281)" gradientUnits="userSpaceOnUse">
<stop offset="0" style="stop-color:#05E5AD"/>
<stop offset="0.32" style="stop-color:#05E5AD;stop-opacity:0"/>
<stop offset="0.9028" style="stop-color:#6447FF"/>
</radialGradient>
<path fill="url(#SVGID_00000009565123575973598080000009335550354766300606_)" d="M53.2,12.9c1.1-2,2.3-3.1,3.6-3.1
c3.9,0,7,9.8,7,22s-3.1,22-7,22c-1.3,0-2.6-1.1-3.6-3.1c3.4-3.8,5.7-10.8,5.7-18.8C58.8,23.8,56.6,16.8,53.2,12.9z"/>
<path fill="#684BFE" d="M52.8,31.8c0-3.9-0.8-7.6-2.1-10.8c0.6-3.3,1.5-6.1,2.5-8c3.4,3.8,5.7,10.8,5.7,18.8c0,8-2.3,15-5.7,18.8
c-1-2-1.9-4.7-2.5-8C52,39.3,52.8,35.7,52.8,31.8z"/>
<path fill="#321BB2" d="M50.7,42.5c-0.6-3.2-0.9-6.9-0.9-10.8s0.3-7.6,0.9-10.8c1.3,3.2,2.1,6.9,2.1,10.8S52,39.3,50.7,42.5z"/>
</g>
</g>
</svg>
<?xml version="1.0" encoding="UTF-8"?><svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 109.77 81.94"><defs><style>.cls-1{fill:#7968fa;}.cls-1,.cls-2{stroke-width:0px;}.cls-2{fill:#5ae3ae;}</style></defs><path class="cls-2" d="m109.77,40.98c0,22.62-7.11,40.96-15.89,40.96-3.6,0-6.89-3.09-9.58-8.31,6.82-7.46,11.22-19.3,11.22-32.64s-4.4-25.21-11.22-32.67C86.99,3.09,90.29,0,93.89,0c8.78,0,15.89,18.33,15.89,40.97"/><path class="cls-1" d="m95.53,40.99c0,13.35-4.4,25.19-11.23,32.64-3.81-7.46-6.28-19.3-6.28-32.64s2.47-25.21,6.28-32.67c6.83,7.46,11.23,19.32,11.23,32.67"/><path class="cls-2" d="m55.38,78.15c-4.99,2.42-10.52,3.79-16.38,3.79C17.46,81.93,0,63.6,0,40.98S17.46,0,39,0C44.86,0,50.39,1.37,55.38,3.79c-9.69,6.47-16.43,20.69-16.43,37.19s6.73,30.7,16.43,37.17"/><path class="cls-1" d="m78.02,40.99c0,16.48-9.27,30.7-22.65,37.17-9.69-6.47-16.43-20.69-16.43-37.17S45.68,10.28,55.38,3.81c13.37,6.49,22.65,20.69,22.65,37.19"/><path class="cls-2" d="m84.31,73.63c-4.73,5.22-10.64,8.31-17.06,8.31-4.24,0-8.27-1.35-11.87-3.79,13.37-6.48,22.65-20.7,22.65-37.17,0,13.35,2.47,25.19,6.28,32.64"/><path class="cls-2" d="m84.31,8.31c-3.81,7.46-6.28,19.32-6.28,32.67,0-16.5-9.27-30.7-22.65-37.19,3.6-2.45,7.63-3.8,11.87-3.8,6.43,0,12.33,3.09,17.06,8.31"/></svg>

Before: 9.0 KiB | After: 1.2 KiB

BIN docs/docs/assets/logo_.png Normal file
Binary file not shown. After: 8.7 KiB

View File

@ -1,5 +1,5 @@
We take your code's security and privacy seriously:
- The Chrome extension will not send your code to any external servers.
- For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing PR-Agent Pro integration.
- For private repositories, we will first validate the user's identity and permissions. After authentication, we generate responses using the existing Qodo Merge Pro integration.

View File

@ -1,21 +1,25 @@
### PR Chat
### PR chat
The PR-Chat feature allows to freely chat with your PR code, within your GitHub environment.
It will seamlessly add the PR code as context to your chat session, and provide AI-powered feedback.
It will seamlessly use the PR as context to your chat session, and provide AI-powered feedback.
To enable private chat, simply install the PR-Agent Chrome extension. After installation, each PR's file-changed tab will include a chat box, where you may ask questions about your code.
To enable private chat, simply install the Qodo Merge Chrome extension. After installation, each PR's file-changed tab will include a chat box, where you may ask questions about your code.
This chat session is **private**, and won't be visible to other users.
All open-source repositories are supported.
For private repositories, you will also need to install PR-Agent Pro, After installation, make sure to open at least one new PR to fully register your organization. Once done, you can chat with both new and existing PRs across all installed repositories.
For private repositories, you will also need to install Qodo Merge Pro, After installation, make sure to open at least one new PR to fully register your organization. Once done, you can chat with both new and existing PRs across all installed repositories.
<img src="https://codium.ai/images/pr_agent/pr_chat1.png" width="768">
<img src="https://codium.ai/images/pr_agent/pr_chat2.png" width="768">
#### Context-aware PR chat
Qodo Merge constructs a comprehensive context for each pull request, incorporating the PR description, commit messages, and code changes with extended dynamic context. This contextual information, along with additional PR-related data, forms the foundation for an AI-powered chat session. The agent then leverages this rich context to provide intelligent, tailored responses to user inquiries about the pull request.
<img src="https://codium.ai/images/pr_agent/pr_chat_1.png" width="768">
<img src="https://codium.ai/images/pr_agent/pr_chat_2.png" width="768">
### Toolbar extension
With PR-Agent Chrome extension, it's [easier than ever](https://www.youtube.com/watch?v=gT5tli7X4H4) to interactively configure and experiment with the different tools and configuration options.
With Qodo Merge Chrome extension, it's [easier than ever](https://www.youtube.com/watch?v=gT5tli7X4H4) to interactively configure and experiment with the different tools and configuration options.
For private repositories, after you found the setup that works for you, you can also easily export it as a persistent configuration file, and use it for automatic commands.
@ -23,11 +27,11 @@ For private repositories, after you found the setup that works for you, you can
<img src="https://codium.ai/images/pr_agent/toolbar2.png" width="512">
### PR-Agent filters
### Qodo Merge filters
PR-Agent filters is a sidepanel option. that allows you to filter different message in the conversation tab.
Qodo Merge filters is a sidepanel option. that allows you to filter different message in the conversation tab.
For example, you can choose to present only message from PR-Agent, or filter those messages, focusing only on user's comments.
For example, you can choose to present only message from Qodo Merge, or filter those messages, focusing only on user's comments.
<img src="https://codium.ai/images/pr_agent/pr_agent_filters1.png" width="256">
@ -36,7 +40,7 @@ For example, you can choose to present only message from PR-Agent, or filter tho
### Enhanced code suggestions
PR-Agent Chrome extension adds the following capabilities to code suggestions tool's comments:
Qodo Merge Chrome extension adds the following capabilities to code suggestions tool's comments:
- Auto-expand the table when you are viewing a code block, to avoid clipping.
- Adding a "quote-and-reply" button, that enables to address and comment on a specific suggestion (for example, asking the author to fix the issue)

View File

@ -1,8 +1,14 @@
[PR-Agent Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) is a collection of tools that integrates seamlessly with your GitHub environment, aiming to enhance your Git usage experience, and providing AI-powered capabilities to your PRs.
[Qodo Merge Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) is a collection of tools that integrates seamlessly with your GitHub environment, aiming to enhance your Git usage experience, and providing AI-powered capabilities to your PRs.
With a single-click installation you will gain access to a context-aware PR chat with top models, a toolbar extension with multiple AI feedbacks, PR-Agent filters, and additional abilities.
With a single-click installation you will gain access to a context-aware chat on your pull requests code, a toolbar extension with multiple AI feedbacks, Qodo Merge filters, and additional abilities.
All the extension's features are free to use on public repositories. For private repositories, you will need to install in addition to the extension [PR-Agent Pro](https://github.com/apps/codiumai-pr-agent-pro) (fast and easy installation with two weeks of trial, no credit card required).
The extension is powered by top code models like Claude 3.5 Sonnet and GPT4. All the extension's features are free to use on public repositories.
<img src="https://codium.ai/images/pr_agent/pr_chat1.png" width="768">
<img src="https://codium.ai/images/pr_agent/pr_chat2.png" width="768">
For private repositories, you will need to install [Qodo Merge Pro](https://github.com/apps/codiumai-pr-agent-pro) in addition to the extension (Quick GitHub app setup with a 14-day free trial. No credit card needed).
For a demonstration of how to install Qodo Merge Pro and use it with the Chrome extension, please refer to the tutorial video at the provided [link](https://codium.ai/images/pr_agent/private_repos.mp4).
<img src="https://codium.ai/images/pr_agent/PR-AgentChat.gif" width="768">
### Supported browsers
The extension is supported on all Chromium-based browsers, including Google Chrome, Arc, Opera, Brave, and Microsoft Edge.

View File

@ -0,0 +1,2 @@
## Overview
TBD

View File

@ -0,0 +1,47 @@
## Overview - PR Compression Strategy
There are two scenarios:
1. The PR is small enough to fit in a single prompt (including system and user prompt)
2. The PR is too large to fit in a single prompt (including system and user prompt)
For both scenarios, we first use the following strategy
#### Repo language prioritization strategy
We prioritize the languages of the repo based on the following criteria:
1. Exclude binary files and non-code files (e.g. images, PDFs, etc.)
2. Identify the main languages used in the repo
3. Sort the PR files by the most common languages in the repo (in descending order):
* ```[[file.py, file2.py],[file3.js, file4.jsx],[readme.md]]```
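To make the grouping above concrete, here is a minimal, hypothetical sketch of bucketing PR files by language and ordering the buckets by how common each language is in the repo. The `EXT_TO_LANG` mapping and the function name are illustrative assumptions, not code from the pr-agent repository.
```python
from collections import Counter

# Hypothetical extension-to-language map; the real tool derives languages
# differently and also filters out binary and non-code files first.
EXT_TO_LANG = {".py": "Python", ".js": "JavaScript", ".jsx": "JavaScript", ".md": "Markdown"}

def group_files_by_repo_language(pr_files, repo_language_counts):
    """Group PR files by language, ordered by how common each language is in the repo."""
    buckets = {}
    for path in pr_files:
        ext = "." + path.rsplit(".", 1)[-1] if "." in path else ""
        lang = EXT_TO_LANG.get(ext, "Other")
        buckets.setdefault(lang, []).append(path)
    # Order the language buckets by descending frequency in the repository
    ranked = sorted(buckets.items(), key=lambda kv: repo_language_counts.get(kv[0], 0), reverse=True)
    return [files for _, files in ranked]

# Python is the dominant repo language here, then JavaScript, then Markdown
print(group_files_by_repo_language(
    ["file.py", "file2.py", "file3.js", "file4.jsx", "readme.md"],
    Counter({"Python": 120, "JavaScript": 40, "Markdown": 5}),
))
# -> [['file.py', 'file2.py'], ['file3.js', 'file4.jsx'], ['readme.md']]
```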
### Small PR
In this case, we can fit the entire PR in a single prompt:
1. Exclude binary files and non-code files (e.g. images, PDFs, etc.)
2. Expand the surrounding context of each patch to 3 lines above and below the patch
### Large PR
#### Motivation
Pull requests can be very long and contain a lot of information with varying degrees of relevance to the pr-agent.
We want to pack as much information as possible into a single LLM prompt, while keeping the information relevant to the pr-agent.
#### Compression strategy
We prioritize additions over deletions:
- Combine all deleted files into a single list (`deleted files`)
- File patches are a list of hunks; remove all deletion-only hunks from each file patch
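As a rough illustration of this pruning step (not the tool's actual implementation), the sketch below drops hunks that consist purely of deletions while keeping hunks that still add or modify lines; the list-of-strings hunk representation is a simplified stand-in for pr-agent's internal patch structures.
```python
def strip_deletion_only_hunks(hunks):
    """Keep hunks that still add or modify lines; drop hunks made purely of deletions.

    Each hunk is represented here as a list of diff lines prefixed with '+', '-' or ' ';
    this is a simplified stand-in for the tool's internal patch representation.
    """
    return [hunk for hunk in hunks if any(line.startswith("+") for line in hunk)]

patch_hunks = [
    ["-removed line 1", "-removed line 2"],               # deletion-only hunk -> dropped
    [" context", "-old line", "+new line", " context"],   # modified hunk -> kept
]
print(strip_deletion_only_hunks(patch_hunks))
```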
#### Adaptive and token-aware file patch fitting
We use [tiktoken](https://github.com/openai/tiktoken) to tokenize the patches after the modifications described above, and we use the following strategy to fit the patches into the prompt:
1. Within each language we sort the files by the number of tokens in the file (in descending order):
- ```[[file2.py, file.py],[file4.jsx, file3.js],[readme.md]]```
2. Iterate through the patches in the order described above
3. Add the patches to the prompt until the prompt reaches a certain buffer from the max token length
4. If there are still patches left, add the remaining patches as a list called `other modified files` to the prompt, until the prompt reaches the max token length (hard stop); skip the rest of the patches.
5. If we haven't reached the max token length, add the `deleted files` to the prompt, until the prompt reaches the max token length (hard stop); skip the rest of the patches.
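The following is a minimal sketch of the token-budgeted packing described in steps 1-5, assuming the patches have already been sorted and pruned as above. The function name, the `(filename, patch_text)` tuple format, and the buffer size are illustrative assumptions rather than the actual implementation.
```python
import tiktoken

def fit_patches_to_budget(sorted_patches, deleted_files, max_tokens, buffer=1500):
    """Illustrative packing of patches into a prompt under a token budget.

    `sorted_patches` is assumed to be a list of (filename, patch_text) tuples,
    already ordered by language and size as described above.
    """
    enc = tiktoken.get_encoding("cl100k_base")
    prompt_patches, other_modified_files, used = [], [], 0

    for name, patch in sorted_patches:
        cost = len(enc.encode(patch))
        if used + cost <= max_tokens - buffer:   # step 3: keep a buffer from the limit
            prompt_patches.append(patch)
            used += cost
        else:                                    # step 4: remaining files listed by name only
            name_cost = len(enc.encode(name))
            if used + name_cost <= max_tokens:   # hard stop at the max token length
                other_modified_files.append(name)
                used += name_cost

    kept_deleted_files = []                      # step 5: append deleted files if room remains
    for name in deleted_files:
        name_cost = len(enc.encode(name))
        if used + name_cost <= max_tokens:
            kept_deleted_files.append(name)
            used += name_cost

    return prompt_patches, other_modified_files, kept_deleted_files
```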
#### Example
![Core Abilities](https://codium.ai/images/git_patch_logic.png){width=768}

View File

@ -0,0 +1,72 @@
## TL;DR
Qodo Merge uses an **asymmetric and dynamic context strategy** to improve AI analysis of code changes in pull requests.
It provides more context before changes than after, and dynamically adjusts the context based on code structure (e.g., enclosing functions or classes).
This approach balances providing sufficient context for accurate analysis, while avoiding needle-in-the-haystack information overload that could degrade AI performance or exceed token limits.
## Introduction
Pull request code changes are retrieved in a unified diff format, showing three lines of context before and after each modified section, with additions marked by '+' and deletions by '-'.
```
@@ -12,5 +12,5 @@ def func1():
code line that already existed in the file...
code line that already existed in the file...
code line that already existed in the file....
-code line that was removed in the PR
+new code line added in the PR
code line that already existed in the file...
code line that already existed in the file...
code line that already existed in the file...
@@ -26,2 +26,4 @@ def func2():
...
```
This unified diff format can be challenging for AI models to interpret accurately, as it provides limited context for understanding the full scope of code changes.
The presentation of code using '+', '-', and ' ' symbols to indicate additions, deletions, and unchanged lines respectively also differs from the standard code formatting typically used to train AI models.
## Challenges of expanding the context window
While expanding the context window is technically feasible, it presents a more fundamental trade-off:
Pros:
- Enhanced context allows the model to better comprehend and localize the code changes, resulting (potentially) in more precise analysis and suggestions. Without enough context, the model may struggle to understand the code changes and provide relevant feedback.
Cons:
- Excessive context may overwhelm the model with extraneous information, creating a "needle in a haystack" scenario where focusing on the relevant details (the code that actually changed) becomes challenging.
LLM quality is known to degrade when the context gets larger.
Pull requests often encompass multiple changes across many files, potentially spanning hundreds of lines of modified code. This complexity presents a genuine risk of overwhelming the model with excessive context.
- Increased context expands the token count, increasing processing time and cost, and may prevent the model from processing the entire pull request in a single pass.
## Asymmetric and dynamic context
To address these challenges, Qodo Merge employs an **asymmetric** and **dynamic** context strategy, providing the model with more focused and relevant context information for each code change.
**Asymmetric:**
We start by recognizing that the context preceding a code change is typically more crucial for understanding the modification than the context following it.
Consequently, Qodo Merge implements an asymmetric context policy, decoupling the context window into two distinct segments: one for the code before the change and another for the code after.
By independently adjusting each context window, Qodo Merge can supply the model with a more tailored and pertinent context for individual code changes.
**Dynamic:**
We also employ a "dynamic" context strategy.
We start by recognizing that the optimal context for a code change often corresponds to its enclosing code component (e.g., function, class), rather than a fixed number of lines.
Consequently, we dynamically adjust the context window based on the code's structure, ensuring the model receives the most pertinent information for each modification.
To prevent overwhelming the model with excessive context, we impose a limit on the number of lines searched when identifying the enclosing component.
This balance allows for comprehensive understanding while maintaining efficiency and limiting context token usage.
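A simplified sketch of how such an asymmetric, dynamic extension could work for a single hunk is shown below. The function name and parameters loosely mirror the configuration options in the appendix but are illustrative only, and the real implementation handles many more cases (skipped file types, language-aware component detection, etc.).
```python
def extend_hunk_context(file_lines, hunk_start, hunk_end,
                        extra_before=8, extra_after=1, allow_dynamic=True):
    """Illustrative asymmetric/dynamic context extension for a single hunk.

    `hunk_start` and `hunk_end` are 0-based line indices of the hunk within the full file.
    More lines are added before the hunk than after it (asymmetric), and the backward
    search stops early if an enclosing component is found within the allowed window
    (dynamic). The `def`/`class` check is a Python-only heuristic used for brevity.
    """
    start = max(0, hunk_start - extra_before)
    if allow_dynamic:
        for i in range(hunk_start - 1, start - 1, -1):
            if file_lines[i].lstrip().startswith(("def ", "class ")):
                start = i
                break
    end = min(len(file_lines), hunk_end + extra_after)
    return file_lines[start:end]
```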
## Appendix - relevant configuration options
```
[config]
patch_extension_skip_types =[".md",".txt"] # Skip files with these extensions when trying to extend the context
allow_dynamic_context=true # Allow dynamic context extension
max_extra_lines_before_dynamic_context = 8 # will try to include up to X extra lines before the hunk in the patch, until we reach an enclosing function or class
patch_extra_lines_before = 3 # Number of extra lines (+3 default ones) to include before each hunk in the patch
patch_extra_lines_after = 1 # Number of extra lines (+3 default ones) to include after each hunk in the patch
```

View File

@ -0,0 +1,44 @@
# Overview - Impact Evaluation 💎
Demonstrating the return on investment (ROI) of AI-powered initiatives is crucial for modern organizations.
To address this need, Qodo Merge has developed AI impact measurement tools and metrics, providing advanced analytics to help businesses quantify the tangible benefits of AI adoption in their PR review process.
## Auto Impact Validator - Real-Time Tracking of Implemented Qodo Merge Suggestions
### How It Works
When a user pushes a new commit to the pull request, Qodo Merge automatically compares the updated code against the previous suggestions, marking them as implemented if the changes address these recommendations, whether directly or indirectly:
1. **Direct Implementation:** The user directly addresses the suggestion as-is in the PR, either by clicking on the "apply code suggestion" checkbox or by making the changes manually.
2. **Indirect Implementation:** Qodo Merge recognizes when a suggestion's intent is fulfilled, even if the exact code changes differ from the original recommendation. It marks these suggestions as implemented, acknowledging that users may achieve the same goal through alternative solutions.
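As a crude, purely illustrative approximation of the direct-implementation case (the real validator relies on AI comparison and also detects indirect implementations, which a plain text match cannot capture), one could check whether the suggested code now appears in the updated file:
```python
def detect_direct_implementation(suggestion_code, updated_file_text):
    """Crude illustration only: treat a suggestion as implemented if its proposed
    code now appears verbatim (ignoring whitespace) in the updated file.

    The real validator uses AI comparison and can also recognize indirect
    implementations, which a plain text match like this cannot capture.
    """
    def normalize(text):
        return "".join(text.split())
    return normalize(suggestion_code) in normalize(updated_file_text)
```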
### Real-Time Visual Feedback
Upon confirming that a suggestion was implemented, Qodo Merge automatically adds a ✅ (check mark) to the relevant suggestion, enabling transparent tracking of Qodo Merge's impact analysis.
Qodo Merge will also add, inside the relevant suggestions, an explanation of how the new code was impacted by each suggestion.
![Suggestion_checkmark](https://codium.ai/images/pr_agent/auto_suggestion_checkmark.png){width=512}
### Dashboard Metrics
The dashboard provides macro-level insights into the overall impact of Qodo Merge on the pull-request process with key productivity metrics.
By offering clear, data-driven evidence of Qodo Merge's impact, it empowers leadership teams to make informed decisions about the tool's effectiveness and ROI.
Here are key metrics that the dashboard tracks:
#### Qodo Merge Impacts per 1K Lines
![Dashboard](https://codium.ai/images/pr_agent/impacts_per_1k_llines.png){width=512}
> Explanation: for every 1K lines of code (additions/edits), Qodo Merge had on average ~X suggestions implemented.
**Why This Metric Matters:**
1. **Standardized and Comparable Measurement:** By measuring impacts per 1K lines of code additions, you create a standardized metric that can be compared across different projects, teams, customers, and time periods. This standardization is crucial for meaningful analysis, benchmarking, and identifying where Qodo Merge is most effective.
2. **Accounts for PR Variability and Incentivizes Quality:** This metric addresses the fact that "Not all PRs are created equal." By normalizing against lines of code rather than PR count, you account for the variability in PR sizes and focus on the quality and impact of suggestions rather than just the number of PRs affected.
3. **Quantifies Value and ROI:** The metric directly correlates with the value Qodo Merge is providing, showing how frequently it offers improvements relative to the amount of new code being written. This provides a clear, quantifiable way to demonstrate Qodo Merge's return on investment to stakeholders.
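Assuming the metric is the simple normalization its name implies (the exact aggregation window used by the dashboard is not specified here), it could be computed along these lines:
```python
def impacts_per_1k_lines(implemented_suggestions, lines_added_or_edited):
    """Normalize the number of implemented suggestions by the volume of changed code."""
    if lines_added_or_edited == 0:
        return 0.0
    return 1000 * implemented_suggestions / lines_added_or_edited

# Example: 42 implemented suggestions across 30,000 added/edited lines -> 1.4 per 1K lines
print(impacts_per_1k_lines(42, 30_000))
```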
#### Suggestion Effectiveness Across Categories
![Impacted_Suggestion_Score](https://codium.ai/images/pr_agent/impact_by_category.png){width=512}
> Explanation: This chart illustrates the distribution of implemented suggestions across different categories, enabling teams to better understand Qodo Merge's impact on various aspects of code quality and development practices.
#### Suggestion Score Distribution
![Impacted_Suggestion_Score](https://codium.ai/images/pr_agent/impacted_score_dist.png){width=512}
> Explanation: The distribution of the suggestion score for the implemented suggestions, ensuring that higher-scored suggestions truly represent more significant improvements.

View File

@ -1,52 +1,28 @@
## PR Compression Strategy
There are two scenarios:
# Core Abilities
Qodo Merge utilizes a variety of core abilities to provide a comprehensive and efficient code review experience. These abilities include:
1. The PR is small enough to fit in a single prompt (including system and user prompt)
2. The PR is too large to fit in a single prompt (including system and user prompt)
- [Local and global metadata](https://qodo-merge-docs.qodo.ai/core-abilities/metadata/)
- [Dynamic context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/)
- [Self-reflection](https://qodo-merge-docs.qodo.ai/core-abilities/self_reflection/)
- [Impact evaluation](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/)
- [Interactivity](https://qodo-merge-docs.qodo.ai/core-abilities/interactivity/)
- [Compression strategy](https://qodo-merge-docs.qodo.ai/core-abilities/compression_strategy/)
- [Code-oriented YAML](https://qodo-merge-docs.qodo.ai/core-abilities/code_oriented_yaml/)
- [Static code analysis](https://qodo-merge-docs.qodo.ai/core-abilities/static_code_analysis/)
- [Code fine-tuning benchmark](https://qodo-merge-docs.qodo.ai/finetuning_benchmark/)
For both scenarios, we first use the following strategy
## Blogs
#### Repo language prioritization strategy
We prioritize the languages of the repo based on the following criteria:
Here are some additional technical blogs from Qodo that delve deeper into the core capabilities and features of Large Language Models (LLMs) when applied to coding tasks.
These resources provide more comprehensive insights into leveraging LLMs for software development.
1. Exclude binary files and non code files (e.g. images, pdfs, etc)
2. Given the main languages used in the repo
3. We sort the PR files by the most common languages in the repo (in descending order):
* ```[[file.py, file2.py],[file3.js, file4.jsx],[readme.md]]```
### Code Generation and LLMs
- [State-of-the-art Code Generation with AlphaCodium From Prompt Engineering to Flow Engineering](https://www.qodo.ai/blog/qodoflow-state-of-the-art-code-generation-for-code-contests/)
- [RAG for a Codebase with 10k Repos](https://www.qodo.ai/blog/rag-for-large-scale-code-repos/)
### Small PR
In this case, we can fit the entire PR in a single prompt:
1. Exclude binary files and non code files (e.g. images, pdfs, etc)
2. We Expand the surrounding context of each patch to 3 lines above and below the patch
### Development Processes
- [Understanding the Challenges and Pain Points of the Pull Request Cycle](https://www.qodo.ai/blog/understanding-the-challenges-and-pain-points-of-the-pull-request-cycle/)
- [Introduction to Code Coverage Testing](https://www.qodo.ai/blog/introduction-to-code-coverage-testing/)
### Large PR
#### Motivation
Pull Requests can be very long and contain a lot of information with varying degree of relevance to the pr-agent.
We want to be able to pack as much information as possible in a single LMM prompt, while keeping the information relevant to the pr-agent.
#### Compression strategy
We prioritize additions over deletions:
- Combine all deleted files into a single list (`deleted files`)
- File patches are a list of hunks, remove all hunks of type deletion-only from the hunks in the file patch
#### Adaptive and token-aware file patch fitting
We use [tiktoken](https://github.com/openai/tiktoken) to tokenize the patches after the modifications described above, and we use the following strategy to fit the patches into the prompt:
1. Within each language we sort the files by the number of tokens in the file (in descending order):
- ```[[file2.py, file.py],[file4.jsx, file3.js],[readme.md]]```
2. Iterate through the patches in the order described above
3. Add the patches to the prompt until the prompt reaches a certain buffer from the max token length
4. If there are still patches left, add the remaining patches as a list called `other modified files` to the prompt until the prompt reaches the max token length (hard stop), skip the rest of the patches.
5. If we haven't reached the max token length, add the `deleted files` to the prompt until the prompt reaches the max token length (hard stop), skip the rest of the patches.
#### Example
![Core Abilities](https://codium.ai/images/git_patch_logic.png){width=768}
## YAML Prompting
TBD
## Static Code Analysis 💎
TBD
### Cost Optimization
- [Reduce Your Costs by 30% When Using GPT for Python Code](https://www.qodo.ai/blog/reduce-your-costs-by-30-when-using-gpt-3-for-python-code/)

View File

@ -0,0 +1,2 @@
## Interactive invocation 💎
TBD

View File

@ -0,0 +1,56 @@
## Local and global metadata injection with multi-stage analysis
(1)
Qodo Merge initially retrieves for each PR the following data:
- PR title and branch name
- PR original description
- Commit messages history
- PR diff patches, in [hunk diff](https://loicpefferkorn.net/2014/02/diff-files-what-are-hunks-and-how-to-extract-them/) format
- The entire content of the files that were modified in the PR
!!! tip "Tip: Organization-level metadata"
In addition to the inputs above, Qodo Merge can incorporate supplementary preferences provided by the user, like [`extra_instructions` and `organization best practices`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices). This information can be used to enhance the PR analysis.
(2)
By default, the first command that Qodo Merge executes is [`describe`](https://qodo-merge-docs.qodo.ai/tools/describe/), which generates three types of outputs:
- PR Type (e.g. bug fix, feature, refactor, etc)
- PR Description - a bullet point summary of the PR
- Changes walkthrough - for each modified file, provide a one-line summary followed by a detailed bullet point list of the changes.
These AI-generated outputs are now considered as part of the PR metadata, and can be used in subsequent commands like `review` and `improve`.
This effectively enables multi-stage chain-of-thought analysis, without requiring additional API calls that would cost time and money.
For example, when generating code suggestions for different files, Qodo Merge can inject the AI-generated ["Changes walkthrough"](https://github.com/Codium-ai/pr-agent/pull/1202#issue-2511546839) file summary in the prompt:
```
## File: 'src/file1.py'
### AI-generated file summary:
- edited function `func1` that does X
- Removed function `func2` that was not used
- ....
@@ ... @@ def func1():
__new hunk__
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():
__new hunk__
...
__old hunk__
...
```
(3) The entire PR files that were retrieved are also used to expand and enhance the PR context (see [Dynamic Context](https://qodo-merge-docs.qodo.ai/core-abilities/dynamic_context/)).
(4) All the metadata described above represents several levels of cumulative analysis - ranging from hunk level, to file level, to PR level, to organization level.
This comprehensive approach enables Qodo Merge AI models to generate more precise and contextually relevant suggestions and feedback.

View File

@ -0,0 +1,50 @@
## TL;DR
Qodo Merge implements a **self-reflection** process where the AI model reflects, scores, and re-ranks its own suggestions, eliminating irrelevant or incorrect ones.
This approach improves the quality and relevance of suggestions, saving users time and enhancing their experience.
Configuration options allow users to set a score threshold for further filtering out suggestions.
## Introduction - Efficient Review with Hierarchical Presentation
Given that not all generated code suggestions will be relevant, it is crucial to enable users to review them in a fast and efficient way, allowing quick identification and filtering of non-applicable ones.
To achieve this goal, Qodo Merge offers a dedicated hierarchical structure when presenting suggestions to users:
- A "category" section groups suggestions by their category, allowing users to quickly dismiss irrelevant suggestions.
- Each suggestion is first described by a one-line summary, which can be expanded to a full description by clicking on a collapsible.
- Upon expanding a suggestion, the user receives a more comprehensive description, and a code snippet demonstrating the recommendation.
!!! note "Fast Review"
This hierarchical structure is designed to facilitate rapid review of each suggestion, with users spending an average of ~5-10 seconds per item.
## Self-reflection and Re-ranking
The AI model is initially tasked with generating suggestions, and outputting them in order of importance.
However, in practice we observe that models often struggle to simultaneously generate high-quality code suggestions and rank them well in a single pass.
Furthermore, the initial set of generated suggestions sometimes contains easily identifiable errors.
To address these issues, we implemented a "self-reflection" process that refines suggestion ranking and eliminates irrelevant or incorrect proposals.
This process consists of the following steps:
1. Presenting the generated suggestions to the model in a follow-up call.
2. Instructing the model to score each suggestion on a scale of 0-10 and provide a rationale for the assigned score.
3. Utilizing these scores to re-rank the suggestions and filter out incorrect ones (with a score of 0).
4. Optionally, filtering out all suggestions below a user-defined score threshold.
Note that presenting all generated suggestions simultaneously provides the model with a comprehensive context, enabling it to make more informed decisions compared to evaluating each suggestion individually.
To conclude, the self-reflection process enables Qodo Merge to prioritize suggestions based on their importance, eliminate inaccurate or irrelevant proposals, and optionally exclude suggestions that fall below a specified threshold of significance.
This results in a more refined and valuable set of suggestions for the user, saving time and improving the overall experience.
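A minimal sketch of the re-ranking and filtering step is shown below, assuming the reflection call has already attached a 0-10 `score` to each suggestion; the dictionary shape and function name are illustrative, not the actual pr_agent code.
```python
def rerank_and_filter(suggestions, score_threshold=0):
    """Illustrative post-processing of self-reflection scores.

    `suggestions` is assumed to be a list of dicts carrying a model-assigned
    'score' (0-10) from the follow-up reflection call. Suggestions scored 0 are
    treated as incorrect and removed, an optional user threshold (mirroring
    `suggestions_score_threshold`) filters out weak ones, and the rest are
    re-ranked by score.
    """
    kept = [s for s in suggestions if s.get("score", 0) > 0]
    kept = [s for s in kept if s["score"] >= score_threshold]
    return sorted(kept, key=lambda s: s["score"], reverse=True)
```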
## Example Results
![self_reflection](https://codium.ai/images/pr_agent/self_reflection1.png){width=768}
![self_reflection](https://codium.ai/images/pr_agent/self_reflection2.png){width=768}
## Appendix - Relevant Configuration Options
```
[pr_code_suggestions]
suggestions_score_threshold = 0 # Filter out suggestions with a score below this threshold (0-10)
```

View File

@ -0,0 +1,70 @@
## Overview - Static Code Analysis 💎
By combining static code analysis with LLM capabilities, Qodo Merge can provide a comprehensive analysis of the PR code changes on a component level.
It scans the PR code changes, finds all the code components (methods, functions, classes) that changed, and enables you to interactively generate tests, docs, code suggestions and similar code search for each component.
!!! note "Languages that are currently supported:"
Python, Java, C++, JavaScript, TypeScript, C#.
## Capabilities
### Analyze PR
The [`analyze`](https://qodo-merge-docs.qodo.ai/tools/analyze/) tool enables you to interactively generate tests, docs, code suggestions and similar code search for each component that changed in the PR.
It can be invoked manually by commenting on any PR:
```
/analyze
```
An example result:
![Analyze 1](https://codium.ai/images/pr_agent/analyze_1.png){width=768}
Clicking on each checkbox will trigger the relevant tool for the selected component.
### Generate Tests
The [`test`](https://qodo-merge-docs.qodo.ai/tools/test/) tool generates tests for a selected component, based on the PR code changes.
It can be invoked manually by commenting on any PR:
```
/test component_name
```
where 'component_name' is the name of a specific component in the PR. It can also be triggered interactively by using the `analyze` tool.
![test1](https://codium.ai/images/pr_agent/test1.png){width=768}
### Generate Docs for a Component
The [`add_docs`](https://qodo-merge-docs.qodo.ai/tools/documentation/) tool scans the PR code changes, and automatically generates docstrings for any code components that changed in the PR.
It can be invoked manually by commenting on any PR:
```
/add_docs component_name
```
Or be triggered interactively by using the `analyze` tool.
![Docs single component](https://codium.ai/images/pr_agent/docs_single_component.png){width=768}
### Generate Code Suggestions for a Component
The [`improve_component`](https://qodo-merge-docs.qodo.ai/tools/improve_component/) tool generates code suggestions for a specific code component that changed in the PR.
It can be invoked manually by commenting on any PR:
```
/improve_component component_name
```
Or be triggered interactively by using the `analyze` tool.
![improve_component2](https://codium.ai/images/pr_agent/improve_component2.png){width=768}
### Find Similar Code
The [`similar code`](https://qodo-merge-docs.qodo.ai/tools/similar_code/) tool retrieves the most similar code components from inside the organization's codebase, or from open-source code.
For example:
`Global Search` for a method called `chat_completion`:
![similar code global](https://codium.ai/images/pr_agent/similar_code_global2.png){width=768}

docs/docs/faq/index.md Normal file
View File

@ -0,0 +1,67 @@
# FAQ
??? note "Question: Can Qodo Merge serve as a substitute for a human reviewer?"
#### Answer:<span style="display:none;">1</span>
Qodo Merge is designed to assist, not replace, human reviewers.
Reviewing PRs is a tedious and time-consuming task often seen as a "chore". In addition, the longer the PR the shorter the relative feedback, since long PRs can overwhelm reviewers, both in terms of technical difficulty, and the actual review time.
Qodo Merge aims to address these pain points, and to assist and empower both the PR author and reviewer.
However, Qodo Merge has built-in safeguards to ensure the developer remains in the driver's seat. For example:
1. Preserves user's original PR header
2. Places user's description above the AI-generated PR description
3. Cannot approve PRs; approval remains reviewer's responsibility
4. The code suggestions are optional, and aim to:
- Encourage self-review and self-reflection
- Highlight potential bugs or oversights
- Enhance code quality and promote best practices
Read more about this issue in our [blog](https://www.codium.ai/blog/understanding-the-challenges-and-pain-points-of-the-pull-request-cycle/)
___
??? note "Question: I received an incorrect or irrelevant suggestion. Why?"
#### Answer:<span style="display:none;">2</span>
- Modern AI models, like Claude 3.5 Sonnet and GPT-4, are improving rapidly but remain imperfect. Users should critically evaluate all suggestions rather than accepting them automatically.
- AI errors are rare, but possible. A main value of reviewing the code suggestions lies in their high probability of catching **mistakes or bugs made by the PR author**. We believe it's worth spending 30-60 seconds reviewing suggestions, even if some aren't relevant, as this practice can enhance code quality and prevent bugs in production.
- The hierarchical structure of the suggestions is designed to help the user to _quickly_ understand them, and to decide which ones are relevant and which are not:
- Only if the `Category` header is relevant should the user move on to the summarized suggestion description.
- Only if the summarized suggestion description is relevant should the user click on the collapsible to read the full suggestion description with a code preview example.
- In addition, we recommend using the [`extra_instructions`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) field to guide the model toward suggestions that are more relevant to the specific needs of the project.
- The interactive [PR chat](https://qodo-merge-docs.qodo.ai/chrome-extension/) also provides an easy way to get more tailored suggestions and feedback from the AI model.
___
??? note "Question: How can I get more tailored suggestions?"
#### Answer:<span style="display:none;">3</span>
See [here](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) for more information on how to use the `extra_instructions` and `best_practices` configuration options, to guide the model to more tailored suggestions.
___
??? note "Question: Will you store my code ? Are you using my code to train models?"
#### Answer:<span style="display:none;">4</span>
No. Qodo Merge's strict privacy policy ensures that your code is not stored or used for training purposes.
For a detailed overview of our data privacy policy, please refer to [this link](https://qodo-merge-docs.qodo.ai/overview/data_privacy/).
___
??? note "Question: Can I use my own LLM keys with Qodo Merge?"
#### Answer:<span style="display:none;">5</span>
When you self-host, you use your own keys.
Qodo Merge Pro with SaaS deployment is a hosted version of Qodo Merge, where Qodo manages the infrastructure and the keys.
For enterprise customers, on-prem deployment is also available. [Contact us](https://www.codium.ai/contact/#pricing) for more information.
___

View File

@ -1,10 +1,10 @@
# PR-Agent Code Fine-tuning Benchmark
# Qodo Merge Code Fine-tuning Benchmark
On coding tasks, the gap between open-source models and top closed-source models such as GPT4 is significant.
<br>
In practice, open-source models are unsuitable for most real-world code tasks, and require further fine-tuning to produce acceptable results.
_PR-Agent fine-tuning benchmark_ aims to benchmark open-source models on their ability to be fine-tuned for a coding task.
_Qodo Merge fine-tuning benchmark_ aims to benchmark open-source models on their ability to be fine-tuned for a coding task.
Specifically, we chose to fine-tune open-source models on the task of analyzing a pull request, and providing useful feedback and code suggestions.
Here are the results:
@ -53,8 +53,8 @@ Here are the results:
### Training dataset
Our training dataset comprises 25,000 pull requests, aggregated from permissive license repos. For each pull request, we generated responses for the three main tools of PR-Agent:
[Describe](https://pr-agent-docs.codium.ai/tools/describe/), [Review](https://pr-agent-docs.codium.ai/tools/improve/) and [Improve](https://pr-agent-docs.codium.ai/tools/improve/).
Our training dataset comprises 25,000 pull requests, aggregated from permissive license repos. For each pull request, we generated responses for the three main tools of Qodo Merge:
[Describe](https://qodo-merge-docs.qodo.ai/tools/describe/), [Review](https://qodo-merge-docs.qodo.ai/tools/review/) and [Improve](https://qodo-merge-docs.qodo.ai/tools/improve/).
On the raw data collected, we employed various automatic and manual cleaning techniques to ensure the outputs were of the highest quality, and suitable for instruct-tuning.

View File

@ -1,25 +1,37 @@
# Overview
CodiumAI PR-Agent is an open-source tool to help efficiently review and handle pull requests.
Qodo Merge is an open-source tool to help efficiently review and handle pull requests.
- See the [Installation Guide](./installation/index.md) for instructions on installing and running the tool on different git platforms.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running the PR-Agent commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running the Qodo Merge commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Tools Guide](./tools/index.md) for a detailed description of the different tools.
## PR-Agent Features
PR-Agent offers extensive pull request functionalities across various git providers.
## Qodo Merge Docs Smart Search
To search the documentation site using natural language:
1) Comment `/help "your question"` in either:
- A pull request where Qodo Merge is installed
- A [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat)
2) Qodo Merge will respond with an [answer](https://github.com/Codium-ai/pr-agent/pull/1241#issuecomment-2365259334) that includes relevant documentation links.
## Qodo Merge Features
Qodo Merge offers extensive pull request functionalities across various git providers.
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | ⮑ [SOC2 Compliance](https://pr-agent-docs.codium.ai/tools/review/#soc2-ticket-compliance){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline file summary](https://pr-agent-docs.codium.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | |
| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
@ -42,7 +54,7 @@ PR-Agent offers extensive pull request functionalities across various git provid
| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | |
💎 marks a feature available only in [PR-Agent Pro](https://www.codium.ai/pricing/){:target="_blank"}
💎 marks a feature available only in [Qodo Merge Pro](https://www.codium.ai/pricing/){:target="_blank"}
## Example Results
@ -74,8 +86,8 @@ PR-Agent offers extensive pull request functionalities across various git provid
## How it Works
The following diagram illustrates PR-Agent tools and their flow:
The following diagram illustrates Qodo Merge tools and their flow:
![PR-Agent Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
![Qodo Merge Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
Check out the [PR Compression strategy](core-abilities/index.md) page for more details on how we convert a code diff to a manageable LLM prompt
Check out the [core abilities](core-abilities/index.md) page for a comprehensive overview of the variety of core abilities used by Qodo Merge.

View File

@ -1,5 +1,5 @@
## Azure DevOps Pipeline
You can use a pre-built Action Docker image to run PR-Agent as an Azure devops pipeline.
You can use a pre-built Action Docker image to run Qodo Merge as an Azure DevOps pipeline.
Add the following file to your repository under `azure-pipelines.yml`:
```yaml
# Opt out of CI triggers
@ -47,11 +47,11 @@ stages:
env:
azure_devops__pat: $(azure_devops_pat)
openai__key: $(OPENAI_KEY)
displayName: 'Run PR Agent'
displayName: 'Run Qodo Merge'
```
This script will run PR-Agent on every new merge request, with the `improve`, `review`, and `describe` commands.
This script will run Qodo Merge on every new merge request, with the `improve`, `review`, and `describe` commands.
Note that you need to export the `azure_devops__pat` and `OPENAI_KEY` variables in the Azure DevOps pipeline settings (Pipelines -> Library -> + Variable group):
![PR Agent Pro](https://codium.ai/images/pr_agent/azure_devops_pipeline_secrets.png){width=468}
![Qodo Merge Pro](https://codium.ai/images/pr_agent/azure_devops_pipeline_secrets.png){width=468}
Make sure to give pipeline permissions to the `pr_agent` variable group.

View File

@ -1,9 +1,9 @@
## Run as a Bitbucket Pipeline
You can use the Bitbucket Pipeline system to run PR-Agent on every pull request open or update.
You can use the Bitbucket Pipeline system to run Qodo Merge on every pull request open or update.
1. Add the following file in your repository bitbucket_pipelines.yml
1. Add the following file to your repository as `bitbucket-pipelines.yml`
```yaml
pipelines:
@ -29,7 +29,7 @@ Note that comments on a PR are not supported in Bitbucket Pipeline.
## Run using CodiumAI-hosted Bitbucket app 💎
Please contact visit [PR-Agent pro](https://www.codium.ai/pricing/) if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implementation.
Please visit [Qodo Merge Pro](https://www.codium.ai/pricing/) if you're interested in a hosted BitBucket app solution that provides full functionality including PR reviews and comment handling. It's based on the [bitbucket_app.py](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/git_providers/bitbucket_provider.py) implementation.
## Bitbucket Server and Data Center
@ -58,7 +58,7 @@ python cli.py --pr_url https://git.onpreminstanceofbitbucket.com/projects/PROJEC
### Run it as service
To run pr-agent as webhook, build the docker image:
To run Qodo Merge as a webhook service, build the docker image:
```
docker build . -t codiumai/pr-agent:bitbucket_server_webhook --target bitbucket_server_webhook -f docker/Dockerfile
docker push codiumai/pr-agent:bitbucket_server_webhook # Push to your Docker repository

View File

@ -1,6 +1,6 @@
## Run as a GitHub Action
You can use our pre-built Github Action Docker image to run PR-Agent as a Github Action.
You can use our pre-built Github Action Docker image to run Qodo Merge as a Github Action.
1) Add the following file to your repository under `.github/workflows/pr_agent.yml`:
@ -27,27 +27,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
if you want to pin your action to a specific release (v0.23 for example) for stability reasons, use:
```yaml
...
steps:
- name: PR Agent action step
id: pragent
uses: docker://codiumai/pr-agent:0.23-github_action
...
```
For enhanced security, you can also specify the Docker image by its [digest](https://hub.docker.com/repository/docker/codiumai/pr-agent/tags):
```yaml
...
steps:
- name: PR Agent action step
id: pragent
uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
...
```
2) Add the following secret to your repository under `Settings > Secrets and variables > Actions > New repository secret > Add secret`:
```
@ -60,7 +39,7 @@ The GITHUB_TOKEN secret is automatically created by GitHub.
3) Merge this change to your main branch.
When you open your next PR, you should see a comment from `github-actions` bot with a review of your PR, and instructions on how to use the rest of the tools.
4) You may configure PR-Agent by adding environment variables under the env section corresponding to any configurable property in the [configuration](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) file. Some examples:
4) You may configure Qodo Merge by adding environment variables under the env section corresponding to any configurable property in the [configuration](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) file. Some examples:
```yaml
env:
# ... previous environment values
@ -68,7 +47,41 @@ When you open your next PR, you should see a comment from `github-actions` bot w
PR_REVIEWER.REQUIRE_TESTS_REVIEW: "false" # Disable tests review
PR_CODE_SUGGESTIONS.NUM_CODE_SUGGESTIONS: 6 # Increase number of code suggestions
```
See detailed usage instructions in the [USAGE GUIDE](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#github-action)
See detailed usage instructions in the [USAGE GUIDE](https://qodo-merge-docs.qodo.ai/usage-guide/automations_and_usage/#github-action)
### Using a specific release
!!! tip ""
if you want to pin your action to a specific release (v0.23 for example) for stability reasons, use:
```yaml
...
steps:
- name: PR Agent action step
id: pragent
uses: docker://codiumai/pr-agent:0.23-github_action
...
```
For enhanced security, you can also specify the Docker image by its [digest](https://hub.docker.com/repository/docker/codiumai/pr-agent/tags):
```yaml
...
steps:
- name: PR Agent action step
id: pragent
uses: docker://codiumai/pr-agent@sha256:14165e525678ace7d9b51cda8652c2d74abb4e1d76b57c4a6ccaeba84663cc64
...
```
### Action for GitHub enterprise server
!!! tip ""
To use the action with a GitHub enterprise server, add an environment variable `GITHUB.BASE_URL` with the API URL of your GitHub server.
For example, if your GitHub server is at `https://github.mycompany.com`, add the following to your workflow file:
```yaml
env:
# ... previous environment values
GITHUB.BASE_URL: "https://github.mycompany.com/api/v3"
```
---
@ -155,7 +168,7 @@ cp pr_agent/settings/.secrets_template.toml pr_agent/settings/.secrets.toml
9. Install the app by navigating to the "Install App" tab and selecting your desired repositories.
> **Note:** When running PR-Agent from GitHub App, the default configuration file (configuration.toml) will be loaded.
> **Note:** When running Qodo Merge from GitHub app, the default configuration file (configuration.toml) will be loaded.
> However, you can override the default tool parameters by uploading a local configuration file `.pr_agent.toml`
> For more information please check out the [USAGE GUIDE](../usage-guide/automations_and_usage.md#github-app)
---
@ -185,7 +198,7 @@ For example: `GITHUB.WEBHOOK_SECRET` --> `GITHUB__WEBHOOK_SECRET`
## AWS CodeCommit Setup
Not all features have been added to CodeCommit yet. As of right now, CodeCommit has been implemented to run the pr-agent CLI on the command line, using AWS credentials stored in environment variables. (More features will be added in the future.) The following is a set of instructions to have pr-agent do a review of your CodeCommit pull request from the command line:
Not all features have been added to CodeCommit yet. As of right now, CodeCommit has been implemented to run the Qodo Merge CLI on the command line, using AWS credentials stored in environment variables. (More features will be added in the future.) The following is a set of instructions to have Qodo Merge do a review of your CodeCommit pull request from the command line:
1. Create an IAM user that you will use to read CodeCommit pull requests and post comments
* Note: That user should have CLI access only, not Console access

View File

@ -1,5 +1,5 @@
## Run as a GitLab Pipeline
You can use a pre-built Action Docker image to run PR-Agent as a GitLab pipeline. This is a simple way to get started with PR-Agent without setting up your own server.
You can use a pre-built Action Docker image to run Qodo Merge as a GitLab pipeline. This is a simple way to get started with Qodo Merge without setting up your own server.
(1) Add the following file to your repository under `.gitlab-ci.yml`:
```yaml
@ -8,7 +8,7 @@ stages:
pr_agent_job:
stage: pr_agent
image:
image:
name: codiumai/pr-agent:latest
entrypoint: [""]
script:
@ -16,7 +16,8 @@ pr_agent_job:
- echo "Running PR Agent action step"
- export MR_URL="$CI_MERGE_REQUEST_PROJECT_URL/merge_requests/$CI_MERGE_REQUEST_IID"
- echo "MR_URL=$MR_URL"
- export gitlab__PERSONAL_ACCESS_TOKEN=$GITLAB_PERSONAL_ACCESS_TOKEN
- export gitlab__url=$CI_SERVER_PROTOCOL://$CI_SERVER_FQDN
- export gitlab__PERSONAL_ACCESS_TOKEN=$GITLAB_PERSONAL_ACCESS_TOKEN
- export config__git_provider="gitlab"
- export openai__key=$OPENAI_KEY
- python -m pr_agent.cli --pr_url="$MR_URL" describe
@ -25,8 +26,8 @@ pr_agent_job:
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
```
This script will run PR-Agent on every new merge request. You can modify the `rules` section to run PR-Agent on different events.
You can also modify the `script` section to run different PR-Agent commands, or with different parameters by exporting different environment variables.
This script will run Qodo Merge on every new merge request. You can modify the `rules` section to run Qodo Merge on different events.
You can also modify the `script` section to run different Qodo Merge commands, or with different parameters by exporting different environment variables.
(2) Add the following masked variables to your GitLab repository (CI/CD -> Variables):
@ -41,21 +42,36 @@ Note that if your base branches are not protected, don't set the variables as `p
## Run a GitLab webhook server
1. From the GitLab workspace or group, create an access token. Enable the "api" scope only.
1. From the GitLab workspace or group, create an access token with "Reporter" role ("Developer" if using Pro version of the agent) and "api" scope.
2. Generate a random secret for your app, and save it for later. For example, you can use:
```
WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
```
3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [here](https://pr-agent-docs.codium.ai/installation/github/#run-as-a-github-app) steps 4-7.
4. In the secrets file, fill in the following:
- Your OpenAI key.
- In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
- Set deployment_type to 'gitlab' in [configuration.toml](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml)
5. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```. Set the secret token to the generated secret from step 2.
In the "Trigger" section, check the comments and merge request events boxes.
3. Clone this repository:
6. Test your installation by opening a merge request or commenting or a merge request using one of CodiumAI's commands.
```
git clone https://github.com/Codium-ai/pr-agent.git
```
4. Prepare variables and secrets. Skip this step if you plan on setting these as environment variables when running the agent:
1. In the configuration file/variables:
- Set `deployment_type` to "gitlab"
2. In the secrets file/variables:
- Set your AI model key in the respective section
- In the [gitlab] section, set `personal_access_token` (with token from step 1) and `shared_secret` (with secret from step 2)
5. Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:
```
docker build . -t gitlab_pr_agent --target gitlab_webhook -f docker/Dockerfile
docker push codiumai/pr-agent:gitlab_webhook # Push to your Docker repository
```
6. Create a webhook in GitLab. Set the URL to ```http[s]://<PR_AGENT_HOSTNAME>/webhook```, the secret token to the generated secret from step 2, and enable the triggers `push`, `comments` and `merge request events`.
7. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.

View File

@ -1,12 +1,12 @@
# Installation
## Self-hosted PR-Agent
If you choose to host you own PR-Agent, you first need to acquire two tokens:
## Self-hosted Qodo Merge
If you choose to host your own Qodo Merge, you first need to acquire two tokens:
1. An OpenAI key from [here](https://platform.openai.com/api-keys), with access to GPT-4 (or a key for other [language models](https://pr-agent-docs.codium.ai/usage-guide/changing_a_model/), if you prefer).
1. An OpenAI key from [here](https://platform.openai.com/api-keys), with access to GPT-4 (or a key for other [language models](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/), if you prefer).
2. A GitHub\GitLab\BitBucket personal access token (classic), with the repo scope. [GitHub from [here](https://github.com/settings/tokens)]
There are several ways to use self-hosted PR-Agent:
There are several ways to use self-hosted Qodo Merge:
- [Locally](./locally.md)
- [GitHub](./github.md)
@ -14,8 +14,8 @@ There are several ways to use self-hosted PR-Agent:
- [BitBucket](./bitbucket.md)
- [Azure DevOps](./azure.md)
## PR-Agent Pro 💎
PR-Agent Pro, an app hosted by CodiumAI for GitHub\GitLab\BitBucket, is also available.
## Qodo Merge Pro 💎
Qodo Merge Pro, an app hosted by CodiumAI for GitHub\GitLab\BitBucket, is also available.
<br>
With PR-Agent Pro, installation is as simple as signing up and adding the PR-Agent app to your relevant repo.
See [here](https://pr-agent-docs.codium.ai/installation/pr_agent_pro/) for more details.
With Qodo Merge Pro, installation is as simple as signing up and adding the Qodo Merge app to your relevant repo.
See [here](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/) for more details.

View File

@ -16,8 +16,8 @@ from pr_agent.config_loader import get_settings
def main():
# Fill in the following values
provider = "github" # GitHub provider
user_token = "..." # GitHub user token
provider = "github" # github/gitlab/bitbucket/azure_devops
user_token = "..." # user token
openai_key = "..." # OpenAI key
pr_url = "..." # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'
command = "/review" # Command to run (e.g. '/review', '/describe', '/ask="What is the purpose of this PR?"', ...)
@ -42,42 +42,34 @@ A list of the relevant tools can be found in the [tools guide](../tools/ask.md).
To invoke a tool (for example `review`), you can run directly from the Docker image. Here's how:
- For GitHub:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
```
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you are using GitHub enterprise server, you need to specify the custom url as variable.
For example, if your GitHub server is at `https://github.mycompany.com`, add the following to the command:
```
-e GITHUB.BASE_URL=https://github.mycompany.com/api/v3
```
- For GitLab:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> codiumai/pr-agent:latest --pr_url <pr_url> review
```
Note: If you have a dedicated GitLab instance, you need to specify the custom url as variable:
```
docker run --rm -it -e OPENAI.KEY=<your key> -e CONFIG.GIT_PROVIDER=gitlab -e GITLAB.PERSONAL_ACCESS_TOKEN=<your token> -e GITLAB.URL=<your gitlab instance url> codiumai/pr-agent:latest --pr_url <pr_url> review
```
If you have a dedicated GitLab instance, you need to specify the custom url as variable:
```
-e GITLAB.URL=<your gitlab instance url>
```
- For BitBucket:
```
docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
```
```
docker run --rm -it -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=<pr_url> review
```
For other git providers, update CONFIG.GIT_PROVIDER accordingly, and check the `pr_agent/settings/.secrets_template.toml` file for the environment variables' expected names and values.
---
If you want to ensure you're running a specific version of the Docker image, consider using the image's digest:
```bash
docker run --rm -it -e OPENAI.KEY=<your key> -e GITHUB.USER_TOKEN=<your token> codiumai/pr-agent@sha256:71b5ee15df59c745d352d84752d01561ba64b6d51327f97d46152f0c58a5f678 --pr_url <pr_url> review
```
Or you can run a [specific released versions](https://github.com/Codium-ai/pr-agent/blob/main/RELEASE_NOTES.md) of pr-agent, for example:
```
codiumai/pr-agent@v0.9
```
---
## Run from source
1. Clone this repository:
@ -115,7 +107,7 @@ python3 -m pr_agent.cli --issue_url <issue_url> similar_issue
...
```
[Optional] Add the pr_agent folder to your PYTHONPATH
[Optional] Add the pr_agent folder to your PYTHONPATH
```
export PYTHONPATH=$PYTHONPATH:<PATH to pr_agent folder>
```

View File

@ -1,33 +1,33 @@
## Getting Started with PR-Agent Pro
## Getting Started with Qodo Merge Pro
PR-Agent Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI.
See [here](https://pr-agent-docs.codium.ai/#pr-agent-pro) for more details about the benefits of using PR-Agent Pro.
Qodo Merge Pro is a versatile application compatible with GitHub, GitLab, and BitBucket, hosted by CodiumAI.
See [here](https://qodo-merge-docs.qodo.ai/overview/pr_agent_pro/) for more details about the benefits of using Qodo Merge Pro.
Interested parties can subscribe to PR-Agent Pro through the following [link](https://www.codium.ai/pricing/).
Interested parties can subscribe to Qodo Merge Pro through the following [link](https://www.codium.ai/pricing/).
After subscribing, you are granted the ability to easily install the application across any of your repositories.
![PR Agent Pro](https://codium.ai/images/pr_agent/pr_agent_pro_install.png){width=468}
![Qodo Merge Pro](https://codium.ai/images/pr_agent/pr_agent_pro_install.png){width=468}
Each user who wants to use PR-Agent pro needs to buy a seat.
Each user who wants to use Qodo Merge Pro needs to buy a seat.
Initially, CodiumAI offers a two-week trial period at no cost, after which continued access requires each user to secure a personal seat.
Once a user acquires a seat, they gain the flexibility to use PR-Agent Pro across any repository where it was enabled.
Once a user acquires a seat, they gain the flexibility to use Qodo Merge Pro across any repository where it was enabled.
Users without a purchased seat who interact with a repository featuring PR-Agent Pro are entitled to receive up to five complimentary feedbacks.
Beyond this limit, PR-Agent Pro will cease to respond to their inquiries unless a seat is purchased.
Users without a purchased seat who interact with a repository featuring Qodo Merge Pro are entitled to receive up to five complimentary feedbacks.
Beyond this limit, Qodo Merge Pro will cease to respond to their inquiries unless a seat is purchased.
## Install PR-Agent Pro for GitHub Enterprise Server
You can install PR-Agent Pro application on your GitHub Enterprise Server, and enjoy two weeks of free trial.
After the trial period, to continue using PR-Agent Pro, you will need to contact us for an [Enterprise license](https://www.codium.ai/pricing/).
## Install Qodo Merge Pro for GitHub Enterprise Server
To use Qodo Merge Pro application on your private GitHub Enterprise Server, you will need to contact us for starting an [Enterprise](https://www.codium.ai/pricing/) trial.
## Install PR-Agent Pro for GitLab (Teams & Enterprise)
## Install Qodo Merge Pro for GitLab (Teams & Enterprise)
Since GitLab platform does not support apps, installing PR-Agent Pro for GitLab is a bit more involved, and requires the following steps:
Since the GitLab platform does not support apps, installing Qodo Merge Pro for GitLab is a bit more involved and requires the following steps:
### Step 1
Acquire a personal, project or group level access token. Enable the “api” scope in order to allow PR-Agent to read pull requests, comment and respond to requests.
Acquire a personal, project or group level access token. Enable the “api” scope in order to allow Qodo Merge to read pull requests, comment and respond to requests.
<figure markdown="1">
![Step 1](https://www.codium.ai/images/pr_agent/gitlab_pro_pat.png){width=750}
@ -65,4 +65,4 @@ Enable SSL verification: Check the box.
You're all set!
Open a new merge request or add an MR comment with one of PR-Agent's commands such as /review, /describe or /improve.
Open a new merge request or add an MR comment with one of Qodo Merge's commands such as /review, /describe or /improve.

View File

@ -1,16 +1,16 @@
## Self-hosted PR-Agent
## Self-hosted Qodo Merge
- If you self-host PR-Agent with your OpenAI (or other LLM provider) API key, it is between you and the provider. We don't send your code data to PR-Agent servers.
- If you self-host Qodo Merge with your OpenAI (or other LLM provider) API key, it is between you and the provider. We don't send your code data to Qodo Merge servers.
## PR-Agent Pro 💎
## Qodo Merge Pro 💎
- When using PR-Agent Pro 💎, hosted by CodiumAI, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.
- When using Qodo Merge Pro 💎, hosted by CodiumAI, we will not store any of your data, nor will we use it for training. You will also benefit from an OpenAI account with zero data retention.
- For certain clients, CodiumAI-hosted PR-Agent Pro will use CodiumAI's proprietary models. If this is the case, you will be notified.
- For certain clients, CodiumAI-hosted Qodo Merge Pro will use CodiumAI's proprietary models. If this is the case, you will be notified.
- No passive collection of Code and Pull Requests data — PR-Agent will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.
- No passive collection of Code and Pull Requests data — Qodo Merge will be active only when you invoke it, and it will then extract and analyze only data relevant to the executed command and queried pull request.
## PR-Agent Chrome extension
## Qodo Merge Chrome extension
- The [PR-Agent Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) will not send your code to any external servers.
- The [Qodo Merge Chrome extension](https://chromewebstore.google.com/detail/pr-agent-chrome-extension/ephlnjeghhogofkifjloamocljapahnl) will not send your code to any external servers.

View File

@ -1,25 +1,37 @@
# Overview
CodiumAI PR-Agent is an open-source tool to help efficiently review and handle pull requests.
Qodo Merge is an open-source tool to help efficiently review and handle pull requests.
- See the [Installation Guide](./installation/index.md) for instructions on installing and running the tool on different git platforms.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running the PR-Agent commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Usage Guide](./usage-guide/index.md) for instructions on running the Qodo Merge commands via different interfaces, including _CLI_, _online usage_, or by _automatically triggering_ them when a new PR is opened.
- See the [Tools Guide](./tools/index.md) for a detailed description of the different tools.
## PR-Agent Features
PR-Agent offers extensive pull request functionalities across various git providers.
## Qodo Merge Docs Smart Search
To search the documentation site using natural language:
1) Comment `/help "your question"` in either:
- A pull request where Qodo Merge is installed
- A [PR Chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat)
2) Qodo Merge will respond with an [answer](https://github.com/Codium-ai/pr-agent/pull/1241#issuecomment-2365259334) that includes relevant documentation links.
## Qodo Merge Features
Qodo Merge offers extensive pull request functionalities across various git providers.
| | | GitHub | Gitlab | Bitbucket | Azure DevOps |
|-------|-----------------------------------------------------------------------------------------------------------------------|:------:|:------:|:---------:|:------------:|
| TOOLS | Review | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Incremental | ✅ | | | |
| | ⮑ [SOC2 Compliance](https://pr-agent-docs.codium.ai/tools/review/#soc2-ticket-compliance){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
| | Ask | ✅ | ✅ | ✅ | ✅ |
| | Describe | ✅ | ✅ | ✅ | ✅ |
| | ⮑ [Inline file summary](https://pr-agent-docs.codium.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | ✅ |
| | ⮑ [Inline file summary](https://qodo-merge-docs.qodo.ai/tools/describe/#inline-file-summary){:target="_blank"} 💎 | ✅ | ✅ | | ✅ |
| | Improve | ✅ | ✅ | ✅ | ✅ |
| | ⮑ Extended | ✅ | ✅ | ✅ | ✅ |
| | [Custom Prompt](./tools/custom_prompt.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
@ -42,7 +54,7 @@ PR-Agent offers extensive pull request functionalities across various git provid
| | [Static code analysis](./tools/analyze.md/){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
| | [Multiple configuration options](./usage-guide/configuration_options.md){:target="_blank"} 💎 | ✅ | ✅ | ✅ | ✅ |
💎 marks a feature available only in [PR-Agent Pro](https://www.codium.ai/pricing/){:target="_blank"}
💎 marks a feature available only in [Qodo Merge Pro](https://www.codium.ai/pricing/){:target="_blank"}
## Example Results
@ -74,8 +86,8 @@ PR-Agent offers extensive pull request functionalities across various git provid
## How it Works
The following diagram illustrates PR-Agent tools and their flow:
The following diagram illustrates Qodo Merge tools and their flow:
![PR-Agent Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
![Qodo Merge Tools](https://codium.ai/images/pr_agent/diagram-v0.9.png)
Check out the [PR Compression strategy](core-abilities/index.md) page for more details on how we convert a code diff to a manageable LLM prompt

View File

@ -1,42 +1,51 @@
[PR-Agent Pro](https://www.codium.ai/pricing/) is a hosted version of PR-Agent, provided by CodiumAI. A complimentary two-week trial is offered, followed by a monthly subscription fee.
PR-Agent Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:
### Overview
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the PR-Agent app to your GitHub\GitLab\BitBucket repo.
[Qodo Merge Pro](https://www.codium.ai/pricing/) is a hosted version of Qodo Merge, provided by Qodo. A complimentary two-week trial is offered, followed by a monthly subscription fee.
Qodo Merge Pro is designed for companies and teams that require additional features and capabilities. It provides the following benefits:
2. **Improved privacy** - No data will be stored or used to train models. PR-Agent Pro will employ zero data retention, and will use an OpenAI account with zero data retention.
1. **Fully managed** - We take care of everything for you - hosting, models, regular updates, and more. Installation is as simple as signing up and adding the Qodo Merge app to your GitHub\GitLab\BitBucket repo.
3. **Improved support** - PR-Agent Pro users will receive priority support, and will be able to request new features and capabilities.
2. **Improved privacy** - No data will be stored or used to train models. Qodo Merge Pro will employ zero data retention, and will use OpenAI and Claude accounts with zero data retention.
4. **Supporting self-hosted git servers** - PR-Agent Pro can be installed on GitHub Enterprise Server, GitLab, and BitBucket. For more information, see the [installation guide](https://pr-agent-docs.codium.ai/installation/pr_agent_pro/).
3. **Improved support** - Qodo Merge Pro users will receive priority support, and will be able to request new features and capabilities.
**Additional features:**
4. **Supporting self-hosted git servers** - Qodo Merge Pro can be installed on GitHub Enterprise Server, GitLab, and BitBucket. For more information, see the [installation guide](https://qodo-merge-docs.qodo.ai/installation/pr_agent_pro/).
Here are some of the additional features and capabilities that PR-Agent Pro offers:
5. **PR Chat** - Qodo Merge Pro allows you to engage in [private chat](https://qodo-merge-docs.qodo.ai/chrome-extension/features/#pr-chat) about your pull requests on private repositories.
### Additional features
Here are some of the additional features and capabilities that Qodo Merge Pro offers:
| Feature | Description |
|----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [**Model selection**](https://pr-agent-docs.codium.ai/usage-guide/PR_agent_pro_models/#pr-agent-pro-models) | Choose the model that best fits your needs, among top models like `GPT4` and `Claude-Sonnet-3.5`
| [**Global and wiki configuration**](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without commiting code |
| [**Apply suggestions**](https://pr-agent-docs.codium.ai/tools/improve/#overview) | Generate commitable code from the relevant suggestions interactively by clicking on a checkbox |
| [**Suggestions impact**](https://pr-agent-docs.codium.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions |
| [**CI feedback**](https://pr-agent-docs.codium.ai/tools/ci_feedback/) | Automatically analyze failed CI checks on GitHub and provide actionable feedback in the PR conversation, helping to resolve issues quickly |
| [**Advanced usage statistics**](https://www.codium.ai/contact/#/) | PR-Agent Pro offers detailed statistics at user, repository, and company levels, including metrics about PR-Agent usage, and also general statistics and insights |
| [**Incorporating companies' best practices**](https://pr-agent-docs.codium.ai/tools/improve/#best-practices) | Use the companies' best practices as reference to increase the effectiveness and the relevance of the code suggestions |
| [**Interactive triggering**](https://pr-agent-docs.codium.ai/tools/analyze/#example-usage) | Interactively apply different tools via the `analyze` command |
| [**SOC2 compliance check**](https://pr-agent-docs.codium.ai/tools/review/#configuration-options) | Ensures the PR contains a ticket to a project management system (e.g., Jira, Asana, Trello, etc.)
| [**Custom labels**](https://pr-agent-docs.codium.ai/tools/describe/#handle-custom-labels-from-the-repos-labels-page) | Define custom labels for PR-Agent to assign to the PR |
| [**Model selection**](https://qodo-merge-docs.qodo.ai/usage-guide/PR_agent_pro_models/) | Choose the model that best fits your needs, among top models like `GPT4` and `Claude-Sonnet-3.5`
| [**Global and wiki configuration**](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) | Control configurations for many repositories from a single location; <br>Edit configuration of a single repo without committing code |
| [**Apply suggestions**](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) | Generate committable code from the relevant suggestions interactively by clicking on a checkbox |
| [**Suggestions impact**](https://qodo-merge-docs.qodo.ai/tools/improve/#assessing-impact) | Automatically mark suggestions that were implemented by the user (either directly in GitHub, or indirectly in the IDE) to enable tracking of the impact of the suggestions |
| [**CI feedback**](https://qodo-merge-docs.qodo.ai/tools/ci_feedback/) | Automatically analyze failed CI checks on GitHub and provide actionable feedback in the PR conversation, helping to resolve issues quickly |
| [**Advanced usage statistics**](https://www.codium.ai/contact/#/) | Qodo Merge Pro offers detailed statistics at user, repository, and company levels, including metrics about Qodo Merge usage, and also general statistics and insights |
| [**Incorporating companies' best practices**](https://qodo-merge-docs.qodo.ai/tools/improve/#best-practices) | Use the companies' best practices as reference to increase the effectiveness and the relevance of the code suggestions |
| [**Interactive triggering**](https://qodo-merge-docs.qodo.ai/tools/analyze/#example-usage) | Interactively apply different tools via the `analyze` command |
| [**Custom labels**](https://qodo-merge-docs.qodo.ai/tools/describe/#handle-custom-labels-from-the-repos-labels-page) | Define custom labels for Qodo Merge to assign to the PR |
**Additional tools:**
### Additional tools
Here are additional tools that are available only for PR-Agent Pro users:
Here are additional tools that are available only for Qodo Merge Pro users:
| Feature | Description |
|---------|-------------|
| [**Custom Prompt Suggestions**](https://pr-agent-docs.codium.ai/tools/custom_prompt/) | Generate code suggestions based on custom prompts from the user |
| [**Analyze PR components**](https://pr-agent-docs.codium.ai/tools/analyze/) | Identify the components that changed in the PR, and enable to interactively apply different tools to them |
| [**Tests**](https://pr-agent-docs.codium.ai/tools/test/) | Generate tests for code components that changed in the PR |
| [**PR documentation**](https://pr-agent-docs.codium.ai/tools/documentation/) | Generate docstring for code components that changed in the PR |
| [**Improve Component**](https://pr-agent-docs.codium.ai/tools/improve_component/) | Generate code suggestions for code components that changed in the PR |
| [**Similar code search**](https://pr-agent-docs.codium.ai/tools/similar_code/) | Search for similar code in the repository, organization, or entire GitHub |
| [**Custom Prompt Suggestions**](https://qodo-merge-docs.qodo.ai/tools/custom_prompt/) | Generate code suggestions based on custom prompts from the user |
| [**Analyze PR components**](https://qodo-merge-docs.qodo.ai/tools/analyze/) | Identify the components that changed in the PR, and enable interactive application of different tools to them |
| [**Tests**](https://qodo-merge-docs.qodo.ai/tools/test/) | Generate tests for code components that changed in the PR |
| [**PR documentation**](https://qodo-merge-docs.qodo.ai/tools/documentation/) | Generate docstring for code components that changed in the PR |
| [**Improve Component**](https://qodo-merge-docs.qodo.ai/tools/improve_component/) | Generate code suggestions for code components that changed in the PR |
| [**Similar code search**](https://qodo-merge-docs.qodo.ai/tools/similar_code/) | Search for similar code in the repository, organization, or entire GitHub |
### Supported languages
Qodo Merge Pro leverages the world's leading code models - Claude 3.5 Sonnet and GPT-4.
As a result, its primary tools such as `describe`, `review`, and `improve`, as well as the PR-chat feature, support virtually all programming languages.
For specialized commands that require static code analysis, Qodo Merge Pro offers support for specific languages. For more details about features that require static code analysis, please refer to the [documentation](https://qodo-merge-docs.qodo.ai/tools/analyze/#overview).

View File

@ -1,7 +1,7 @@
## Overview
The `analyze` tool combines advanced static code analysis with LLM capabilities to provide a comprehensive analysis of the PR code changes.
The tool scans the PR code changes, find the code components (methods, functions, classes) that changed, and enables to interactively generate tests, docs, code suggestions and similar code search for each component.
The tool scans the PR code changes, finds the code components (methods, functions, classes) that changed, and enables you to interactively generate tests, docs, code suggestions and similar code search for each component.
It can be invoked manually by commenting on any PR:
```

View File

@ -25,10 +25,10 @@ There are 3 ways to enable custom labels:
When working from CLI, you need to apply the [configuration changes](#configuration-options) to the [custom_labels file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/custom_labels.toml):
#### 2. Repo configuration file
To enable custom labels, you need to apply the [configuration changes](#configuration-options) to the local `.pr_agent.toml` file in you repository.
To enable custom labels, you need to apply the [configuration changes](#configuration-options) to the local `.pr_agent.toml` file in your repository.
#### 3. Handle custom labels from the Repo's labels page 💎
> This feature is available only in PR-Agent Pro
> This feature is available only in Qodo Merge Pro
* GitHub : `https://github.com/{owner}/{repo}/labels`, or click on the "Labels" tab in the issues or PRs page.
* GitLab : `https://gitlab.com/{owner}/{repo}/-/labels`, or click on "Manage" -> "Labels" on the left menu.
@ -57,4 +57,4 @@ description = "Description of when AI should suggest this label"
[custom_labels."Custom Label 2"]
description = "Description of when AI should suggest this label 2"
```
```

View File

@ -25,7 +25,7 @@ If you want to edit [configurations](#configuration-options), add the relevant o
### Automatic triggering
To run the `describe` automatically when a PR is opened, define in a [configuration file](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/#wiki-configuration-file):
To run the `describe` automatically when a PR is opened, define in a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#wiki-configuration-file):
```
[github_app]
pr_commands = [
@ -34,7 +34,7 @@ pr_commands = [
]
[pr_description]
publish_labels = ...
publish_labels = true
...
```
@ -49,7 +49,7 @@ publish_labels = ...
<table>
<tr>
<td><b>publish_labels</b></td>
<td>If set to true, the tool will publish the labels to the PR. Default is true.</td>
<td>If set to true, the tool will publish labels to the PR. Default is false.</td>
</tr>
<tr>
<td><b>publish_description_as_comment</b></td>
@ -157,7 +157,7 @@ The marker `pr_agent:type` will be replaced with the PR type, `pr_agent:summary`
The default labels of the describe tool are quite generic, since they are meant to be used in any repo: [`Bug fix`, `Tests`, `Enhancement`, `Documentation`, `Other`].
You can define custom labels that are relevant for your repo and use cases.
Custom labels can be defined in a [configuration file](https://pr-agent-docs.codium.ai/tools/custom_labels/#configuration-options), or directly in the repo's [labels page](#handle-custom-labels-from-the-repos-labels-page).
Custom labels can be defined in a [configuration file](https://qodo-merge-docs.qodo.ai/tools/custom_labels/#configuration-options), or directly in the repo's [labels page](#handle-custom-labels-from-the-repos-labels-page).
Make sure to provide proper title, and a detailed and well-phrased description for each label, so the tool will know when to suggest it.
Each label description should be a **conditional statement**, that indicates if to add the label to the PR or not, according to the PR content.
@ -205,7 +205,7 @@ The description should be comprehensive and detailed, indicating when to add the
## Usage Tips
!!! tip "Automation"
- When you first install PR-Agent app, the [default mode](../usage-guide/automations_and_usage.md#github-app) for the describe tool is:
- When you first install the Qodo Merge app, the [default mode](../usage-guide/automations_and_usage.md#github-app) for the describe tool is:
```
pr_commands = ["/describe", ...]
```

View File

@ -1,6 +1,6 @@
## Overview
The `help` tool provides a list of all the available tools and their descriptions.
For PR-Agent Pro users, it also enables to trigger each tool by checking the relevant box.
For Qodo Merge Pro users, it also enables to trigger each tool by checking the relevant box.
It can be invoked manually by commenting on any PR:
```

View File

@ -1,7 +1,7 @@
## Overview
The `improve` tool scans the PR code changes, and automatically generates [meaningful](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/pr_code_suggestions_prompts.toml#L41) suggestions for improving the PR code.
The tool can be triggered automatically every time a new PR is [opened](../usage-guide/automations_and_usage.md#github-app-automatic-tools-when-a-new-pr-is-opened), or it can be invoked manually by commenting on any PR:
```
```toml
/improve
```
@ -9,7 +9,7 @@ The tool can be triggered automatically every time a new PR is [opened](../usage
![code_suggestions_as_comment_open.png](https://codium.ai/images/pr_agent/code_suggestions_as_comment_open.png){width=512}
Note that the `Apply this suggestion` checkbox, which interactively converts a suggestion into a commitable code comment, is available only for PR-Agent Pro 💎 users.
Note that the `Apply this suggestion` checkbox, which interactively converts a suggestion into a committable code comment, is available only for Qodo Merge Pro 💎 users.
## Example usage
@ -19,12 +19,12 @@ Note that the `Apply this suggestion` checkbox, which interactively converts a s
Invoke the tool manually by commenting `/improve` on any PR. The code suggestions by default are presented as a single comment:
To edit [configurations](#configuration-options) related to the improve tool, use the following template:
```
```toml
/improve --pr_code_suggestions.some_config1=... --pr_code_suggestions.some_config2=...
```
For example, you can choose to present all the suggestions as committable code comments, by running the following command:
```
```toml
/improve --pr_code_suggestions.commitable_code_suggestions=true
```
@ -36,8 +36,8 @@ Also note that collapsible are not supported in _Bitbucket_. Hence, the suggesti
### Automatic triggering
To run the `improve` automatically when a PR is opened, define in a [configuration file](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/#wiki-configuration-file):
```
To run the `improve` automatically when a PR is opened, define in a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#wiki-configuration-file):
```toml
[github_app]
pr_commands = [
"/improve",
@ -54,31 +54,86 @@ num_code_suggestions_per_chunk = ...
### Assessing Impact 💎
Note that PR-Agent pro tracks two types of implementations:
Note that Qodo Merge Pro tracks two types of implementations:
- Direct implementation - when the user directly applies the suggestion by clicking the `Apply` checkbox.
- Indirect implementation - when the user implements the suggestion in their IDE environment. In this case, PR-Agent will utilize, after each commit, a dedicated logic to identify if a suggestion was implemented, and will mark it as implemented.
- Indirect implementation - when the user implements the suggestion in their IDE environment. In this case, Qodo Merge will utilize, after each commit, a dedicated logic to identify if a suggestion was implemented, and will mark it as implemented.
![code_suggestions_asses_impact](https://codium.ai/images/pr_agent/code_suggestions_asses_impact.png){width=512}
In post-process, PR-Agent counts the number of suggestions that were implemented, and provides general statistics and insights about the suggestions' impact on the PR process.
In post-process, Qodo Merge counts the number of suggestions that were implemented, and provides general statistics and insights about the suggestions' impact on the PR process.
![code_suggestions_asses_impact_stats_1](https://codium.ai/images/pr_agent/code_suggestions_asses_impact_stats_1.png){width=512}
![code_suggestions_asses_impact_stats_2](https://codium.ai/images/pr_agent/code_suggestions_asses_impact_stats_2.png){width=512}
## Suggestion tracking 💎
`Platforms supported: GitHub, GitLab`
Qodo Merge employs a novel detection system to automatically [identify](https://qodo-merge-docs.qodo.ai/core-abilities/impact_evaluation/) AI code suggestions that PR authors have accepted and implemented.
Accepted suggestions are also automatically documented in a dedicated wiki page called `.pr_agent_accepted_suggestions`, allowing users to track historical changes, assess the tool's effectiveness, and learn from previously implemented recommendations in the repository.
An example [result](https://github.com/Codium-ai/pr-agent/wiki/.pr_agent_accepted_suggestions):
[![pr_agent_accepted_suggestions1.png](https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions1.png){width=768}](https://github.com/Codium-ai/pr-agent/wiki/.pr_agent_accepted_suggestions)
This dedicated wiki page will also serve as a foundation for future AI model improvements, allowing it to learn from historically implemented suggestions and generate more targeted, contextually relevant recommendations.
This feature is controlled by a boolean configuration parameter: `pr_code_suggestions.wiki_page_accepted_suggestions` (default is true).
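For example, a minimal sketch for turning this tracking off in a repo-level `.pr_agent.toml` (the parameter name comes from the text above; the `[pr_code_suggestions]` section follows the other examples on this page):

```toml
[pr_code_suggestions]
# Do not record accepted suggestions in the `.pr_agent_accepted_suggestions` wiki page
wiki_page_accepted_suggestions = false
```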
!!! note "Wiki must be enabled"
While the aggregation process is automatic, GitHub repositories require a one-time manual wiki setup.
To initialize the wiki: navigate to `Wiki`, select `Create the first page`, then click `Save page`.
![pr_agent_accepted_suggestions_create_first_page.png](https://qodo.ai/images/pr_agent/pr_agent_accepted_suggestions_create_first_page.png){width=768}
Once a wiki repo is created, the tool will automatically use this wiki for tracking suggestions.
!!! note "Why a wiki page?"
Your code belongs to you, and we respect your privacy. Hence, we won't store any code suggestions in an external database.
Instead, we leverage a dedicated private page, within your repository wiki, to track suggestions. This approach offers convenient, secure suggestion tracking while avoiding pull requests or any noise in the main repository.
## Usage Tips
### Implementing the proposed code suggestions
Each generated suggestion consists of three key elements:
1. A single-line summary of the proposed change
2. An expandable section containing a comprehensive description of the suggestion
3. A diff snippet showing the recommended code modification (before and after)
We advise users to apply critical analysis and judgment when implementing the proposed suggestions.
In addition to mistakes (which may happen, but are rare), sometimes the presented code modification may serve more as an _illustrative example_ than a directly applicable solution.
In such cases, we recommend prioritizing the suggestion's detailed description, using the diff snippet primarily as a supporting reference.
### Dual publishing mode
Our recommended approach for presenting code suggestions is through a [table](https://qodo-merge-docs.qodo.ai/tools/improve/#overview) (`--pr_code_suggestions.commitable_code_suggestions=false`).
This method significantly reduces the PR footprint and allows for quick and easy digestion of multiple suggestions.
We also offer a complementary **dual publishing mode**. When enabled, suggestions exceeding a certain score threshold are not only displayed in the table, but also presented as committable PR comments.
This mode helps highlight suggestions deemed more critical.
To activate dual publishing mode, use the following setting:
```toml
[pr_code_suggestions]
dual_publishing_score_threshold = x
```
Where x represents the minimum score threshold (>=) for suggestions to be presented as committable PR comments in addition to the table. Default is -1 (disabled).
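As a usage sketch, with an illustrative threshold of 7 (the value itself is an assumption; pick one that matches how selective you want the PR comments to be):

```toml
[pr_code_suggestions]
# Suggestions scoring 7 or above will also be posted as committable PR comments
dual_publishing_score_threshold = 7
```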
### Self-review
If you set in a configuration file:
```
```toml
[pr_code_suggestions]
demand_code_suggestions_self_review = true
```
The `improve` tool will add a checkbox below the suggestions, prompting the user to acknowledge that they have reviewed the suggestions.
You can set the content of the checkbox text via:
```
```toml
[pr_code_suggestions]
code_suggestions_self_review_text = "... (your text here) ..."
```
@ -86,26 +141,67 @@ code_suggestions_self_review_text = "... (your text here) ..."
![self_review_1](https://codium.ai/images/pr_agent/self_review_1.png){width=512}
💎 In addition, by setting:
```
[pr_code_suggestions]
approve_pr_on_self_review = true
```
the tool can automatically approve the PR when the user checks the self-review checkbox.
!!! tip "Tip - Reducing visual footprint after self-review 💎"
!!! tip "Tip - demanding self-review from the PR author"
If you set the number of required reviewers for a PR to 2, this effectively means that the PR author must click the self-review checkbox before the PR can be merged (in addition to a human reviewer).
The configuration parameter `pr_code_suggestions.fold_suggestions_on_self_review` (default is True)
can be used to automatically fold the suggestions after the user clicks the self-review checkbox.
This reduces the visual footprint of the suggestions, and also indicates to the PR reviewer that the suggestions have been reviewed by the PR author, and don't require further attention.
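A minimal sketch combining the two self-review parameters mentioned above, assuming you prefer to keep the suggestions expanded after the checkbox is clicked:

```toml
[pr_code_suggestions]
demand_code_suggestions_self_review = true
# Keep the suggestions table expanded after the self-review checkbox is clicked
fold_suggestions_on_self_review = false
```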
!!! tip "Tip - Demanding self-review from the PR author 💎"
By setting:
```toml
[pr_code_suggestions]
approve_pr_on_self_review = true
```
the tool can automatically add an approval when the PR author clicks the self-review checkbox.
- If you set the number of required reviewers for a PR to 2, this effectively means that the PR author must click the self-review checkbox before the PR can be merged (in addition to a human reviewer).
![self_review_2](https://codium.ai/images/pr_agent/self_review_2.png){width=512}
### `Extra instructions` and `best practices`
- If you keep the number of required reviewers for a PR to 1 and enable this configuration, this effectively means that the PR author can approve the PR by actively clicking the self-review checkbox.
To prevent unauthorized approvals, this configuration defaults to false and cannot be altered through online comments; enabling it requires a direct update to the configuration file and a commit to the repository. This ensures that using the feature demands a deliberate, documented decision by the repository owner.
### How many code suggestions are generated?
Qodo Merge uses a dynamic strategy to generate code suggestions based on the size of the pull request (PR). Here's how it works:
1) Chunking large PRs:
- Qodo Merge divides large PRs into 'chunks'.
- Each chunk contains up to `pr_code_suggestions.max_context_tokens` tokens (default: 14,000).
2) Generating suggestions:
- For each chunk, Qodo Merge generates up to `pr_code_suggestions.num_code_suggestions_per_chunk` suggestions (default: 4).
This approach has two main benefits:
- Scalability: The number of suggestions scales with the PR size, rather than being fixed.
- Quality: By processing smaller chunks, the AI can maintain higher quality suggestions, as larger contexts tend to decrease AI performance.
Note: Chunking is primarily relevant for large PRs. For most PRs (up to 500 lines of code), Qodo Merge will be able to process the entire code in a single call.
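As a sketch, both chunking parameters described above can be tuned in the configuration file (the values shown are the defaults stated in this section):

```toml
[pr_code_suggestions]
# Maximum tokens per chunk when splitting a large PR
max_context_tokens = 14000
# Suggestions generated per chunk
num_code_suggestions_per_chunk = 4
```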
### 'Extra instructions' and 'best practices'
#### Extra instructions
>`Platforms supported: GitHub, GitLab, Bitbucket`
You can use the `extra_instructions` configuration option to give the AI model additional instructions for the `improve` tool.
Be specific, clear, and concise in the instructions. With extra instructions, you are the prompter. Specify relevant aspects that you want the model to focus on.
Examples for possible instructions:
```
```toml
[pr_code_suggestions]
extra_instructions="""\
(1) Answer in japanese
@ -117,10 +213,13 @@ extra_instructions="""\
Use triple quotes to write multi-line instructions. Use bullet points or numbers to make the instructions more readable.
#### Best practices 💎
Another option to give additional guidance to the AI model is by creating a dedicated [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) called `best_practices.md`.
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization
The AI model will use this page as a reference, and in case the PR code violates any of the guidelines, it will suggest improvements accordingly, with a dedicated label: `Organization
>`Platforms supported: GitHub, GitLab`
Another option to give additional guidance to the AI model is by creating a dedicated [**wiki page**](https://github.com/Codium-ai/pr-agent/wiki) called `best_practices.md`.
This page can contain a list of best practices, coding standards, and guidelines that are specific to your repo/organization.
The AI model will use this wiki page as a reference, and in case the PR code violates any of the guidelines, it will suggest improvements accordingly, with a dedicated label: `Organization
best practice`.
Example for a `best_practices.md` content can be found [here](https://github.com/Codium-ai/pr-agent/blob/main/docs/docs/usage-guide/EXAMPLE_BEST_PRACTICE.md) (adapted from Google's [pyguide](https://google.github.io/styleguide/pyguide.html)).
@ -128,93 +227,109 @@ This file is only an example. Since it is used as a prompt for an AI model, we w
- It should be written in a clear and concise manner
- If needed, it should give short relevant code snippets as examples
- Up to 800 lines are allowed
- It is recommended to limit the text to 800 lines or fewer. Here's why:
1) Extremely long best practices documents may not be fully processed by the AI model.
2) A lengthy file probably represents a more "**generic**" set of guidelines, which the AI model is already familiar with. The objective is to focus on a more targeted set of guidelines tailored to the specific needs of this project.
Example results:
##### Local and global best practices
By default, Qodo Merge will look for a local `best_practices.md` wiki file in the root of the relevant local repo.
If you also want to enable a global `best_practices.md` wiki file, first set the following in the global configuration file:
```toml
[best_practices]
enable_global_best_practices = true
```
Then, create a `best_practices.md` wiki file in the root of the [global](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) configuration repository, `pr-agent-settings`.
##### Example results
![best_practice](https://codium.ai/images/pr_agent/org_best_practice.png){width=512}
Note that while the `extra instructions` are more related to the way the `improve` tool behaves, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
#### How to combine `extra instructions` and `best practices`
The `extra instructions` configuration is more related to the `improve` tool prompt. It can be used, for example, to avoid specific suggestions ("Don't suggest to add try-except block", "Ignore changes in toml files", ...) or to emphasize specific aspects or formats ("Answer in Japanese", "Give only short suggestions", ...)
In contrast, the `best_practices.md` file is a general guideline for the way code should be written in the repo.
Using a combination of both can help the AI model to provide relevant and tailored suggestions.
## Configuration options
!!! example "General options"
??? example "General options"
<table>
<tr>
<td><b>extra_instructions</b></td>
<td>Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".</td>
</tr>
<tr>
<td><b>commitable_code_suggestions</b></td>
<td>If set to true, the tool will display the suggestions as committable code comments. Default is false.</td>
</tr>
<tr>
<td><b>dual_publishing_score_threshold</b></td>
<td>Minimum score threshold for suggestions to be presented as committable PR comments in addition to the table. Default is -1 (disabled).</td>
</tr>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
</tr>
<tr>
<td><b>suggestions_score_threshold</b></td>
<td>Any suggestion with an importance score below this threshold will be removed. Default is 0. We highly recommend not setting this value above 7-8, since doing so may clip relevant suggestions that can be useful.</td>
</tr>
<tr>
<td><b>apply_suggestions_checkbox</b></td>
<td> Enable the checkbox to create a committable suggestion. Default is true.</td>
</tr>
<tr>
<td><b>enable_help_text</b></td>
<td>If set to true, the tool will display a help text in the comment. Default is true.</td>
</tr>
<tr>
<td><b>enable_chat_text</b></td>
<td>If set to true, the tool will display a reference to the PR chat in the comment. Default is true.</td>
</tr>
<tr>
<td><b>wiki_page_accepted_suggestions</b></td>
<td>If set to true, the tool will automatically track accepted suggestions in a dedicated wiki page called `.pr_agent_accepted_suggestions`. Default is true.</td>
</tr>
</table>
<table>
<tr>
<td><b>num_code_suggestions</b></td>
<td>Number of code suggestions provided by the 'improve' tool. Default is 4 for CLI, 0 for auto tools.</td>
</tr>
<tr>
<td><b>extra_instructions</b></td>
<td>Optional extra instructions to the tool. For example: "focus on the changes in the file X. Ignore change in ...".</td>
</tr>
<tr>
<td><b>rank_suggestions</b></td>
<td>If set to true, the tool will rank the suggestions, based on importance. Default is false.</td>
</tr>
<tr>
<td><b>commitable_code_suggestions</b></td>
<td>If set to true, the tool will display the suggestions as commitable code comments. Default is false.</td>
</tr>
<tr>
<td><b>persistent_comment</b></td>
<td>If set to true, the improve comment will be persistent, meaning that every new improve request will edit the previous one. Default is false.</td>
</tr>
<tr>
<td><b>self_reflect_on_suggestions</b></td>
<td>If set to true, the improve tool will calculate an importance score for each suggestion [1-10], and sort the suggestion labels group based on this score. Default is true.</td>
</tr>
<tr>
<td><b>suggestions_score_threshold</b></td>
<td> Any suggestion with importance score less than this threshold will be removed. Default is 0. Highly recommend not to set this value above 7-8, since above it may clip relevant suggestions that can be useful. </td>
</tr>
<tr>
<td><b>apply_suggestions_checkbox</b></td>
<td> Enable the checkbox to create a committable suggestion. Default is true.</td>
</tr>
<tr>
<td><b>enable_help_text</b></td>
<td>If set to true, the tool will display a help text in the comment. Default is true.</td>
</tr>
</table>
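For reference, a minimal sketch overriding a few of the options listed above in a `.pr_agent.toml` file (values are illustrative only):

```toml
[pr_code_suggestions]
extra_instructions = "Focus on the changes in file X; ignore changes in test files"
commitable_code_suggestions = false
persistent_comment = true
suggestions_score_threshold = 3
```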
??? example "Params for number of suggestions and AI calls"
!!! example "params for 'extended' mode"
<table>
<tr>
<td><b>auto_extended_mode</b></td>
<td>Enable extended mode automatically (no need for the --extended option). Default is true.</td>
</tr>
<tr>
<td><b>num_code_suggestions_per_chunk</b></td>
<td>Number of code suggestions provided by the 'improve' tool, per chunk. Default is 5.</td>
</tr>
<tr>
<td><b>rank_extended_suggestions</b></td>
<td>If set to true, the tool will rank the suggestions, based on importance. Default is true.</td>
</tr>
<tr>
<td><b>max_number_of_calls</b></td>
<td>Maximum number of chunks. Default is 5.</td>
</tr>
<tr>
<td><b>final_clip_factor</b></td>
<td>Factor to remove suggestions with low confidence. Default is 0.9.</td>
</tr>
</table>
<table>
<tr>
<td><b>auto_extended_mode</b></td>
<td>Enable chunking the PR code and running the tool on each chunk. Default is true.</td>
</tr>
<tr>
<td><b>num_code_suggestions_per_chunk</b></td>
<td>Number of code suggestions provided by the 'improve' tool, per chunk. Default is 4.</td>
</tr>
<tr>
<td><b>max_number_of_calls</b></td>
<td>Maximum number of chunks. Default is 3.</td>
</tr>
<tr>
<td><b>rank_extended_suggestions</b></td>
<td>If set to true, the tool will rank the suggestions, based on importance. Default is true.</td>
</tr>
</table>
## A note on code suggestions quality
- While the current AI for code is getting better and better (GPT-4), it's not flawless. Not all the suggestions will be perfect, and a user should not accept all of them automatically. Critical reading and judgment are required.
- While mistakes of the AI are rare but can happen, a real benefit from the suggestions of the `improve` (and [`review`](https://pr-agent-docs.codium.ai/tools/review/)) tool is to catch, with high probability, **mistakes or bugs done by the PR author**, when they happen. So, it's a good practice to spend the needed ~30-60 seconds to review the suggestions, even if not all of them are always relevant.
- AI models for code are getting better and better (Sonnet-3.5 and GPT-4), but they are not flawless. Not all the suggestions will be perfect, and a user should not accept all of them automatically. Critical reading and judgment are required.
- While AI mistakes are rare, they can happen; a real benefit from the suggestions of the `improve` (and [`review`](https://qodo-merge-docs.qodo.ai/tools/review/)) tool is to catch, with high probability, **mistakes or bugs made by the PR author** when they happen. So, it's good practice to spend the needed ~30-60 seconds reviewing the suggestions, even if not all of them are always relevant.
- The hierarchical structure of the suggestions is designed to help the user to _quickly_ understand them, and to decide which ones are relevant and which are not:
- Only if the `Category` header is relevant, the user should move to the summarized suggestion description
- Only if the summarized suggestion description is relevant, the user should click on the collapsible, to read the full suggestion description with a code preview example.
In addition, we recommend to use the `extra_instructions` field to guide the model to suggestions that are more relevant to the specific needs of the project.
<br>
Consider also trying the [Custom Prompt Tool](./custom_prompt.md) 💎, that will **only** propose code suggestions that follow specific guidelines defined by user.
- In addition, we recommend using the [`extra_instructions`](https://qodo-merge-docs.qodo.ai/tools/improve/#extra-instructions-and-best-practices) field to guide the model toward suggestions that are more relevant to the specific needs of the project.
- The interactive [PR chat](https://qodo-merge-docs.qodo.ai/chrome-extension/) also provides an easy way to get more tailored suggestions and feedback from the AI model.

View File

@ -1,6 +1,6 @@
# Tools
Here is a list of PR-Agent tools, each with a dedicated page that explains how to use it:
Here is a list of Qodo Merge tools, each with a dedicated page that explains how to use it:
| Tool | Description |
|------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------|
@ -19,4 +19,4 @@ Here is a list of PR-Agent tools, each with a dedicated page that explains how t
| **💎 [Improve Component (`/improve_component component_name`](./improve_component.md))** | Generates code suggestions for a specific code component that changed in the PR |
| **💎 [CI Feedback (`/checks ci_job`](./ci_feedback.md))** | Automatically generates feedback and analysis for a failed CI job |
Note that the tools marked with 💎 are available only for PR-Agent Pro users.
Note that the tools marked with 💎 are available only for Qodo Merge Pro users.

View File

@ -8,6 +8,9 @@ The tool can be triggered automatically every time a new PR is [opened](../usage
Note that the main purpose of the `review` tool is to provide the **PR reviewer** with useful feedback and insights. The PR author, in contrast, may prefer to save time and focus on the output of the [improve](./improve.md) tool, which provides actionable code suggestions.
(Read more about the different personas in the PR process and how Qodo Merge aims to assist them in our [blog](https://www.codium.ai/blog/understanding-the-challenges-and-pain-points-of-the-pull-request-cycle/))
## Example usage
### Manual triggering
@ -27,7 +30,7 @@ If you want to edit [configurations](#configuration-options), add the relevant o
### Automatic triggering
To run the `review` automatically when a PR is opened, define in a [configuration file](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/#wiki-configuration-file):
To run the `review` automatically when a PR is opened, define in a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#wiki-configuration-file):
```
[github_app]
pr_commands = [
@ -46,7 +49,7 @@ num_code_suggestions = ...
[//]: # ()
[//]: # (### Incremental Mode)
[//]: # (Incremental review only considers changes since the last PR-Agent review. This can be useful when working on the PR in an iterative manner, and you want to focus on the changes since the last review instead of reviewing the entire PR again.)
[//]: # (Incremental review only considers changes since the last Qodo Merge review. This can be useful when working on the PR in an iterative manner, and you want to focus on the changes since the last review instead of reviewing the entire PR again.)
[//]: # (For invoking the incremental mode, the following command can be used:)
@ -135,20 +138,9 @@ num_code_suggestions = ...
<td><b>require_security_review</b></td>
<td>If set to true, the tool will add a section that checks if the PR contains a possible security or vulnerability issue. Default is true.</td>
</tr>
</table>
!!! example "SOC2 ticket compliance 💎"
This sub-tool checks if the PR description properly contains a ticket to a project management system (e.g., Jira, Asana, Trello, etc.), as required by SOC2 compliance. If not, it will add a label to the PR: "Missing SOC2 ticket".
<table>
<tr>
<td><b>require_soc2_ticket</b></td>
<td>If set to true, the SOC2 ticket checker sub-tool will be enabled. Default is false.</td>
</tr>
<tr>
<td><b>soc2_ticket_prompt</b></td>
<td>The prompt for the SOC2 ticket review. Default is: `Does the PR description include a link to ticket in a project management system (e.g., Jira, Asana, Trello, etc.) ?`. Edit this field if your compliance requirements are different.</td>
<td><b>require_ticket_analysis_review</b></td>
<td>If set to true, and the PR contains a GitHub ticket number, the tool will add a section that checks whether the PR actually fulfills the ticket requirements. Default is true.</td>
</tr>
</table>
@ -190,12 +182,12 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
It is recommended to review the [Configuration options](#configuration-options) section, and choose the relevant options for your use case.
Some of the features that are disabled by default are quite useful, and should be considered for enabling. For example:
`require_score_review`, `require_soc2_ticket`, and more.
`require_score_review`, and more.
On the other hand, if you find one of the enabled features to be irrelevant for your use case, disable it. No default configuration can fit all use cases.
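As a sketch, enabling one of these disabled-by-default options in `.pr_agent.toml` (the `[pr_reviewer]` section name is assumed here, matching the tool's other settings):

```toml
[pr_reviewer]
# Ask the review tool to also score the PR
require_score_review = true
```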
!!! tip "Automation"
When you first install PR-Agent app, the [default mode](../usage-guide/automations_and_usage.md#github-app-automatic-tools-when-a-new-pr-is-opened) for the `review` tool is:
When you first install the Qodo Merge app, the [default mode](../usage-guide/automations_and_usage.md#github-app-automatic-tools-when-a-new-pr-is-opened) for the `review` tool is:
```
pr_commands = ["/review --pr_reviewer.num_code_suggestions=0", ...]
```
@ -234,7 +226,7 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
!!! tip "Auto-approval"
PR-Agent can approve a PR when a specific comment is invoked.
Qodo Merge can approve a PR when a specific comment is invoked.
To ensure safety, the auto-approval feature is disabled by default. To enable auto-approval, you need to actively set in a pre-defined configuration file the following:
```
@ -248,7 +240,7 @@ If enabled, the `review` tool can approve a PR when a specific comment, `/review
```
/review auto_approve
```
PR-Agent will automatically approve the PR, and add a comment with the approval.
Qodo Merge will automatically approve the PR, and add a comment with the approval.
You can also enable auto-approval only if the PR meets certain requirements, such as that the `estimated_review_effort` label is equal or below a certain threshold, by adjusting the flag:

View File

@ -8,9 +8,9 @@ For example:
![similar code global](https://codium.ai/images/pr_agent/similar_code_global2.png){width=768}
PR-Agent will examine the code component and will extract the most relevant keywords to search for similar code:
Qodo Merge will examine the code component and will extract the most relevant keywords to search for similar code:
- `extracted keywords`: the keywords that were extracted from the code by PR-Agent. the link will open a search page with the extracted keywords, to allow the user to modify the search if needed.
- `extracted keywords`: the keywords that were extracted from the code by Qodo Merge. The link will open a search page with the extracted keywords, to allow the user to modify the search if needed.
- `search context`: the context in which the search will be performed, organization's codebase or open-source code (Global).
- `similar code`: the most similar code components found. The link will open the code component in the relevant file.
- `relevant repositories`: the open-source repositories that are relevant to the searched code component and its keywords.

View File

@ -10,14 +10,10 @@ To get a list of the components that changed in the PR and choose the relevant c
## Example usage
Invoke the tool manually by commenting `/test` on any PR:
![test1](https://codium.ai/images/pr_agent/test1.png){width=704}
The tool will generate tests for the selected component (if no component is stated, it will generate tests for the largest component):
![test2](https://codium.ai/images/pr_agent/test2.png){width=768}
![test1](https://codium.ai/images/pr_agent/test1.png){width=768}
![test3](https://codium.ai/images/pr_agent/test3.png){width=768}
(Example taken from [here](https://github.com/Codium-ai/pr-agent/pull/598#issuecomment-1913679429)):

View File

@ -1,30 +1,18 @@
## PR-Agent Pro Models
## Qodo Merge Pro Models
The default models used by PR-Agent Pro are OpenAI's GPT-4 models. We use a combination of GPT-4-Turbo and GPT-4o to strike a balance between speed and quality.
The default models used by Qodo Merge Pro are a combination of Claude-3.5-sonnet and OpenAI's GPT-4 models.
However, users can change the model used by PR-Agent Pro to Claude-3.5-sonnet, which also excels at code tasks.
To do so, add the following to your [configuration](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/) file:
Users can configure Qodo Merge Pro to use only a specific model by editing the [configuration](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/) file.
For example, to restrict Qodo Merge Pro to using only `Claude-3.5-sonnet`, add this setting:
```
[config]
model="claude-3-5-sonnet"
```
Note that Claude models tend to give lower scores for each suggestion, so if you are using a [threshold](https://pr-agent-docs.codium.ai/tools/improve/#configuration-options):
Or to restrict Qodo Merge Pro to using only `GPT-4o`, add this setting:
```
[pr_code_suggestions]
suggestions_score_threshold=...
```
You might need to adjust this value when switching models.
### Dedicated models per tool
You can also use different models for different tools. For example, you can use the Claude-3.5-sonnet model only for the `improve` tool (and keep the default GPT-4 model for the other tools) by adding the following to your configuration file:
```
[github_app]
pr_commands = [
"/describe --pr_description.final_update_message=false",
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --config.model=claude-3-5-sonnet",
]
[config]
model="gpt-4o"
```

View File

@ -1,6 +1,28 @@
## Show possible configurations
The possible configurations of Qodo Merge are listed [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml).
On the [tools](https://qodo-merge-docs.qodo.ai/tools/) page you can find explanations of how to use these configurations for each tool.
To print all the available configurations as a comment on your PR, you can use the following command:
```
/config
```
![possible_config1](https://codium.ai/images/pr_agent/possible_config1.png){width=512}
To view the **actual** configurations used for a specific tool, after all the user settings are applied, you can add for each tool a `--config.output_relevant_configurations=true` suffix.
For example:
```
/improve --config.output_relevant_configurations=true
```
Will output an additional field showing the actual configurations used for the `improve` tool.
![possible_config2](https://codium.ai/images/pr_agent/possible_config2.png){width=512}
## Ignoring files from analysis
In some cases, you may want to exclude specific files or directories from the analysis performed by CodiumAI PR-Agent. This can be useful, for example, when you have files that are generated automatically or files that shouldn't be reviewed, like vendored code.
In some cases, you may want to exclude specific files or directories from the analysis performed by Qodo Merge. This can be useful, for example, when you have files that are generated automatically or files that shouldn't be reviewed, like vendor code.
You can ignore files or folders using the following methods:
- `IGNORE.GLOB`
@ -30,7 +52,7 @@ regex = ['.*\.py$']
## Extra instructions
All PR-Agent tools have a parameter called `extra_instructions`, that enables to add free-text extra instructions. Example usage:
All Qodo Merge tools have a parameter called `extra_instructions`, which enables adding free-text extra instructions. Example usage:
```
/update_changelog --pr_update_changelog.extra_instructions="Make sure to update also the version ..."
```
@ -42,9 +64,9 @@ This mode provides a very good speed-quality-cost tradeoff, and can handle most
When the PR is above the token limit, it employs a [PR Compression strategy](../core-abilities/index.md).
However, for very large PRs, or in case you want to emphasize quality over speed and cost, there are two possible solutions:
1) [Use a model](https://codium-ai.github.io/Docs-PR-Agent/usage-guide/#changing-a-model) with larger context, like GPT-32K, or claude-100K. This solution will be applicable for all the tools.
2) For the `/improve` tool, there is an ['extended' mode](https://codium-ai.github.io/Docs-PR-Agent/tools/#improve) (`/improve --extended`),
which divides the PR to chunks, and processes each chunk separately. With this mode, regardless of the model, no compression will be done (but for large PRs, multiple model calls may occur)
1) [Use a model](https://qodo-merge-docs.qodo.ai/usage-guide/changing_a_model/) with larger context, like GPT-32K, or claude-100K. This solution will be applicable for all the tools.
2) For the `/improve` tool, there is an ['extended' mode](https://qodo-merge-docs.qodo.ai/tools/improve/) (`/improve --extended`),
which divides the PR into chunks, and processes each chunk separately. With this mode, regardless of the model, no compression will be done (but for large PRs, multiple model calls may occur)
@ -63,20 +85,21 @@ By default, around any change in your PR, git patch provides three lines of cont
code line that already existed in the file...
```
For the `review`, `describe`, `ask` and `add_docs` tools, if the token budget allows, PR-Agent tries to increase the number of lines of context, via the parameter:
Qodo Merge will try to increase the number of lines of context, via the parameter:
```
[config]
patch_extra_lines_before=4
patch_extra_lines_after=2
patch_extra_lines_before=3
patch_extra_lines_after=1
```
Increasing this number provides more context to the model, but will also increase the token budget.
If the PR is too large (see [PR Compression strategy](https://github.com/Codium-ai/pr-agent/blob/main/PR_COMPRESSION.md)), PR-Agent automatically sets this number to 0, using the original git patch.
Increasing this number provides more context to the model, but will also increase the token budget, and may overwhelm the model with too much information, unrelated to the actual PR code changes.
If the PR is too large (see [PR Compression strategy](https://github.com/Codium-ai/pr-agent/blob/main/PR_COMPRESSION.md)), Qodo Merge may automatically set this number to 0, and will use the original git patch.
## Editing the prompts
The prompts for the various PR-Agent tools are defined in the `pr_agent/settings` folder.
The prompts for the various Qodo Merge tools are defined in the `pr_agent/settings` folder.
In practice, the prompts are loaded and stored as a standard setting object.
Hence, editing them is similar to editing any other configuration value - just place the relevant key in the `.pr_agent.toml` file, and override the default value.
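For example, a hedged sketch of overriding a prompt in `.pr_agent.toml` (the exact section and key names should be taken from the corresponding file under `pr_agent/settings`; the values below are illustrative placeholders):
```
[pr_description_prompt]
system = "..."
user = "..."
```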
@ -112,3 +135,38 @@ LANGSMITH_API_KEY=<api_key>
LANGSMITH_PROJECT=<project>
LANGSMITH_BASE_URL=<url>
```
## Ignoring automatic commands in PRs
In some cases, you may want to automatically ignore specific PRs. Qodo Merge enables you to ignore PRs with a specific title, or from/to specific branches (regex matching).
To ignore PRs with a specific title such as "[Bump]: ...", you can add the following to your `configuration.toml` file:
```
[config]
ignore_pr_title = ["\\[Bump\\]"]
```
Here, `ignore_pr_title` is a list of regex patterns to match the PR titles you want to ignore. The default is `ignore_pr_title = ["^\\[Auto\\]", "^Auto"]`.
To ignore PRs from specific source or target branches, you can add the following to your `configuration.toml` file:
```
[config]
ignore_pr_source_branches = ['develop', 'main', 'master', 'stage']
ignore_pr_target_branches = ["qa"]
```
Here, `ignore_pr_source_branches` and `ignore_pr_target_branches` are lists of regex patterns to match the source and target branches you want to ignore.
They are not mutually exclusive; you can use them together or separately.
To allow only specific folders (often needed in large monorepos), set:
```
[config]
allow_only_specific_folders=['folder1','folder2']
```
For the configuration above, automatic feedback will only be triggered when the PR changes include files from 'folder1' or 'folder2'.

View File

@ -1,5 +1,5 @@
## Local repo (CLI)
When running from your locally cloned PR-Agent repo (CLI), your local configuration file will be used.
When running from your locally cloned Qodo Merge repo (CLI), your local configuration file will be used.
Examples of invoking the different tools via the CLI:
- **Review**: `python -m pr_agent.cli --pr_url=<pr_url> review`
@ -28,7 +28,7 @@ This is useful for debugging or experimenting with different tools.
(3)
**git provider**: The [git_provider](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L5) field in a configuration file determines the GIT provider that will be used by PR-Agent. Currently, the following providers are supported:
**git provider**: The [git_provider](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L5) field in a configuration file determines the GIT provider that will be used by Qodo Merge. Currently, the following providers are supported:
`
"github", "gitlab", "bitbucket", "azure", "codecommit", "local", "gerrit"
`
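For example, a minimal sketch of setting the provider explicitly in a configuration file (any of the values listed above can be used):
```
[config]
git_provider = "gitlab"
```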
@ -39,7 +39,7 @@ Default is "github".
### Online usage
Online usage means invoking PR-Agent tools by [comments](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR.
Online usage means invoking Qodo Merge tools by [comments](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR.
Commands for invoking the different tools via comments:
- **Review**: `/review`
@ -60,8 +60,8 @@ Any configuration value in [configuration file](https://github.com/Codium-ai/pr-
## GitHub App
!!! note "Configurations for PR-Agent Pro"
PR-Agent Pro for GitHub is an App, hosted by CodiumAI. So all the instructions below are relevant also for PR-Agent Pro users.
!!! note "Configurations for Qodo Merge Pro"
Qodo Merge Pro for GitHub is an App, hosted by CodiumAI. So all the instructions below are also relevant for Qodo Merge Pro users.
Same goes for [GitLab webhook](#gitlab-webhook) and [BitBucket App](#bitbucket-app) sections.
### GitHub app automatic tools when a new PR is opened
@ -77,10 +77,10 @@ pr_commands = [
"/improve",
]
```
This means that when a new PR is opened/reopened or marked as ready for review, PR-Agent will run the `describe`, `review` and `improve` tools.
This means that when a new PR is opened/reopened or marked as ready for review, Qodo Merge will run the `describe`, `review` and `improve` tools.
For the `review` tool, for example, the `num_code_suggestions` parameter will be set to 0.
You can override the default tool parameters by using one of the three options for a [configuration file](https://codium-ai.github.io/Docs-PR-Agent/usage-guide/#configuration-options): **wiki**, **local**, or **global**.
You can override the default tool parameters by using one of the three options for a [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/): **wiki**, **local**, or **global**.
For example, if your local `.pr_agent.toml` file contains:
```
[pr_description]
@ -94,13 +94,6 @@ To cancel the automatic run of all the tools, set:
pr_commands = []
```
You can also disable automatic runs for PRs with specific titles, by setting the `ignore_pr_titles` parameter with the relevant regex. For example:
```
[github_app]
ignore_pr_title = ["^[Auto]", ".*ignore.*"]
```
will ignore PRs with titles that start with "Auto" or contain the word "ignore".
### GitHub app automatic tools for push actions (commits to an open PR)
In addition to running automatic tools when a PR is opened, the GitHub app can also respond to new code that is pushed to an open PR.
@ -115,10 +108,10 @@ push_commands = [
"/review --pr_reviewer.num_code_suggestions=0 --pr_reviewer.final_update_message=false",
]
```
This means that when new code is pushed to the PR, PR-Agent will run the `describe` and `review` tools, with the specified parameters.
This means that when new code is pushed to the PR, Qodo Merge will run the `describe` and `review` tools, with the specified parameters.
## GitHub Action
`GitHub Action` is a different way to trigger PR-Agent tools, and uses a different configuration mechanism than `GitHub App`.<br>
`GitHub Action` is a different way to trigger Qodo Merge tools, and uses a different configuration mechanism than `GitHub App`.<br>
You can configure settings for `GitHub Action` by adding environment variables under the env section in `.github/workflows/pr_agent.yml` file.
Specifically, start by setting the following environment variables:
```yaml
@ -128,22 +121,26 @@ Specifically, start by setting the following environment variables:
github_action_config.auto_review: "true" # enable\disable auto review
github_action_config.auto_describe: "true" # enable\disable auto describe
github_action_config.auto_improve: "true" # enable\disable auto improve
github_action_config.pr_actions: ["opened", "reopened", "ready_for_review", "review_requested"]
```
`github_action_config.auto_review`, `github_action_config.auto_describe` and `github_action_config.auto_improve` are used to enable/disable automatic tools that run when a new PR is opened.
If not set, the default configuration is for all three tools to run automatically when a new PR is opened.
`github_action_config.pr_actions` is used to configure which `pull_requests` events will trigger the enabled auto flags.
If not set, the default configuration is `["opened", "reopened", "ready_for_review", "review_requested"]`.
`github_action_config.enable_output` is used to enable/disable the GitHub Actions [output parameter](https://docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#outputs-for-docker-container-and-javascript-actions) (default is `true`).
Review result is output as JSON to `steps.{step-id}.outputs.review` property.
The JSON structure is equivalent to the yaml data structure defined in [pr_reviewer_prompts.toml](https://github.com/idubnori/pr-agent/blob/main/pr_agent/settings/pr_reviewer_prompts.toml).
Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` [configuration file](https://pr-agent-docs.codium.ai/usage-guide/configuration_options/#global-configuration-file) in the root of your repo
Note that you can give additional config parameters by adding environment variables to `.github/workflows/pr_agent.yml`, or by using a `.pr_agent.toml` [configuration file](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/#global-configuration-file) in the root of your repo
For example, you can set an environment variable: `pr_description.publish_labels=false`, or add a `.pr_agent.toml` file with the following content:
```
[pr_description]
publish_labels = false
```
to prevent PR-Agent from publishing labels when running the `describe` tool.
to prevent Qodo Merge from publishing labels when running the `describe` tool.
## GitLab Webhook
After setting up a GitLab webhook, to control which commands will run automatically when a new MR is opened, you can set the `pr_commands` parameter in the configuration file, similar to the GitHub App:
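For example, a hedged sketch, assuming the GitLab section uses the same `pr_commands` structure shown above for the GitHub App:
```
[gitlab]
pr_commands = [
    "/describe",
    "/review",
    "/improve",
]
```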
@ -171,17 +168,23 @@ push_commands = [
Note that to use the 'handle_push_trigger' feature, you also need to give the GitLab webhook the "Push events" scope.
## BitBucket App
Similar to the GitHub app, when running PR-Agent from the BitBucket App, the default [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.
Similar to the GitHub app, when running Qodo Merge from the BitBucket App, the default [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) from a pre-built docker will be initially loaded.
By uploading a local `.pr_agent.toml` file to the root of the repo's main branch, you can edit and customize any configuration parameter. Note that you need to upload `.pr_agent.toml` prior to creating a PR, in order for the configuration to take effect.
For example, if your local `.pr_agent.toml` file contains:
```
[pr_reviewer]
inline_code_comments = true
extra_instructions = "Answer in japanese"
```
Each time you invoke a `/review` tool, it will use inline code comments.
Each time you invoke the `/review` tool, it will use the extra instructions you set in the local configuration file.
Note that among other limitations, BitBucket provides relatively low rate-limits for applications (up to 1000 requests per hour), and does not provide an API to track the actual rate-limit usage.
If you experience lack of responses from Qodo Merge, you might want to set: `bitbucket_app.avoid_full_files=true` in your configuration file.
This will prevent Qodo Merge from acquiring the full file content, and will use only the diff content. This will reduce the number of requests made to BitBucket, at the cost of a small decrease in accuracy, as dynamic context will not be applicable.
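A minimal sketch of that setting in a `.pr_agent.toml` file (section and key names as referenced in the sentence above):
```
[bitbucket_app]
avoid_full_files = true
```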
### BitBucket Self-Hosted App automatic tools

View File

@ -133,9 +133,26 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat
If you do want to set explicit credentials, then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
### Google AI Studio
To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant models in the configuration section of the configuration file:
```toml
[config] # in configuration.toml
model="google_ai_studio/gemini-1.5-flash"
model_turbo="google_ai_studio/gemini-1.5-flash"
fallback_models=["google_ai_studio/gemini-1.5-flash"]
[google_ai_studio] # in .secrets.toml
gemini_api_key = "..."
```
If you don't want to set the API key in the .secrets.toml file, you can set the `GOOGLE_AI_STUDIO.GEMINI_API_KEY` environment variable.
### Anthropic
To use Anthropic models, set the relevant models in the configuration section of the configuration file:
```
[config]
model="anthropic/claude-3-opus-20240229"

View File

@ -1,7 +1,7 @@
The different tools and sub-tools used by CodiumAI PR-Agent are adjustable via the **[configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml)**.
The different tools and sub-tools used by Qodo Merge are adjustable via the **[configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml)**.
In addition to general configuration options, each tool has its own configurations. For example, the `review` tool will use parameters from the [pr_reviewer](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L16) section in the configuration file.
See the [Tools Guide](https://codium-ai.github.io/Docs-PR-Agent/tools/) for a detailed description of the different tools and their configurations.
See the [Tools Guide](https://qodo-merge-docs.qodo.ai/tools/) for a detailed description of the different tools and their configurations.
There are three ways to set persistent configurations:
@ -18,9 +18,9 @@ In terms of precedence, wiki configurations will override local configurations,
## Wiki configuration file 💎
`Platforms supported: GitHub, GitLab`
`Platforms supported: GitHub, GitLab, Bitbucket`
With PR-Agent-Pro, you can set configurations by creating a page called `.pr_agent.toml` in the [wiki](https://github.com/Codium-ai/pr-agent/wiki/pr_agent.toml) of the repo.
With Qodo Merge Pro, you can set configurations by creating a page called `.pr_agent.toml` in the [wiki](https://github.com/Codium-ai/pr-agent/wiki/pr_agent.toml) of the repo.
The advantage of this method is that it allows you to set configurations without needing to commit new content to the repo - just edit the wiki page and **save**.
@ -34,7 +34,7 @@ An example content:
generate_ai_title=true
```
PR-Agent will know to remove the surrounding quotes when reading the configuration content.
Qodo Merge will know to remove the surrounding quotes when reading the configuration content.
## Local configuration file

View File

@ -1,7 +1,7 @@
# Usage guide
This page provides a detailed guide on how to use PR-Agent.
It includes information on how to adjust PR-Agent configurations, define which tools will run automatically, and other advanced configurations.
This page provides a detailed guide on how to use Qodo Merge.
It includes information on how to adjust Qodo Merge configurations, define which tools will run automatically, and other advanced configurations.
- [Introduction](./introduction.md)
@ -23,4 +23,4 @@ It includes information on how to adjust PR-Agent configurations, define which t
- [Changing a model](./additional_configurations.md#changing-a-model)
- [Patch Extra Lines](./additional_configurations.md#patch-extra-lines)
- [Editing the prompts](./additional_configurations.md#editing-the-prompts)
- [PR-Agent Pro Models](./PR_agent_pro_models.md)
- [Qodo Merge Pro Models](./PR_agent_pro_models.md)

View File

@ -1,13 +1,13 @@
After [installation](https://pr-agent-docs.codium.ai/installation/), there are three basic ways to invoke CodiumAI PR-Agent:
After [installation](https://qodo-merge-docs.qodo.ai/installation/), there are three basic ways to invoke Qodo Merge:
1. Locally running a CLI command
2. Online usage - by [commenting](https://github.com/Codium-ai/pr-agent/pull/229#issuecomment-1695021901) on a PR
3. Enabling PR-Agent tools to run automatically when a new PR is opened
3. Enabling Qodo Merge tools to run automatically when a new PR is opened
Specifically, CLI commands can be issued by invoking a pre-built [docker image](https://pr-agent-docs.codium.ai/installation/locally/#using-docker-image), or by invoking a [locally cloned repo](https://pr-agent-docs.codium.ai/installation/locally/#run-from-source).
Specifically, CLI commands can be issued by invoking a pre-built [docker image](https://qodo-merge-docs.qodo.ai/installation/locally/#using-docker-image), or by invoking a [locally cloned repo](https://qodo-merge-docs.qodo.ai/installation/locally/#run-from-source).
For online usage, you will need to setup either a [GitHub App](https://pr-agent-docs.codium.ai/installation/github/#run-as-a-github-app) or a [GitHub Action](https://pr-agent-docs.codium.ai/installation/github/#run-as-a-github-action) (GitHub), a [GitLab webhook](https://pr-agent-docs.codium.ai/installation/gitlab/#run-a-gitlab-webhook-server) (GitLab), or a [BitBucket App](https://pr-agent-docs.codium.ai/installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app) (BitBucket).
These platforms also enable to run PR-Agent specific tools automatically when a new PR is opened, or on each push to a branch.
For online usage, you will need to setup either a [GitHub App](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app) or a [GitHub Action](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-action) (GitHub), a [GitLab webhook](https://qodo-merge-docs.qodo.ai/installation/gitlab/#run-a-gitlab-webhook-server) (GitLab), or a [BitBucket App](https://qodo-merge-docs.qodo.ai/installation/bitbucket/#run-using-codiumai-hosted-bitbucket-app) (BitBucket).
These platforms also enable running Qodo Merge-specific tools automatically when a new PR is opened, or on each push to a branch.

View File

@ -1,15 +1,15 @@
Unfortunately, it is not possible in GitHub to disable mail notifications from a specific user.
If you are subscribed to notifications for a repo with PR-Agent, we recommend turning off notifications for PR comments, to avoid lengthy emails:
If you are subscribed to notifications for a repo with Qodo Merge, we recommend turning off notifications for PR comments, to avoid lengthy emails:
![notifications](https://codium.ai/images/pr_agent/notifications.png){width=512}
As an alternative, you can filter in your mail provider the notifications specifically from the PR-Agent bot, [see how](https://www.quora.com/How-can-you-filter-emails-for-specific-people-in-Gmail#:~:text=On%20the%20Filters%20and%20Blocked,the%20body%20of%20the%20email).
As an alternative, you can filter in your mail provider the notifications specifically from the Qodo Merge bot, [see how](https://www.quora.com/How-can-you-filter-emails-for-specific-people-in-Gmail#:~:text=On%20the%20Filters%20and%20Blocked,the%20body%20of%20the%20email).
![filter_mail_notifications](https://codium.ai/images/pr_agent/filter_mail_notifications.png){width=512}
Another option to reduce the mail overload, yet still receive notifications on PR-Agent tools, is to disable the help collapsible section in PR-Agent bot comments.
Another option to reduce the mail overload, yet still receive notifications on Qodo Merge tools, is to disable the help collapsible section in Qodo Merge bot comments.
This can be done by setting `enable_help_text=false` for the relevant tool in the configuration file.
For example, to disable the help text for the `pr_reviewer` tool, set:
```
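# a sketch based on the sentence above; the key name is taken from the surrounding text
[pr_reviewer]
enable_help_text = false
```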

View File

@ -1,11 +1,11 @@
site_name: PR-Agent Documentation
site_name: Qodo Merge (formerly known as PR-Agent)
repo_url: https://github.com/Codium-ai/pr-agent
repo_name: Codium-ai/pr-agent
nav:
- Overview:
- 'index.md'
- 💎 PR-Agent Pro: 'overview/pr_agent_pro.md'
- 💎 Qodo Merge Pro: 'overview/pr_agent_pro.md'
- Data Privacy: 'overview/data_privacy.md'
- Installation:
- 'installation/index.md'
@ -14,7 +14,7 @@ nav:
- GitLab: 'installation/gitlab.md'
- BitBucket: 'installation/bitbucket.md'
- Azure DevOps: 'installation/azure.md'
- 💎 PR-Agent Pro: 'installation/pr_agent_pro.md'
- 💎 Qodo Merge Pro: 'installation/pr_agent_pro.md'
- Usage Guide:
- 'usage-guide/index.md'
- Introduction: 'usage-guide/introduction.md'
@ -23,7 +23,7 @@ nav:
- Managing Mail Notifications: 'usage-guide/mail_notifications.md'
- Changing a Model: 'usage-guide/changing_a_model.md'
- Additional Configurations: 'usage-guide/additional_configurations.md'
- 💎 PR-Agent Pro Models: 'usage-guide/PR_agent_pro_models'
- 💎 Qodo Merge Pro Models: 'usage-guide/PR_agent_pro_models'
- Tools:
- 'tools/index.md'
- Describe: 'tools/describe.md'
@ -41,12 +41,24 @@ nav:
- 💎 Custom Prompt: 'tools/custom_prompt.md'
- 💎 CI Feedback: 'tools/ci_feedback.md'
- 💎 Similar Code: 'tools/similar_code.md'
- Core Abilities: 'core-abilities/index.md'
- Core Abilities:
- 'core-abilities/index.md'
- Local and global metadata: 'core-abilities/metadata.md'
- Dynamic context: 'core-abilities/dynamic_context.md'
- Self-reflection: 'core-abilities/self_reflection.md'
- Impact evaluation: 'core-abilities/impact_evaluation.md'
- Interactivity: 'core-abilities/interactivity.md'
- Compression strategy: 'core-abilities/compression_strategy.md'
- Code-oriented YAML: 'core-abilities/code_oriented_yaml.md'
- Static code analysis: 'core-abilities/static_code_analysis.md'
- Code Fine-tuning Benchmark: 'finetuning_benchmark/index.md'
- Chrome Extension:
- PR-Agent Chrome Extension: 'chrome-extension/index.md'
- Qodo Merge Chrome Extension: 'chrome-extension/index.md'
- Features: 'chrome-extension/features.md'
- Data Privacy: 'chrome-extension/data_privacy.md'
- Code Fine-tuning Benchmark: 'finetuning_benchmark/index.md'
- FAQ:
- FAQ: 'faq/index.md'
# - Code Fine-tuning Benchmark: 'finetuning_benchmark/index.md'
theme:
logo: assets/logo.svg

View File

@ -82,11 +82,11 @@
<footer class="wrapper">
<div class="container">
<p class="footer-text">© 2024 <a href="https://www.codium.ai/" target="_blank" rel="noopener">CodiumAI</a></p>
<p class="footer-text">© 2024 <a href="https://www.qodo.ai/" target="_blank" rel="noopener">Qodo</a></p>
<div class="footer-links">
<a href="https://codiumate-docs.codium.ai/">Codiumate</a>
<a href="https://qodo-gen-docs.qodo.ai/">Qodo Gen</a>
<p>|</p>
<a href="https://alpha-codium-docs.codium.ai/">AlphaCodium</a>
<a href="https://qodo-flow-docs.qodo.ai/">AlphaCodium</a>
</div>
<div class="social-icons">
<a href="https://github.com/Codium-ai" target="_blank" rel="noopener" title="github.com" class="social-link">
@ -95,16 +95,16 @@
<a href="https://discord.com/invite/SgSxuQ65GF" target="_blank" rel="noopener" title="discord.com" class="social-link">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M524.531 69.836a1.5 1.5 0 0 0-.764-.7A485.065 485.065 0 0 0 404.081 32.03a1.816 1.816 0 0 0-1.923.91 337.461 337.461 0 0 0-14.9 30.6 447.848 447.848 0 0 0-134.426 0 309.541 309.541 0 0 0-15.135-30.6 1.89 1.89 0 0 0-1.924-.91 483.689 483.689 0 0 0-119.688 37.107 1.712 1.712 0 0 0-.788.676C39.068 183.651 18.186 294.69 28.43 404.354a2.016 2.016 0 0 0 .765 1.375 487.666 487.666 0 0 0 146.825 74.189 1.9 1.9 0 0 0 2.063-.676A348.2 348.2 0 0 0 208.12 430.4a1.86 1.86 0 0 0-1.019-2.588 321.173 321.173 0 0 1-45.868-21.853 1.885 1.885 0 0 1-.185-3.126 251.047 251.047 0 0 0 9.109-7.137 1.819 1.819 0 0 1 1.9-.256c96.229 43.917 200.41 43.917 295.5 0a1.812 1.812 0 0 1 1.924.233 234.533 234.533 0 0 0 9.132 7.16 1.884 1.884 0 0 1-.162 3.126 301.407 301.407 0 0 1-45.89 21.83 1.875 1.875 0 0 0-1 2.611 391.055 391.055 0 0 0 30.014 48.815 1.864 1.864 0 0 0 2.063.7A486.048 486.048 0 0 0 610.7 405.729a1.882 1.882 0 0 0 .765-1.352c12.264-126.783-20.532-236.912-86.934-334.541ZM222.491 337.58c-28.972 0-52.844-26.587-52.844-59.239s23.409-59.241 52.844-59.241c29.665 0 53.306 26.82 52.843 59.239 0 32.654-23.41 59.241-52.843 59.241Zm195.38 0c-28.971 0-52.843-26.587-52.843-59.239s23.409-59.241 52.843-59.241c29.667 0 53.307 26.82 52.844 59.239 0 32.654-23.177 59.241-52.844 59.241Z"></path></svg>
</a>
<a href="https://www.youtube.com/@Codium-AI" target="_blank" rel="noopener" title="www.youtube.com" class="social-link">
<a href="https://www.youtube.com/@QodoAI" target="_blank" rel="noopener" title="www.youtube.com" class="social-link">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M549.655 124.083c-6.281-23.65-24.787-42.276-48.284-48.597C458.781 64 288 64 288 64S117.22 64 74.629 75.486c-23.497 6.322-42.003 24.947-48.284 48.597-11.412 42.867-11.412 132.305-11.412 132.305s0 89.438 11.412 132.305c6.281 23.65 24.787 41.5 48.284 47.821C117.22 448 288 448 288 448s170.78 0 213.371-11.486c23.497-6.321 42.003-24.171 48.284-47.821 11.412-42.867 11.412-132.305 11.412-132.305s0-89.438-11.412-132.305zm-317.51 213.508V175.185l142.739 81.205-142.739 81.201z"></path></svg>
</a>
<a href="https://www.linkedin.com/company/codiumai" target="_blank" rel="noopener" title="www.linkedin.com" class="social-link">
<a href="https://www.linkedin.com/company/qodoai" target="_blank" rel="noopener" title="www.linkedin.com" class="social-link">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M416 32H31.9C14.3 32 0 46.5 0 64.3v383.4C0 465.5 14.3 480 31.9 480H416c17.6 0 32-14.5 32-32.3V64.3c0-17.8-14.4-32.3-32-32.3zM135.4 416H69V202.2h66.5V416zm-33.2-243c-21.3 0-38.5-17.3-38.5-38.5S80.9 96 102.2 96c21.2 0 38.5 17.3 38.5 38.5 0 21.3-17.2 38.5-38.5 38.5zm282.1 243h-66.4V312c0-24.8-.5-56.7-34.5-56.7-34.6 0-39.9 27-39.9 54.9V416h-66.4V202.2h63.7v29.2h.9c8.9-16.8 30.6-34.5 62.9-34.5 67.2 0 79.7 44.3 79.7 101.9V416z"></path></svg>
</a>
<a href="https://twitter.com/CodiumAI" target="_blank" rel="noopener" title="twitter.com" class="social-link">
<a href="https://twitter.com/QodoAI" target="_blank" rel="noopener" title="twitter.com" class="social-link">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M459.37 151.716c.325 4.548.325 9.097.325 13.645 0 138.72-105.583 298.558-298.558 298.558-59.452 0-114.68-17.219-161.137-47.106 8.447.974 16.568 1.299 25.34 1.299 49.055 0 94.213-16.568 130.274-44.832-46.132-.975-84.792-31.188-98.112-72.772 6.498.974 12.995 1.624 19.818 1.624 9.421 0 18.843-1.3 27.614-3.573-48.081-9.747-84.143-51.98-84.143-102.985v-1.299c13.969 7.797 30.214 12.67 47.431 13.319-28.264-18.843-46.781-51.005-46.781-87.391 0-19.492 5.197-37.36 14.294-52.954 51.655 63.675 129.3 105.258 216.365 109.807-1.624-7.797-2.599-15.918-2.599-24.04 0-57.828 46.782-104.934 104.934-104.934 30.213 0 57.502 12.67 76.67 33.137 23.715-4.548 46.456-13.32 66.599-25.34-7.798 24.366-24.366 44.833-46.132 57.827 21.117-2.273 41.584-8.122 60.426-16.243-14.292 20.791-32.161 39.308-52.628 54.253z"></path></svg>
</a>
<a href="https://www.instagram.com/codiumai/" target="_blank" rel="noopener" title="www.instagram.com" class="social-link">
<a href="https://www.instagram.com/qodo_ai" target="_blank" rel="noopener" title="www.instagram.com" class="social-link">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M224.1 141c-63.6 0-114.9 51.3-114.9 114.9s51.3 114.9 114.9 114.9S339 319.5 339 255.9 287.7 141 224.1 141zm0 189.6c-41.1 0-74.7-33.5-74.7-74.7s33.5-74.7 74.7-74.7 74.7 33.5 74.7 74.7-33.6 74.7-74.7 74.7zm146.4-194.3c0 14.9-12 26.8-26.8 26.8-14.9 0-26.8-12-26.8-26.8s12-26.8 26.8-26.8 26.8 12 26.8 26.8zm76.1 27.2c-1.7-35.9-9.9-67.7-36.2-93.9-26.2-26.2-58-34.4-93.9-36.2-37-2.1-147.9-2.1-184.9 0-35.8 1.7-67.6 9.9-93.9 36.1s-34.4 58-36.2 93.9c-2.1 37-2.1 147.9 0 184.9 1.7 35.9 9.9 67.7 36.2 93.9s58 34.4 93.9 36.2c37 2.1 147.9 2.1 184.9 0 35.9-1.7 67.7-9.9 93.9-36.2 26.2-26.2 34.4-58 36.2-93.9 2.1-37 2.1-147.8 0-184.8zM398.8 388c-7.8 19.6-22.9 34.7-42.6 42.6-29.5 11.7-99.5 9-132.1 9s-102.7 2.6-132.1-9c-19.6-7.8-34.7-22.9-42.6-42.6-11.7-29.5-9-99.5-9-132.1s-2.6-102.7 9-132.1c7.8-19.6 22.9-34.7 42.6-42.6 29.5-11.7 99.5-9 132.1-9s102.7-2.6 132.1 9c19.6 7.8 34.7 22.9 42.6 42.6 11.7 29.5 9 99.5 9 132.1s2.7 102.7-9 132.1z"></path></svg>
</a>
</div>

View File

@ -19,6 +19,10 @@ MAX_TOKENS = {
'gpt-4o-mini': 128000, # 128K, but may be limited by config.max_model_tokens
'gpt-4o-mini-2024-07-18': 128000, # 128K, but may be limited by config.max_model_tokens
'gpt-4o-2024-08-06': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-mini': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-mini-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-preview': 128000, # 128K, but may be limited by config.max_model_tokens
'o1-preview-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
'claude-instant-1': 100000,
'claude-2': 100000,
'command-nightly': 4096,
@ -30,9 +34,12 @@ MAX_TOKENS = {
'vertex_ai/claude-3-sonnet@20240229': 100000,
'vertex_ai/claude-3-opus@20240229': 100000,
'vertex_ai/claude-3-5-sonnet@20240620': 100000,
'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
'vertex_ai/gemini-1.5-pro': 1048576,
'vertex_ai/gemini-1.5-flash': 1048576,
'vertex_ai/gemma2': 8200,
'gemini/gemini-1.5-pro': 1048576,
'gemini/gemini-1.5-flash': 1048576,
'codechat-bison': 6144,
'codechat-bison-32k': 32000,
'anthropic.claude-instant-v1': 100000,
@ -40,12 +47,14 @@ MAX_TOKENS = {
'anthropic.claude-v2': 100000,
'anthropic/claude-3-opus-20240229': 100000,
'anthropic/claude-3-5-sonnet-20240620': 100000,
'anthropic/claude-3-5-sonnet-20241022': 100000,
'bedrock/anthropic.claude-instant-v1': 100000,
'bedrock/anthropic.claude-v2': 100000,
'bedrock/anthropic.claude-v2:1': 100000,
'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000,
'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000,
'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000,
'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000,
'claude-3-5-sonnet': 100000,
'groq/llama3-8b-8192': 8192,
'groq/llama3-70b-8192': 8192,

View File

@ -83,6 +83,11 @@ class LiteLLMAIHandler(BaseAiHandler):
litellm.vertex_location = get_settings().get(
"VERTEXAI.VERTEX_LOCATION", None
)
# Google AI Studio
# SEE https://docs.litellm.ai/docs/providers/gemini
if get_settings().get("GOOGLE_AI_STUDIO.GEMINI_API_KEY", None):
os.environ["GEMINI_API_KEY"] = get_settings().google_ai_studio.gemini_api_key
def prepare_logs(self, response, system, user, resp, finish_reason):
response_log = response.dict().copy()
response_log['system'] = system
@ -167,10 +172,11 @@ class LiteLLMAIHandler(BaseAiHandler):
if self.azure:
model = 'azure/' + model
if 'claude' in model and not system:
system = "\n"
system = "No system prompt provided"
get_logger().warning(
"Empty system prompt for claude model. Adding a newline character to prevent OpenAI API error.")
messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
if img_path:
try:
# check if the image link is alive
@ -185,14 +191,30 @@ class LiteLLMAIHandler(BaseAiHandler):
messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
{"type": "image_url", "image_url": {"url": img_path}}]
kwargs = {
"model": model,
"deployment_id": deployment_id,
"messages": messages,
"temperature": temperature,
"timeout": get_settings().config.ai_timeout,
"api_base": self.api_base,
}
# Currently O1 does not support separate system and user prompts
O1_MODEL_PREFIX = 'o1-'
model_type = model.split('/')[-1] if '/' in model else model
if model_type.startswith(O1_MODEL_PREFIX):
user = f"{system}\n\n\n{user}"
system = ""
get_logger().info(f"Using O1 model, combining system and user prompts")
messages = [{"role": "user", "content": user}]
kwargs = {
"model": model,
"deployment_id": deployment_id,
"messages": messages,
"timeout": get_settings().config.ai_timeout,
"api_base": self.api_base,
}
else:
kwargs = {
"model": model,
"deployment_id": deployment_id,
"messages": messages,
"temperature": temperature,
"timeout": get_settings().config.ai_timeout,
"api_base": self.api_base,
}
if get_settings().litellm.get("enable_callbacks", False):
kwargs = self.add_litellm_callbacks(kwargs)
@ -215,13 +237,13 @@ class LiteLLMAIHandler(BaseAiHandler):
response = await acompletion(**kwargs)
except (openai.APIError, openai.APITimeoutError) as e:
get_logger().warning("Error during OpenAI inference: ", e)
get_logger().warning(f"Error during LLM inference: {e}")
raise
except (openai.RateLimitError) as e:
get_logger().error("Rate limit error during OpenAI inference: ", e)
get_logger().error(f"Rate limit error during LLM inference: {e}")
raise
except (Exception) as e:
get_logger().warning("Unknown error during OpenAI inference: ", e)
get_logger().warning(f"Unknown error during LLM inference: {e}")
raise openai.APIError from e
if response is None or len(response["choices"]) == 0:
raise openai.APIError

View File

@ -1,6 +1,7 @@
from os import environ
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
import openai
from openai.error import APIError, RateLimitError, Timeout, TryAgain
from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
from retry import retry
from pr_agent.config_loader import get_settings
@ -14,7 +15,7 @@ class OpenAIHandler(BaseAiHandler):
# Initialize OpenAIHandler specific attributes here
try:
super().__init__()
openai.api_key = get_settings().openai.key
environ["OPENAI_API_KEY"] = get_settings().openai.key
if get_settings().get("OPENAI.ORG", None):
openai.organization = get_settings().openai.org
if get_settings().get("OPENAI.API_TYPE", None):
@ -24,7 +25,7 @@ class OpenAIHandler(BaseAiHandler):
if get_settings().get("OPENAI.API_VERSION", None):
openai.api_version = get_settings().openai.api_version
if get_settings().get("OPENAI.API_BASE", None):
openai.api_base = get_settings().openai.api_base
environ["OPENAI_BASE_URL"] = get_settings().openai.api_base
except AttributeError as e:
raise ValueError("OpenAI key is required") from e
@ -36,7 +37,7 @@ class OpenAIHandler(BaseAiHandler):
"""
return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
@retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
@retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
try:
@ -44,20 +45,19 @@ class OpenAIHandler(BaseAiHandler):
get_logger().info("System: ", system)
get_logger().info("User: ", user)
messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
chat_completion = await openai.ChatCompletion.acreate(
client = AsyncOpenAI()
chat_completion = await client.chat.completions.create(
model=model,
deployment_id=deployment_id,
messages=messages,
temperature=temperature,
)
resp = chat_completion["choices"][0]['message']['content']
finish_reason = chat_completion["choices"][0]["finish_reason"]
usage = chat_completion.get("usage")
resp = chat_completion.choices[0].message.content
finish_reason = chat_completion.choices[0].finish_reason
usage = chat_completion.usage
get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
model=model, usage=usage)
return resp, finish_reason
except (APIError, Timeout, TryAgain) as e:
except (APIError, Timeout) as e:
get_logger().error("Error during OpenAI inference: ", e)
raise
except (RateLimitError) as e:
@ -65,4 +65,4 @@ class OpenAIHandler(BaseAiHandler):
raise
except (Exception) as e:
get_logger().error("Unknown error during OpenAI inference: ", e)
raise TryAgain from e
raise

View File

@ -1,6 +1,7 @@
from __future__ import annotations
import re
import traceback
from pr_agent.config_loader import get_settings
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
@ -12,27 +13,48 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
if not patch_str or (patch_extra_lines_before == 0 and patch_extra_lines_after == 0) or not original_file_str:
return patch_str
if type(original_file_str) == bytes:
original_file_str = decode_if_bytes(original_file_str)
if not original_file_str:
return patch_str
if should_skip_patch(filename):
return patch_str
try:
extended_patch_str = process_patch_lines(patch_str, original_file_str,
patch_extra_lines_before, patch_extra_lines_after)
except Exception as e:
get_logger().warning(f"Failed to extend patch: {e}", artifact={"traceback": traceback.format_exc()})
return patch_str
return extended_patch_str
def decode_if_bytes(original_file_str):
if isinstance(original_file_str, bytes):
try:
original_file_str = original_file_str.decode('utf-8')
return original_file_str.decode('utf-8')
except UnicodeDecodeError:
encodings_to_try = ['iso-8859-1', 'latin-1', 'ascii', 'utf-16']
for encoding in encodings_to_try:
try:
return original_file_str.decode(encoding)
except UnicodeDecodeError:
continue
return ""
return original_file_str
# skip patches
patch_extension_skip_types = get_settings().config.patch_extension_skip_types #[".md",".txt"]
def should_skip_patch(filename):
patch_extension_skip_types = get_settings().config.patch_extension_skip_types
if patch_extension_skip_types and filename:
if any([filename.endswith(skip_type) for skip_type in patch_extension_skip_types]):
return patch_str
return any(filename.endswith(skip_type) for skip_type in patch_extension_skip_types)
return False
# dynamic context settings
def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before, patch_extra_lines_after):
allow_dynamic_context = get_settings().config.allow_dynamic_context
max_extra_lines_before_dynamic_context = get_settings().config.max_extra_lines_before_dynamic_context
patch_extra_lines_before_dynamic = patch_extra_lines_before
if allow_dynamic_context:
if max_extra_lines_before_dynamic_context > patch_extra_lines_before:
patch_extra_lines_before_dynamic = max_extra_lines_before_dynamic_context
else:
get_logger().warning(f"'max_extra_lines_before_dynamic_context' should be greater than 'patch_extra_lines_before'")
patch_extra_lines_before_dynamic = get_settings().config.max_extra_lines_before_dynamic_context
original_lines = original_file_str.splitlines()
len_original_lines = len(original_lines)
@ -46,23 +68,14 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
for line in patch_lines:
if line.startswith('@@'):
match = RE_HUNK_HEADER.match(line)
# identify hunk header
if match:
# finish last hunk
# finish processing previous hunk
if start1 != -1 and patch_extra_lines_after > 0:
delta_lines = original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]
delta_lines = [f' {line}' for line in delta_lines]
delta_lines = [f' {line}' for line in original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]]
extended_patch_lines.extend(delta_lines)
res = list(match.groups())
for i in range(len(res)):
if res[i] is None:
res[i] = 0
try:
start1, size1, start2, size2 = map(int, res[:4])
except: # '@@ -0,0 +1 @@' case
start1, size1, size2 = map(int, res[:3])
start2 = 0
section_header = res[4]
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
if patch_extra_lines_before > 0 or patch_extra_lines_after > 0:
def _calc_context_limits(patch_lines_before):
@ -82,29 +95,30 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
_calc_context_limits(patch_extra_lines_before_dynamic)
lines_before = original_lines[extended_start1 - 1:start1 - 1]
found_header = False
for i,line, in enumerate(lines_before):
for i, line, in enumerate(lines_before):
if section_header in line:
found_header = True
# Update start and size in one line each
extended_start1, extended_start2 = extended_start1 + i, extended_start2 + i
extended_size1, extended_size2 = extended_size1 - i, extended_size2 - i
get_logger().debug(f"Found section header in line {i} before the hunk")
# get_logger().debug(f"Found section header in line {i} before the hunk")
section_header = ''
break
if not found_header:
get_logger().debug(f"Section header not found in the extra lines before the hunk")
# get_logger().debug(f"Section header not found in the extra lines before the hunk")
extended_start1, extended_size1, extended_start2, extended_size2 = \
_calc_context_limits(patch_extra_lines_before)
else:
extended_start1, extended_size1, extended_start2, extended_size2 = \
_calc_context_limits(patch_extra_lines_before)
delta_lines = original_lines[extended_start1 - 1:start1 - 1]
delta_lines = [f' {line}' for line in delta_lines]
delta_lines = [f' {line}' for line in original_lines[extended_start1 - 1:start1 - 1]]
# logic to remove the section header if it's in the extra delta lines (in dynamic context, this is also done)
if section_header and not allow_dynamic_context:
for line in delta_lines:
if section_header in line:
section_header = '' # remove section header if it is in the extra delta lines
section_header = '' # remove section header if it is in the extra delta lines
break
else:
extended_start1 = start1
@ -120,11 +134,10 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
continue
extended_patch_lines.append(line)
except Exception as e:
if get_settings().config.verbosity_level >= 2:
get_logger().error(f"Failed to extend patch: {e}")
get_logger().warning(f"Failed to extend patch: {e}", artifact={"traceback": traceback.format_exc()})
return patch_str
# finish last hunk
# finish processing last hunk
if start1 != -1 and patch_extra_lines_after > 0:
delta_lines = original_lines[start1 + size1 - 1:start1 + size1 - 1 + patch_extra_lines_after]
# add space at the beginning of each extra line
@ -135,6 +148,20 @@ def extend_patch(original_file_str, patch_str, patch_extra_lines_before=0,
return extended_patch_str
def extract_hunk_headers(match):
res = list(match.groups())
for i in range(len(res)):
if res[i] is None:
res[i] = 0
try:
start1, size1, start2, size2 = map(int, res[:4])
except: # '@@ -0,0 +1 @@' case
start1, size1, size2 = map(int, res[:3])
start2 = 0
section_header = res[4]
return section_header, size1, size2, start1, start2
def omit_deletion_hunks(patch_lines) -> str:
"""
Omit deletion hunks from the patch and return the modified patch.
@ -243,7 +270,7 @@ __old hunk__
if hasattr(file, 'edit_type') and file.edit_type == EDIT_TYPE.DELETED:
return f"\n\n## file '{file.filename.strip()}' was deleted\n"
patch_with_lines_str = f"\n\n## file: '{file.filename.strip()}'\n"
patch_with_lines_str = f"\n\n## File: '{file.filename.strip()}'\n"
patch_lines = patch.splitlines()
RE_HUNK_HEADER = re.compile(
r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
@ -253,7 +280,7 @@ __old hunk__
start1, size1, start2, size2 = -1, -1, -1, -1
prev_header_line = []
header_line = []
for line in patch_lines:
for line_i, line in enumerate(patch_lines):
if 'no newline at end of file' in line.lower():
continue
@ -263,63 +290,62 @@ __old hunk__
if match and (new_content_lines or old_content_lines): # found a new hunk, split the previous lines
if prev_header_line:
patch_with_lines_str += f'\n{prev_header_line}\n'
is_plus_lines = is_minus_lines = False
if new_content_lines:
is_plus_lines = any([line.startswith('+') for line in new_content_lines])
if is_plus_lines:
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
for i, line_new in enumerate(new_content_lines):
patch_with_lines_str += f"{start2 + i} {line_new}\n"
if old_content_lines:
is_minus_lines = any([line.startswith('-') for line in old_content_lines])
if is_minus_lines:
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
for line_old in old_content_lines:
patch_with_lines_str += f"{line_old}\n"
if is_plus_lines or is_minus_lines: # always present a __new hunk__ section when the hunk has any content, otherwise the LLM gets confused
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
for i, line_new in enumerate(new_content_lines):
patch_with_lines_str += f"{start2 + i} {line_new}\n"
if is_minus_lines:
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
for line_old in old_content_lines:
patch_with_lines_str += f"{line_old}\n"
new_content_lines = []
old_content_lines = []
if match:
prev_header_line = header_line
res = list(match.groups())
for i in range(len(res)):
if res[i] is None:
res[i] = 0
try:
start1, size1, start2, size2 = map(int, res[:4])
except: # '@@ -0,0 +1 @@' case
start1, size1, size2 = map(int, res[:3])
start2 = 0
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
elif line.startswith('+'):
new_content_lines.append(line)
elif line.startswith('-'):
old_content_lines.append(line)
else:
if not line and line_i: # if this line is empty and the next line is a hunk header, skip it
if line_i + 1 < len(patch_lines) and patch_lines[line_i + 1].startswith('@@'):
continue
elif line_i + 1 == len(patch_lines):
continue
new_content_lines.append(line)
old_content_lines.append(line)
# finishing last hunk
if match and new_content_lines:
patch_with_lines_str += f'\n{header_line}\n'
is_plus_lines = is_minus_lines = False
if new_content_lines:
is_plus_lines = any([line.startswith('+') for line in new_content_lines])
if is_plus_lines:
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
for i, line_new in enumerate(new_content_lines):
patch_with_lines_str += f"{start2 + i} {line_new}\n"
if old_content_lines:
is_minus_lines = any([line.startswith('-') for line in old_content_lines])
if is_minus_lines:
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
for line_old in old_content_lines:
patch_with_lines_str += f"{line_old}\n"
if is_plus_lines or is_minus_lines: # always present a __new hunk__ section when the hunk has any content, otherwise the LLM gets confused
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__new hunk__\n'
for i, line_new in enumerate(new_content_lines):
patch_with_lines_str += f"{start2 + i} {line_new}\n"
if is_minus_lines:
patch_with_lines_str = patch_with_lines_str.rstrip() + '\n__old hunk__\n'
for line_old in old_content_lines:
patch_with_lines_str += f"{line_old}\n"
return patch_with_lines_str.rstrip()
def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, side) -> tuple[str, str]:
patch_with_lines_str = f"\n\n## file: '{file_name.strip()}'\n\n"
patch_with_lines_str = f"\n\n## File: '{file_name.strip()}'\n\n"
selected_lines = ""
patch_lines = patch.splitlines()
RE_HUNK_HEADER = re.compile(
@ -339,15 +365,7 @@ def extract_hunk_lines_from_patch(patch: str, file_name, line_start, line_end, s
match = RE_HUNK_HEADER.match(line)
res = list(match.groups())
for i in range(len(res)):
if res[i] is None:
res[i] = 0
try:
start1, size1, start2, size2 = map(int, res[:4])
except: # '@@ -0,0 +1 @@' case
start1, size1, size2 = map(int, res[:3])
start2 = 0
section_header, size1, size2, start1, start2 = extract_hunk_headers(match)
# check if line range is in this hunk
if side.lower() == 'left':

View File

@ -23,8 +23,15 @@ ADDED_FILES_ = "Additional added files (insufficient token budget to process):\n
OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD = 1500
OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD = 1000
MAX_EXTRA_LINES = 10
def cap_and_log_extra_lines(value, direction) -> int:
if value > MAX_EXTRA_LINES:
get_logger().warning(f"patch_extra_lines_{direction} was {value}, capping to {MAX_EXTRA_LINES}")
return MAX_EXTRA_LINES
return value
def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler,
model: str,
@ -38,6 +45,8 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler,
else:
PATCH_EXTRA_LINES_BEFORE = get_settings().config.patch_extra_lines_before
PATCH_EXTRA_LINES_AFTER = get_settings().config.patch_extra_lines_after
PATCH_EXTRA_LINES_BEFORE = cap_and_log_extra_lines(PATCH_EXTRA_LINES_BEFORE, "before")
PATCH_EXTRA_LINES_AFTER = cap_and_log_extra_lines(PATCH_EXTRA_LINES_AFTER, "after")
try:
diff_files_original = git_provider.get_diff_files()
@ -200,6 +209,10 @@ def pr_generate_extended_diff(pr_languages: list,
if add_line_numbers_to_hunks:
full_extended_patch = convert_to_hunks_with_lines_numbers(extended_patch, file)
# add AI-summary metadata to the patch
if file.ai_file_summary and get_settings().get("config.enable_ai_metadata", False):
full_extended_patch = add_ai_summary_top_patch(file, full_extended_patch)
patch_tokens = token_handler.count_tokens(full_extended_patch)
file.tokens = patch_tokens
total_tokens += patch_tokens
@ -239,6 +252,10 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
if convert_hunks_to_line_numbers:
patch = convert_to_hunks_with_lines_numbers(patch, file)
## add AI-summary metadata to the patch (disabled, since we are in the compressed diff)
# if file.ai_file_summary and get_settings().config.get('config.is_auto_command', False):
# patch = add_ai_summary_top_patch(file, patch)
new_patch_tokens = token_handler.count_tokens(patch)
file_dict[file.filename] = {'patch': patch, 'tokens': new_patch_tokens, 'edit_type': file.edit_type}
@ -304,7 +321,7 @@ def generate_full_patch(convert_hunks_to_line_numbers, file_dict, max_tokens_mod
if patch:
if not convert_hunks_to_line_numbers:
patch_final = f"\n\n## file: '{filename.strip()}\n\n{patch.strip()}\n'"
patch_final = f"\n\n## File: '{filename.strip()}\n\n{patch.strip()}\n'"
else:
patch_final = "\n\n" + patch.strip()
patches.append(patch_final)
@ -330,11 +347,9 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT
except:
get_logger().warning(
f"Failed to generate prediction with {model}"
f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
f"{traceback.format_exc()}"
)
if i == len(all_models) - 1: # If it's the last iteration
raise # Re-raise the last exception
raise Exception(f"Failed to generate prediction with any model of {all_models}")
def _get_all_models(model_type: ModelType = ModelType.REGULAR) -> List[str]:
@ -400,11 +415,17 @@ def get_pr_multi_diffs(git_provider: GitProvider,
for lang in pr_languages:
sorted_files.extend(sorted(lang['files'], key=lambda x: x.tokens, reverse=True))
# Get the maximum number of extra lines before and after the patch
PATCH_EXTRA_LINES_BEFORE = get_settings().config.patch_extra_lines_before
PATCH_EXTRA_LINES_AFTER = get_settings().config.patch_extra_lines_after
PATCH_EXTRA_LINES_BEFORE = cap_and_log_extra_lines(PATCH_EXTRA_LINES_BEFORE, "before")
PATCH_EXTRA_LINES_AFTER = cap_and_log_extra_lines(PATCH_EXTRA_LINES_AFTER, "after")
# try first a single run with standard diff string, with patch extension, and no deletions
patches_extended, total_tokens, patches_extended_tokens = pr_generate_extended_diff(
pr_languages, token_handler, add_line_numbers_to_hunks=True,
patch_extra_lines_before=get_settings().config.patch_extra_lines_before,
patch_extra_lines_after=get_settings().config.patch_extra_lines_after)
patch_extra_lines_before=PATCH_EXTRA_LINES_BEFORE,
patch_extra_lines_after=PATCH_EXTRA_LINES_AFTER)
# if we are under the limit, return the full diff
if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < get_max_tokens(model):
@ -432,6 +453,9 @@ def get_pr_multi_diffs(git_provider: GitProvider,
continue
patch = convert_to_hunks_with_lines_numbers(patch, file)
# add AI-summary metadata to the patch
if file.ai_file_summary and get_settings().get("config.enable_ai_metadata", False):
patch = add_ai_summary_top_patch(file, patch)
new_patch_tokens = token_handler.count_tokens(patch)
if patch and (token_handler.prompt_tokens + new_patch_tokens) > get_max_tokens(
@ -479,3 +503,46 @@ def get_pr_multi_diffs(git_provider: GitProvider,
final_diff_list.append(final_diff)
return final_diff_list
def add_ai_metadata_to_diff_files(git_provider, pr_description_files):
"""
Adds AI metadata to the diff files based on the PR description files (FilePatchInfo.ai_file_summary).
"""
try:
if not pr_description_files:
get_logger().warning(f"PR description files are empty.")
return
available_files = {pr_file['full_file_name'].strip(): pr_file for pr_file in pr_description_files}
diff_files = git_provider.get_diff_files()
found_any_match = False
for file in diff_files:
filename = file.filename.strip()
if filename in available_files:
file.ai_file_summary = available_files[filename]
found_any_match = True
if not found_any_match:
get_logger().error(f"Failed to find any matching files between PR description and diff files.",
artifact={"pr_description_files": pr_description_files})
except Exception as e:
get_logger().error(f"Failed to add AI metadata to diff files: {e}",
artifact={"traceback": traceback.format_exc()})
def add_ai_summary_top_patch(file, full_extended_patch):
try:
# below every instance of '## File: ...' in the patch, add the ai-summary metadata
full_extended_patch_lines = full_extended_patch.split("\n")
for i, line in enumerate(full_extended_patch_lines):
if line.startswith("## File:") or line.startswith("## file:"):
full_extended_patch_lines.insert(i + 1,
f"### AI-generated changes summary:\n{file.ai_file_summary['long_summary']}")
full_extended_patch = "\n".join(full_extended_patch_lines)
return full_extended_patch
# if no '## File: ...' was found
return full_extended_patch
except Exception as e:
get_logger().error(f"Failed to add AI summary to the top of the patch: {e}",
artifact={"traceback": traceback.format_exc()})
return full_extended_patch

View File

@ -21,3 +21,4 @@ class FilePatchInfo:
old_filename: str = None
num_plus_lines: int = -1
num_minus_lines: int = -1
ai_file_summary: str = None

View File

@ -1,17 +1,22 @@
from __future__ import annotations
import html
import copy
import difflib
import hashlib
import html
import json
import os
import re
import textwrap
import time
import traceback
from datetime import datetime
from enum import Enum
from typing import Any, List, Tuple
import html2text
import requests
import yaml
from pydantic import BaseModel
from starlette_context import context
@ -38,6 +43,10 @@ class PRReviewHeader(str, Enum):
INCREMENTAL = "## Incremental PR Reviewer Guide"
class PRDescriptionHeader(str, Enum):
CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"
def get_setting(key: str) -> Any:
try:
key = key.upper()
@ -99,8 +108,8 @@ def convert_to_markdown_v2(output_data: dict,
emojis = {
"Can be split": "🔀",
"Possible issues": "",
"Key issues to review": "",
"Recommended focus areas for review": "",
"Score": "🏅",
"Relevant tests": "🧪",
"Focused PR": "",
@ -109,6 +118,7 @@ def convert_to_markdown_v2(output_data: dict,
"Insights from user's answers": "📝",
"Code feedback": "🤖",
"Estimated effort to review [1-5]": "⏱️",
"Ticket compliance check": "🎫",
}
markdown_text = ""
if not incremental_review:
@ -119,6 +129,9 @@ def convert_to_markdown_v2(output_data: dict,
if not output_data or not output_data.get('review', {}):
return ""
if get_settings().get("pr_reviewer.enable_intro_text", False):
markdown_text += f"Here are some key observations to aid the review process:\n\n"
if gfm_supported:
markdown_text += "<table>\n"
@ -161,6 +174,8 @@ def convert_to_markdown_v2(output_data: dict,
markdown_text += f'### {emoji} No relevant tests\n\n'
else:
markdown_text += f"### PR contains tests\n\n"
elif 'ticket compliance check' in key_nice.lower():
markdown_text = ticket_markdown_logic(emoji, markdown_text, value, gfm_supported)
elif 'security concerns' in key_nice.lower():
if gfm_supported:
markdown_text += f"<tr><td>"
@ -188,23 +203,21 @@ def convert_to_markdown_v2(output_data: dict,
if is_value_no(value):
if gfm_supported:
markdown_text += f"<tr><td>"
markdown_text += f"{emoji}&nbsp;<strong>No key issues to review</strong>"
markdown_text += f"{emoji}&nbsp;<strong>No major issues detected</strong>"
markdown_text += f"</td></tr>\n"
else:
markdown_text += f"### {emoji} No key issues to review\n\n"
markdown_text += f"### {emoji} No major issues detected\n\n"
else:
# issues = value.split('\n- ')
issues =value
# for i, _ in enumerate(issues):
# issues[i] = issues[i].strip().strip('-').strip()
issues = value
if gfm_supported:
markdown_text += f"<tr><td>"
markdown_text += f"{emoji}&nbsp;<strong>{key_nice}</strong><br><br>\n\n"
# markdown_text += f"{emoji}&nbsp;<strong>{key_nice}</strong><br><br>\n\n"
markdown_text += f"{emoji}&nbsp;<strong>Recommended focus areas for review</strong><br><br>\n\n"
else:
markdown_text += f"### {emoji} Key issues to review\n\n#### \n"
markdown_text += f"### {emoji} Recommended focus areas for review\n\n#### \n"
for i, issue in enumerate(issues):
try:
if not issue:
if not issue or not isinstance(issue, dict):
continue
relevant_file = issue.get('relevant_file', '').strip()
issue_header = issue.get('issue_header', '').strip()
@ -214,25 +227,12 @@ def convert_to_markdown_v2(output_data: dict,
reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
if gfm_supported:
if get_settings().pr_reviewer.extra_issue_links:
issue_content_linked =copy.deepcopy(issue_content)
referenced_variables_list = issue.get('referenced_variables', [])
for component in referenced_variables_list:
name = component['variable_name'].strip().strip('`')
ind = issue_content.find(name)
if ind != -1:
reference_link_component = git_provider.get_line_link(relevant_file, component['relevant_line'], component['relevant_line'])
issue_content_linked = issue_content_linked[:ind-1] + f"[`{name}`]({reference_link_component})" + issue_content_linked[ind+len(name)+1:]
else:
get_logger().info(f"Failed to find variable in issue content: {component['variable_name'].strip()}")
issue_content = issue_content_linked
issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
else:
issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
markdown_text += f"{issue_str}\n\n"
except Exception as e:
get_logger().exception(f"Failed to process key issues to review: {e}")
get_logger().exception(f"Failed to process 'Recommended focus areas for review': {e}")
if gfm_supported:
markdown_text += f"</td></tr>\n"
else:
@ -265,6 +265,52 @@ def convert_to_markdown_v2(output_data: dict,
return markdown_text
def ticket_markdown_logic(emoji, markdown_text, value, gfm_supported) -> str:
ticket_compliance_str = ""
final_compliance_level = -1
if isinstance(value, list):
for v in value:
ticket_url = v.get('ticket_url', '').strip()
compliance_level = v.get('overall_compliance_level', '').strip()
# add emojis, if 'Fully compliant' ✅, 'Partially compliant' 🔶, or 'Not compliant' ❌
if compliance_level.lower() == 'fully compliant':
# compliance_level = '✅ Fully compliant'
final_compliance_level = 2 if final_compliance_level == -1 else 1
elif compliance_level.lower() == 'partially compliant':
# compliance_level = '🔶 Partially compliant'
final_compliance_level = 1
elif compliance_level.lower() == 'not compliant':
# compliance_level = '❌ Not compliant'
final_compliance_level = 0 if final_compliance_level < 1 else 1
# explanation = v.get('compliance_analysis', '').strip()
explanation = ''
fully_compliant_str = v.get('fully_compliant_requirements', '').strip()
not_compliant_str = v.get('not_compliant_requirements', '').strip()
if fully_compliant_str:
explanation += f"Fully compliant requirements:\n{fully_compliant_str}\n\n"
if not_compliant_str:
explanation += f"Not compliant requirements:\n{not_compliant_str}\n\n"
ticket_compliance_str += f"\n\n**[{ticket_url.split('/')[-1]}]({ticket_url}) - {compliance_level}**\n\n{explanation}\n\n"
if final_compliance_level == 2:
compliance_level = '✅'
elif final_compliance_level == 1:
compliance_level = '🔶'
else:
compliance_level = '❌'
if gfm_supported:
markdown_text += f"<tr><td>\n\n"
markdown_text += f"**{emoji} Ticket compliance analysis {compliance_level}**\n\n"
markdown_text += ticket_compliance_str
markdown_text += f"</td></tr>\n"
else:
markdown_text += f"### {emoji} Ticket compliance analysis {compliance_level}\n\n"
markdown_text += ticket_compliance_str+"\n\n"
return markdown_text
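For reference, a hedged sketch of the input this helper expects; the field names come from the .get() calls above, and the values are purely illustrative:
# Illustrative only: one entry per ticket, as consumed by ticket_markdown_logic().
example_value = [
    {
        "ticket_url": "https://example.com/browse/ABC-123",
        "overall_compliance_level": "Partially compliant",
        "fully_compliant_requirements": "- Adds unit tests",
        "not_compliant_requirements": "- Missing changelog entry",
    }
]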
def process_can_be_split(emoji, value):
try:
# key_nice = "Can this PR be split?"
@ -565,7 +611,8 @@ def load_yaml(response_text: str, keys_fix_yaml: List[str] = [], first_key="", l
get_logger().warning(f"Initial failure to parse AI prediction: {e}")
data = try_fix_yaml(response_text, keys_fix_yaml=keys_fix_yaml, first_key=first_key, last_key=last_key)
if not data:
get_logger().error(f"Failed to parse AI prediction after fallbacks", artifact={'response_text': response_text})
get_logger().error(f"Failed to parse AI prediction after fallbacks",
artifact={'response_text': response_text})
else:
get_logger().info(f"Successfully parsed AI prediction after fallbacks",
artifact={'response_text': response_text})
@ -852,56 +899,64 @@ def find_line_number_of_relevant_line_in_file(diff_files: List[FilePatchInfo],
break
return position, absolute_position
def validate_and_await_rate_limit(rate_limit_status=None, git_provider=None, get_rate_limit_status_func=None):
if git_provider and not rate_limit_status:
rate_limit_status = {'resources': git_provider.github_client.get_rate_limit().raw_data}
def get_rate_limit_status(github_token) -> dict:
GITHUB_API_URL = get_settings(use_context=False).get("GITHUB.BASE_URL", "https://api.github.com").rstrip("/") # "https://api.github.com"
# GITHUB_API_URL = "https://api.github.com"
RATE_LIMIT_URL = f"{GITHUB_API_URL}/rate_limit"
HEADERS = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {github_token}"
}
if not rate_limit_status:
rate_limit_status = get_rate_limit_status_func()
response = requests.get(RATE_LIMIT_URL, headers=HEADERS)
try:
rate_limit_info = response.json()
if rate_limit_info.get('message') == 'Rate limiting is not enabled.': # for github enterprise
return {'resources': {}}
response.raise_for_status() # Check for HTTP errors
except: # retry
time.sleep(0.1)
response = requests.get(RATE_LIMIT_URL, headers=HEADERS)
return response.json()
return rate_limit_info
def validate_rate_limit_github(github_token, installation_id=None, threshold=0.1) -> bool:
try:
rate_limit_status = get_rate_limit_status(github_token)
if installation_id:
get_logger().debug(f"installation_id: {installation_id}, Rate limit status: {rate_limit_status['rate']}")
# validate that the rate limit is not exceeded
is_rate_limit = False
for key, value in rate_limit_status['resources'].items():
if value['remaining'] == 0:
print(f"key: {key}, value: {value}")
is_rate_limit = True
sleep_time_sec = value['reset'] - datetime.now().timestamp()
sleep_time_hour = sleep_time_sec / 3600.0
print(f"Rate limit exceeded. Sleeping for {sleep_time_hour} hours")
if sleep_time_sec > 0:
time.sleep(sleep_time_sec+1)
if git_provider:
rate_limit_status = {'resources': git_provider.github_client.get_rate_limit().raw_data}
else:
rate_limit_status = get_rate_limit_status_func()
return is_rate_limit
# validate that the rate limit is not exceeded
for key, value in rate_limit_status['resources'].items():
if value['remaining'] < value['limit'] * threshold:
get_logger().error(f"key: {key}, value: {value}")
return False
return True
except Exception as e:
get_logger().error(f"Error in rate limit {e}",
artifact={"traceback": traceback.format_exc()})
return True
def get_largest_component(pr_url):
from pr_agent.tools.pr_analyzer import PRAnalyzer
publish_output = get_settings().config.publish_output
get_settings().config.publish_output = False # disable publish output
analyzer = PRAnalyzer(pr_url)
methods_dict_files = analyzer.run_sync()
get_settings().config.publish_output = publish_output
max_lines_changed = 0
file_b = ""
component_name_b = ""
for file in methods_dict_files:
for method in methods_dict_files[file]:
try:
if methods_dict_files[file][method]['num_plus_lines'] > max_lines_changed:
max_lines_changed = methods_dict_files[file][method]['num_plus_lines']
file_b = file
component_name_b = method
except:
pass
if component_name_b:
get_logger().info(f"Using the largest changed component: '{component_name_b}'")
return component_name_b, file_b
else:
return None, None
def validate_and_await_rate_limit(github_token):
try:
rate_limit_status = get_rate_limit_status(github_token)
# validate that the rate limit is not exceeded
for key, value in rate_limit_status['resources'].items():
if value['remaining'] < value['limit'] // 80:
get_logger().error(f"key: {key}, value: {value}")
sleep_time_sec = value['reset'] - datetime.now().timestamp()
sleep_time_hour = sleep_time_sec / 3600.0
get_logger().error(f"Rate limit exceeded. Sleeping for {sleep_time_hour} hours")
if sleep_time_sec > 0:
time.sleep(sleep_time_sec + 1)
rate_limit_status = get_rate_limit_status(github_token)
return rate_limit_status
except:
get_logger().error("Error in rate limit")
return None
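A minimal caller sketch for the rate-limit helpers above; the token value and the call site are illustrative and not part of this diff:
# Illustrative only: skip optional GitHub API work when any resource is below the
# default 10% threshold used by validate_rate_limit_github (threshold=0.1).
if not validate_rate_limit_github(github_token="<your-token>", threshold=0.1):
    get_logger().warning("GitHub rate limit nearly exhausted; deferring non-essential API calls")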
def github_action_output(output_data: dict, key_name: str):
try:
@ -917,21 +972,24 @@ def github_action_output(output_data: dict, key_name: str):
def show_relevant_configurations(relevant_section: str) -> str:
forbidden_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider',
skip_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider', "skip_keys", "app_id", "redirect",
'trial_prefix_message', 'no_eligible_message', 'identity_provider', 'ALLOWED_REPOS','APP_NAME']
extra_skip_keys = get_settings().config.get('config.skip_keys', [])
if extra_skip_keys:
skip_keys.extend(extra_skip_keys)
markdown_text = ""
markdown_text += "\n<hr>\n<details> <summary><strong>🛠️ Relevant configurations:</strong></summary> \n\n"
markdown_text +="<br>These are the relevant [configurations](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) for this tool:\n\n"
markdown_text += f"**[config**]\n```yaml\n\n"
for key, value in get_settings().config.items():
if key in forbidden_keys:
if key in skip_keys:
continue
markdown_text += f"{key}: {value}\n"
markdown_text += "\n```\n"
markdown_text += f"\n**[{relevant_section}]**\n```yaml\n\n"
for key, value in get_settings().get(relevant_section, {}).items():
if key in forbidden_keys:
if key in skip_keys:
continue
markdown_text += f"{key}: {value}\n"
markdown_text += "\n```"
@ -945,3 +1003,97 @@ def is_value_no(value):
if value_str == 'no' or value_str == 'none' or value_str == 'false':
return True
return False
def set_pr_string(repo_name, pr_number):
return f"{repo_name}#{pr_number}"
def string_to_uniform_number(s: str) -> float:
"""
Convert a string to a uniform number in the range [0, 1].
The uniform distribution is achieved by the nature of the SHA-256 hash function, which produces a uniformly distributed hash value over its output space.
"""
# Generate a hash of the string
hash_object = hashlib.sha256(s.encode())
# Convert the hash to an integer
hash_int = int(hash_object.hexdigest(), 16)
# Normalize the integer to the range [0, 1]
max_hash_int = 2 ** 256 - 1
uniform_number = float(hash_int) / max_hash_int
return uniform_number
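A short usage sketch for the helper above; the sampling use case is illustrative, and the import path assumes this file is the pr_agent.algo.utils module:
# Illustrative only: deterministically sample ~10% of PRs by hashing their URL.
from pr_agent.algo.utils import string_to_uniform_number

def should_sample_pr(pr_url: str, sample_rate: float = 0.1) -> bool:
    # The same URL always maps to the same value in [0, 1], so the decision is stable across runs.
    return string_to_uniform_number(pr_url) < sample_rate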
def process_description(description_full: str) -> Tuple[str, List]:
if not description_full:
return "", []
description_split = description_full.split(PRDescriptionHeader.CHANGES_WALKTHROUGH.value)
base_description_str = description_split[0]
changes_walkthrough_str = ""
files = []
if len(description_split) > 1:
changes_walkthrough_str = description_split[1]
else:
get_logger().debug("No changes walkthrough found")
try:
if changes_walkthrough_str:
# get the end of the table
if '</table>\n\n___' in changes_walkthrough_str:
end = changes_walkthrough_str.index("</table>\n\n___")
elif '\n___' in changes_walkthrough_str:
end = changes_walkthrough_str.index("\n___")
else:
end = len(changes_walkthrough_str)
changes_walkthrough_str = changes_walkthrough_str[:end]
h = html2text.HTML2Text()
h.body_width = 0 # Disable line wrapping
# find all the files
pattern = r'<tr>\s*<td>\s*(<details>\s*<summary>(.*?)</summary>(.*?)</details>)\s*</td>'
files_found = re.findall(pattern, changes_walkthrough_str, re.DOTALL)
for file_data in files_found:
try:
if isinstance(file_data, tuple):
file_data = file_data[0]
pattern = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*<li>(.*?)</details>'
res = re.search(pattern, file_data, re.DOTALL)
if not res or res.lastindex != 4:
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>'
res = re.search(pattern_back, file_data, re.DOTALL)
if not res or res.lastindex != 4:
pattern_back = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>' # looking for hyphen ('- ')
res = re.search(pattern_back, file_data, re.DOTALL)
if res and res.lastindex == 4:
short_filename = res.group(1).strip()
short_summary = res.group(2).strip()
long_filename = res.group(3).strip()
long_summary = res.group(4).strip()
long_summary = long_summary.replace('<br> *', '\n*').replace('<br>','').replace('\n','<br>')
long_summary = h.handle(long_summary).strip()
if long_summary.startswith('\\-'):
long_summary = "* " + long_summary[2:]
elif not long_summary.startswith('*'):
long_summary = f"* {long_summary}"
files.append({
'short_file_name': short_filename,
'full_file_name': long_filename,
'short_summary': short_summary,
'long_summary': long_summary
})
else:
if '<code>...</code>' in file_data:
pass # PR with many files. some did not get analyzed
else:
get_logger().error(f"Failed to parse description", artifact={'description': file_data})
except Exception as e:
get_logger().exception(f"Failed to process description: {e}", artifact={'description': file_data})
except Exception as e:
get_logger().exception(f"Failed to process description: {e}")
return base_description_str, files

View File

@ -27,8 +27,9 @@ global_settings = Dynaconf(
"settings/pr_update_changelog_prompts.toml",
"settings/pr_custom_labels.toml",
"settings/pr_add_docs.toml",
"settings/custom_labels.toml",
"settings/pr_help_prompts.toml",
"settings_prod/.secrets.toml",
"settings/custom_labels.toml"
]]
)

View File

@ -5,7 +5,7 @@ from urllib.parse import urlparse
from ..algo.file_filter import filter_ignored
from ..log import get_logger
from ..algo.language_handler import is_valid_file
from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff
from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff, PRDescriptionHeader
from ..config_loader import get_settings
from .git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
@ -316,7 +316,7 @@ class AzureDevopsProvider(GitProvider):
new_file_content_str = new_file_content_str.content
except Exception as error:
get_logger().error(f"Failed to retrieve new file content of {file} at version {version}. Error: {str(error)}")
get_logger().error(f"Failed to retrieve new file content of {file} at version {version}", error=error)
# get_logger().error(
# "Failed to retrieve new file content of %s at version %s. Error: %s",
# file,
@ -336,19 +336,22 @@ class AzureDevopsProvider(GitProvider):
version = GitVersionDescriptor(
version=base_sha.commit_id, version_type="commit"
)
try:
original_file_content_str = self.azure_devops_client.get_item(
repository_id=self.repo_slug,
path=file,
project=self.workspace_slug,
version_descriptor=version,
download=False,
include_content=True,
)
original_file_content_str = original_file_content_str.content
except Exception as error:
get_logger().error(f"Failed to retrieve original file content of {file} at version {version}. Error: {str(error)}")
if edit_type == EDIT_TYPE.ADDED:
original_file_content_str = ""
else:
try:
original_file_content_str = self.azure_devops_client.get_item(
repository_id=self.repo_slug,
path=file,
project=self.workspace_slug,
version_descriptor=version,
download=False,
include_content=True,
)
original_file_content_str = original_file_content_str.content
except Exception as error:
get_logger().error(f"Failed to retrieve original file content of {file} at version {version}", error=error)
original_file_content_str = ""
patch = load_large_diff(
file, new_file_content_str, original_file_content_str, show_warning=False
@ -375,12 +378,12 @@ class AzureDevopsProvider(GitProvider):
self.diff_files = diff_files
return diff_files
except Exception as e:
print(f"Error: {str(e)}")
get_logger().exception(f"Failed to get diff files, error: {e}")
return []
def publish_comment(self, pr_comment: str, is_temporary: bool = False, thread_context=None):
comment = Comment(content=pr_comment)
thread = CommentThread(comments=[comment], thread_context=thread_context, status=1)
thread = CommentThread(comments=[comment], thread_context=thread_context, status=5)
thread_response = self.azure_devops_client.create_thread(
comment_thread=thread,
project=self.workspace_slug,
@ -401,7 +404,7 @@ class AzureDevopsProvider(GitProvider):
pr_body = pr_body[:ind]
if len(pr_body) > MAX_PR_DESCRIPTION_AZURE_LENGTH:
changes_walkthrough_text = '## **Changes walkthrough**'
changes_walkthrough_text = PRDescriptionHeader.CHANGES_WALKTHROUGH.value
ind = pr_body.find(changes_walkthrough_text)
if ind != -1:
pr_body = pr_body[:ind]
@ -516,16 +519,9 @@ class AzureDevopsProvider(GitProvider):
source_branch = pr_info.source_ref_name.split("/")[-1]
return source_branch
def get_pr_description(self, *, full: bool = True) -> str:
max_tokens = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
if max_tokens:
return clip_tokens(self.pr.description, max_tokens)
return self.pr.description
def get_user_id(self):
return 0
def get_issue_comments(self):
threads = self.azure_devops_client.get_threads(repository_id=self.repo_slug, pull_request_id=self.pr_num, project=self.workspace_slug)
threads.reverse()
@ -549,18 +545,20 @@ class AzureDevopsProvider(GitProvider):
parsed_url = urlparse(pr_url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) < 6 or path_parts[4] != "pullrequest":
if "pullrequest" not in path_parts:
raise ValueError(
"The provided URL does not appear to be a Azure DevOps PR URL"
)
workspace_slug = path_parts[1]
repo_slug = path_parts[3]
try:
if len(path_parts) == 6: # "https://dev.azure.com/organization/project/_git/repo/pullrequest/1"
workspace_slug = path_parts[1]
repo_slug = path_parts[3]
pr_number = int(path_parts[5])
except ValueError as e:
raise ValueError("Unable to convert PR number to integer") from e
elif len(path_parts) == 5: # 'https://organization.visualstudio.com/project/_git/repo/pullrequest/1'
workspace_slug = path_parts[0]
repo_slug = path_parts[2]
pr_number = int(path_parts[4])
else:
raise ValueError("The provided URL does not appear to be a Azure DevOps PR URL")
return workspace_slug, repo_slug, pr_number
@ -620,3 +618,6 @@ class AzureDevopsProvider(GitProvider):
get_logger().error(f"Failed to get pr id, error: {e}")
return ""
def publish_file_comments(self, file_comments: list) -> bool:
pass

View File

@ -227,7 +227,10 @@ class BitbucketProvider(GitProvider):
try:
counter_valid += 1
if counter_valid < MAX_FILES_ALLOWED_FULL // 2: # factor 2 because bitbucket has limited API calls
if get_settings().get("bitbucket_app.avoid_full_files", False):
original_file_content_str = ""
new_file_content_str = ""
elif counter_valid < MAX_FILES_ALLOWED_FULL // 2: # factor 2 because bitbucket has limited API calls
if diff.old.get_data("links"):
original_file_content_str = self._get_pr_file_content(
diff.old.get_data("links")['self']['href'])

View File

@ -3,7 +3,7 @@ from abc import ABC, abstractmethod
# enum EDIT_TYPE (ADDED, DELETED, MODIFIED, RENAMED)
from typing import Optional
from pr_agent.algo.utils import Range
from pr_agent.algo.utils import Range, process_description
from pr_agent.config_loader import get_settings
from pr_agent.algo.types import FilePatchInfo
from pr_agent.log import get_logger
@ -61,14 +61,20 @@ class GitProvider(ABC):
def reply_to_comment_from_comment_id(self, comment_id: int, body: str):
pass
def get_pr_description(self, *, full: bool = True) -> str:
def get_pr_description(self, full: bool = True, split_changes_walkthrough=False) -> str or tuple:
from pr_agent.config_loader import get_settings
from pr_agent.algo.utils import clip_tokens
max_tokens_description = get_settings().get("CONFIG.MAX_DESCRIPTION_TOKENS", None)
description = self.get_pr_description_full() if full else self.get_user_description()
if max_tokens_description:
return clip_tokens(description, max_tokens_description)
return description
if split_changes_walkthrough:
description, files = process_description(description)
if max_tokens_description:
description = clip_tokens(description, max_tokens_description)
return description, files
else:
if max_tokens_description:
description = clip_tokens(description, max_tokens_description)
return description
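A brief, hypothetical caller sketch for the split mode above, where provider stands for any instantiated git provider:
# Illustrative only: read the PR description and the parsed per-file walkthrough entries separately.
def print_changes_walkthrough(provider) -> None:
    description, files = provider.get_pr_description(split_changes_walkthrough=True)
    for f in files:
        # Keys come from process_description(): 'short_file_name', 'full_file_name', 'short_summary', 'long_summary'.
        print(f"{f['full_file_name']}: {f['short_summary']}")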
def get_user_description(self) -> str:
if hasattr(self, 'user_description') and not (self.user_description is None):

View File

@ -1,22 +1,22 @@
import itertools
import time
import hashlib
import traceback
from datetime import datetime
from typing import Optional, Tuple
from urllib.parse import urlparse
from github import AppAuthentication, Auth, Github, GithubException
from github import AppAuthentication, Auth, Github
from retry import retry
from starlette_context import context
from ..algo.file_filter import filter_ignored
from ..algo.language_handler import is_valid_file
from ..algo.types import EDIT_TYPE
from ..algo.utils import PRReviewHeader, load_large_diff, clip_tokens, find_line_number_of_relevant_line_in_file, Range
from ..config_loader import get_settings
from ..log import get_logger
from ..servers.utils import RateLimitExceeded
from .git_provider import GitProvider, IncrementalPR, MAX_FILES_ALLOWED_FULL
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo
from .git_provider import FilePatchInfo, GitProvider, IncrementalPR, MAX_FILES_ALLOWED_FULL
class GithubProvider(GitProvider):
@ -27,10 +27,8 @@ class GithubProvider(GitProvider):
except Exception:
self.installation_id = None
self.max_comment_chars = 65000
self.base_url = get_settings().get("GITHUB.BASE_URL", "https://api.github.com").rstrip("/")
self.base_url = get_settings().get("GITHUB.BASE_URL", "https://api.github.com").rstrip("/") # "https://api.github.com"
self.base_url_html = self.base_url.split("api/")[0].rstrip("/") if "api/" in self.base_url else "https://github.com"
self.base_domain = self.base_url.replace("https://", "").replace("http://", "")
self.base_domain_html = self.base_url_html.replace("https://", "").replace("http://", "")
self.github_client = self._get_github_client()
self.repo = None
self.pr_num = None
@ -233,8 +231,9 @@ class GithubProvider(GitProvider):
return diff_files
except GithubException.RateLimitExceededException as e:
get_logger().error(f"Rate limit exceeded for GitHub API. Original message: {e}")
except Exception as e:
get_logger().error(f"Failing to get diff files: {e}",
artifact={"traceback": traceback.format_exc()})
raise RateLimitExceeded("Rate limit exceeded for GitHub API.") from e
def publish_description(self, pr_title: str, pr_body: str):
@ -256,7 +255,7 @@ class GithubProvider(GitProvider):
def publish_comment(self, pr_comment: str, is_temporary: bool = False):
if is_temporary and not get_settings().config.publish_output_progress:
get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
return
return None
pr_comment = self.limit_output_characters(pr_comment, self.max_comment_chars)
response = self.pr.create_issue_comment(pr_comment)
if hasattr(response, "user") and hasattr(response.user, "login"):
@ -611,8 +610,11 @@ class GithubProvider(GitProvider):
def _parse_pr_url(self, pr_url: str) -> Tuple[str, int]:
parsed_url = urlparse(pr_url)
if parsed_url.path.startswith('/api/v3'):
parsed_url = urlparse(pr_url.replace("/api/v3", ""))
path_parts = parsed_url.path.strip('/').split('/')
if self.base_domain in parsed_url.netloc:
if 'api.github.com' in parsed_url.netloc or '/api/v3' in pr_url:
if len(path_parts) < 5 or path_parts[3] != 'pulls':
raise ValueError("The provided URL does not appear to be a GitHub PR URL")
repo_name = '/'.join(path_parts[1:3])
@ -635,8 +637,12 @@ class GithubProvider(GitProvider):
def _parse_issue_url(self, issue_url: str) -> Tuple[str, int]:
parsed_url = urlparse(issue_url)
if 'github.com' not in parsed_url.netloc:
raise ValueError("The provided URL is not a valid GitHub URL")
path_parts = parsed_url.path.strip('/').split('/')
if self.base_domain in parsed_url.netloc:
if 'api.github.com' in parsed_url.netloc:
if len(path_parts) < 5 or path_parts[3] != 'issues':
raise ValueError("The provided URL does not appear to be a GitHub ISSUE URL")
repo_name = '/'.join(path_parts[1:3])

View File

@ -551,7 +551,7 @@ class GitLabProvider(GitProvider):
if relevant_line_start == -1:
link = f"{self.gl.url}/{self.id_project}/-/blob/{self.mr.source_branch}/{relevant_file}?ref_type=heads"
elif relevant_line_end:
link = f"{self.gl.url}/{self.id_project}/-/blob/{self.mr.source_branch}/{relevant_file}?ref_type=heads#L{relevant_line_start}-L{relevant_line_end}"
link = f"{self.gl.url}/{self.id_project}/-/blob/{self.mr.source_branch}/{relevant_file}?ref_type=heads#L{relevant_line_start}-{relevant_line_end}"
else:
link = f"{self.gl.url}/{self.id_project}/-/blob/{self.mr.source_branch}/{relevant_file}?ref_type=heads#L{relevant_line_start}"
return link

View File

@ -27,18 +27,27 @@ def apply_repo_settings(pr_url):
except Exception:
pass
error_local = None
if repo_settings:
repo_settings_file = None
fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
os.write(fd, repo_settings)
new_settings = Dynaconf(settings_files=[repo_settings_file])
for section, contents in new_settings.as_dict().items():
section_dict = copy.deepcopy(get_settings().as_dict().get(section, {}))
for key, value in contents.items():
section_dict[key] = value
get_settings().unset(section)
get_settings().set(section, section_dict, merge=False)
get_logger().info(f"Applying repo settings:\n{new_settings.as_dict()}")
category = 'local'
try:
fd, repo_settings_file = tempfile.mkstemp(suffix='.toml')
os.write(fd, repo_settings)
new_settings = Dynaconf(settings_files=[repo_settings_file])
for section, contents in new_settings.as_dict().items():
section_dict = copy.deepcopy(get_settings().as_dict().get(section, {}))
for key, value in contents.items():
section_dict[key] = value
get_settings().unset(section)
get_settings().set(section, section_dict, merge=False)
get_logger().info(f"Applying repo settings:\n{new_settings.as_dict()}")
except Exception as e:
get_logger().warning(f"Failed to apply repo {category} settings, error: {str(e)}")
error_local = {'error': str(e), 'settings': repo_settings, 'category': category}
if error_local:
handle_configurations_errors([error_local], git_provider)
except Exception as e:
get_logger().exception("Failed to apply repo settings", e)
finally:
@ -49,10 +58,40 @@ def apply_repo_settings(pr_url):
get_logger().error(f"Failed to remove temporary settings file {repo_settings_file}", e)
# enable switching models with a short definition
if get_settings().config.model.lower()=='claude-3-5-sonnet':
if get_settings().config.model.lower() == 'claude-3-5-sonnet':
set_claude_model()
def handle_configurations_errors(config_errors, git_provider):
try:
if not any(config_errors):
return
for err in config_errors:
if err:
configuration_file_content = err['settings'].decode()
err_message = err['error']
config_type = err['category']
header = f"❌ **PR-Agent failed to apply '{config_type}' repo settings**"
body = f"{header}\n\nThe configuration file needs to be a valid [TOML](https://qodo-merge-docs.qodo.ai/usage-guide/configuration_options/), please fix it.\n\n"
body += f"___\n\n**Error message:**\n`{err_message}`\n\n"
if git_provider.is_supported("gfm_markdown"):
body += f"\n\n<details><summary>Configuration content:</summary>\n\n```toml\n{configuration_file_content}\n```\n\n</details>"
else:
body += f"\n\n**Configuration content:**\n\n```toml\n{configuration_file_content}\n```\n\n"
get_logger().warning(f"Sending a 'configuration error' comment to the PR", artifact={'body': body})
# git_provider.publish_comment(body)
if hasattr(git_provider, 'publish_persistent_comment'):
git_provider.publish_persistent_comment(body,
initial_header=header,
update_header=False,
final_update_message=False)
else:
git_provider.publish_comment(body)
except Exception as e:
get_logger().exception(f"Failed to handle configurations errors", e)
def set_claude_model():
"""
set the claude-sonnet-3.5 model easily (even by users), just by stating: --config.model='claude-3-5-sonnet'

View File

@ -16,8 +16,9 @@
},
"scopes": [
"account",
"repository",
"pullrequest"
"repository:write",
"pullrequest:write",
"wiki"
],
"contexts": [
"account"
@ -30,4 +31,4 @@
}
]
}
}
}

View File

@ -68,6 +68,7 @@ def authorize(credentials: HTTPBasicCredentials = Depends(security)):
async def _perform_commands_azure(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
apply_repo_settings(api_url)
commands = get_settings().get(f"azure_devops_server.{commands_conf}")
get_settings().set("config.is_auto_command", True)
for command in commands:
try:
split_command = command.split(" ")

View File

@ -3,6 +3,7 @@ import copy
import hashlib
import json
import os
import re
import time
import jwt
@ -74,9 +75,13 @@ async def handle_manifest(request: Request, response: Response):
return JSONResponse(manifest_obj)
async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict, data: dict):
apply_repo_settings(api_url)
if data.get("event", "") == "pullrequest:created":
if not should_process_pr_logic(data):
return
commands = get_settings().get(f"bitbucket_app.{commands_conf}", {})
get_settings().set("config.is_auto_command", True)
for command in commands:
try:
split_command = command.split(" ")
@ -91,6 +96,48 @@ async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_ur
get_logger().error(f"Failed to perform command {command}: {e}")
def is_bot_user(data) -> bool:
try:
if data["data"]["actor"]["type"] != "user":
get_logger().info(f"BitBucket actor type is not 'user': {data['data']['actor']['type']}")
return True
except Exception as e:
get_logger().error("Failed 'is_bot_user' logic: {e}")
return False
def should_process_pr_logic(data) -> bool:
try:
pr_data = data.get("data", {}).get("pullrequest", {})
title = pr_data.get("title", "")
source_branch = pr_data.get("source", {}).get("branch", {}).get("name", "")
target_branch = pr_data.get("destination", {}).get("branch", {}).get("name", "")
# logic to ignore PRs with specific titles
if title:
ignore_pr_title_re = get_settings().get("CONFIG.IGNORE_PR_TITLE", [])
if not isinstance(ignore_pr_title_re, list):
ignore_pr_title_re = [ignore_pr_title_re]
if ignore_pr_title_re and any(re.search(regex, title) for regex in ignore_pr_title_re):
get_logger().info(f"Ignoring PR with title '{title}' due to config.ignore_pr_title setting")
return False
ignore_pr_source_branches = get_settings().get("CONFIG.IGNORE_PR_SOURCE_BRANCHES", [])
ignore_pr_target_branches = get_settings().get("CONFIG.IGNORE_PR_TARGET_BRANCHES", [])
if (ignore_pr_source_branches or ignore_pr_target_branches):
if any(re.search(regex, source_branch) for regex in ignore_pr_source_branches):
get_logger().info(
f"Ignoring PR with source branch '{source_branch}' due to config.ignore_pr_source_branches settings")
return False
if any(re.search(regex, target_branch) for regex in ignore_pr_target_branches):
get_logger().info(
f"Ignoring PR with target branch '{target_branch}' due to config.ignore_pr_target_branches settings")
return False
except Exception as e:
get_logger().error(f"Failed 'should_process_pr_logic': {e}")
return True
@router.post("/webhook")
async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Request):
app_name = get_settings().get("CONFIG.APP_NAME", "Unknown")
@ -101,13 +148,17 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
input_jwt = jwt_header.split(" ")[1]
data = await request.json()
get_logger().debug(data)
async def inner():
try:
try:
if data["data"]["actor"]["type"] != "user":
# ignore bot users
if is_bot_user(data):
return "OK"
# Check if the PR should be processed
if data.get("event", "") == "pullrequest:created":
if not should_process_pr_logic(data):
return "OK"
except KeyError:
get_logger().error("Failed to get actor type, check previous logs, this shouldn't happen.")
# Get the username of the sender
try:
@ -145,17 +196,7 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
if get_identity_provider().verify_eligibility("bitbucket",
sender_id, pr_url) is not Eligibility.NOT_ELIGIBLE:
if get_settings().get("bitbucket_app.pr_commands"):
await _perform_commands_bitbucket("pr_commands", PRAgent(), pr_url, log_context)
else: # backwards compatibility
auto_review = get_setting_or_env("BITBUCKET_APP.AUTO_REVIEW", None)
if is_true(auto_review): # by default, auto review is disabled
await PRReviewer(pr_url).run()
auto_improve = get_setting_or_env("BITBUCKET_APP.AUTO_IMPROVE", None)
if is_true(auto_improve): # by default, auto improve is disabled
await PRCodeSuggestions(pr_url).run()
auto_describe = get_setting_or_env("BITBUCKET_APP.AUTO_DESCRIBE", None)
if is_true(auto_describe): # by default, auto describe is disabled
await PRDescription(pr_url).run()
await _perform_commands_bitbucket("pr_commands", PRAgent(), pr_url, log_context, data)
elif event == "pullrequest:comment_created":
pr_url = data["data"]["pullrequest"]["links"]["html"]["href"]
log_context["api_url"] = pr_url

View File

@ -80,10 +80,14 @@ async def run_action():
except Exception as e:
get_logger().info(f"github action: failed to apply repo settings: {e}")
# Handle pull request event
# Handle pull request opened event
if GITHUB_EVENT_NAME == "pull_request":
action = event_payload.get("action")
if action in ["opened", "reopened", "ready_for_review", "review_requested"]:
# Retrieve the list of actions from the configuration
pr_actions = get_settings().get("GITHUB_ACTION_CONFIG.PR_ACTIONS", ["opened", "reopened", "ready_for_review", "review_requested"])
if action in pr_actions:
pr_url = event_payload.get("pull_request", {}).get("url")
if pr_url:
# legacy - supporting both GITHUB_ACTION and GITHUB_ACTION_CONFIG
@ -97,9 +101,13 @@ async def run_action():
if auto_improve is None:
auto_improve = get_setting_or_env("GITHUB_ACTION_CONFIG.AUTO_IMPROVE", None)
# Set the configuration for auto actions
get_settings().config.is_auto_command = True # Set the flag to indicate that the command is auto
get_settings().pr_description.final_update_message = False # No final update message when auto_describe is enabled
get_logger().info(f"Running auto actions: auto_describe={auto_describe}, auto_review={auto_review}, auto_improve={auto_improve}")
# invoke by default all three tools
if auto_describe is None or is_true(auto_describe):
get_settings().pr_description.final_update_message = False # No final update message when auto_describe is enabled
await PRDescription(pr_url).run()
if auto_review is None or is_true(auto_review):
await PRReviewer(pr_url).run()

View File

@ -128,8 +128,6 @@ async def handle_new_pr_opened(body: Dict[str, Any],
log_context: Dict[str, Any],
agent: PRAgent):
title = body.get("pull_request", {}).get("title", "")
get_settings().config.is_auto_command = True
pull_request, api_url = _check_pull_request_event(action, body, log_context)
if not (pull_request and api_url):
@ -138,13 +136,6 @@ async def handle_new_pr_opened(body: Dict[str, Any],
if action in get_settings().github_app.handle_pr_actions: # ['opened', 'reopened', 'ready_for_review']
# logic to ignore PRs with specific titles (e.g. "[Auto] ...")
apply_repo_settings(api_url)
ignore_pr_title_re = get_settings().get("GITHUB_APP.IGNORE_PR_TITLE", [])
if not isinstance(ignore_pr_title_re, list):
ignore_pr_title_re = [ignore_pr_title_re]
if ignore_pr_title_re and any(re.search(regex, title) for regex in ignore_pr_title_re):
get_logger().info(f"Ignoring PR with title '{title}' due to github_app.ignore_pr_title setting")
return {}
if get_identity_provider().verify_eligibility("github", sender_id, api_url) is not Eligibility.NOT_ELIGIBLE:
await _perform_auto_commands_github("pr_commands", agent, body, api_url, log_context)
else:
@ -246,6 +237,60 @@ def get_log_context(body, event, action, build_number):
return log_context, sender, sender_id, sender_type
def is_bot_user(sender, sender_type):
try:
# logic to ignore PRs opened by bot
if get_settings().get("GITHUB_APP.IGNORE_BOT_PR", False) and sender_type == "Bot":
if 'pr-agent' not in sender:
get_logger().info(f"Ignoring PR from '{sender=}' because it is a bot")
return True
except Exception as e:
get_logger().error(f"Failed 'is_bot_user' logic: {e}")
return False
def should_process_pr_logic(body) -> bool:
try:
pull_request = body.get("pull_request", {})
title = pull_request.get("title", "")
pr_labels = pull_request.get("labels", [])
source_branch = pull_request.get("head", {}).get("ref", "")
target_branch = pull_request.get("base", {}).get("ref", "")
# logic to ignore PRs with specific titles
if title:
ignore_pr_title_re = get_settings().get("CONFIG.IGNORE_PR_TITLE", [])
if not isinstance(ignore_pr_title_re, list):
ignore_pr_title_re = [ignore_pr_title_re]
if ignore_pr_title_re and any(re.search(regex, title) for regex in ignore_pr_title_re):
get_logger().info(f"Ignoring PR with title '{title}' due to config.ignore_pr_title setting")
return False
# logic to ignore PRs with specific labels or source branches or target branches.
ignore_pr_labels = get_settings().get("CONFIG.IGNORE_PR_LABELS", [])
if pr_labels and ignore_pr_labels:
labels = [label['name'] for label in pr_labels]
if any(label in ignore_pr_labels for label in labels):
labels_str = ", ".join(labels)
get_logger().info(f"Ignoring PR with labels '{labels_str}' due to config.ignore_pr_labels settings")
return False
ignore_pr_source_branches = get_settings().get("CONFIG.IGNORE_PR_SOURCE_BRANCHES", [])
ignore_pr_target_branches = get_settings().get("CONFIG.IGNORE_PR_TARGET_BRANCHES", [])
if pull_request and (ignore_pr_source_branches or ignore_pr_target_branches):
if any(re.search(regex, source_branch) for regex in ignore_pr_source_branches):
get_logger().info(
f"Ignoring PR with source branch '{source_branch}' due to config.ignore_pr_source_branches settings")
return False
if any(re.search(regex, target_branch) for regex in ignore_pr_target_branches):
get_logger().info(
f"Ignoring PR with target branch '{target_branch}' due to config.ignore_pr_target_branches settings")
return False
except Exception as e:
get_logger().error(f"Failed 'should_process_pr_logic': {e}")
return True
async def handle_request(body: Dict[str, Any], event: str):
"""
Handle incoming GitHub webhook requests.
@ -254,19 +299,20 @@ async def handle_request(body: Dict[str, Any], event: str):
body: The request body.
event: The GitHub event type (e.g. "pull_request", "issue_comment", etc.).
"""
action = body.get("action") # "created", "opened", "reopened", "ready_for_review", "review_requested", "synchronize"
action = body.get("action") # "created", "opened", "reopened", "ready_for_review", "review_requested", "synchronize"
if not action:
return {}
agent = PRAgent()
log_context, sender, sender_id, sender_type = get_log_context(body, event, action, build_number)
# logic to ignore PRs opened by bot
if get_settings().get("GITHUB_APP.IGNORE_BOT_PR", False) and sender_type == "Bot":
if 'pr-agent' not in sender:
get_logger().info(f"Ignoring PR from '{sender=}' because it is a bot")
# logic to ignore PRs opened by bot, PRs with specific titles, labels, source branches, or target branches
if is_bot_user(sender, sender_type) and 'check_run' not in body:
return {}
if action != 'created' and 'check_run' not in body:
if not should_process_pr_logic(body):
return {}
if 'check_run' in body: # handle failed checks
if 'check_run' in body: # handle failed checks
# get_logger().debug(f'Request body', artifact=body, event=event) # added inside handle_checks
pass
# handle comments on PRs
@ -281,7 +327,6 @@ async def handle_request(body: Dict[str, Any], event: str):
pass # handle_checkbox_clicked
# handle pull_request event with synchronize action - "push trigger" for new commits
elif event == 'pull_request' and action == 'synchronize':
# get_logger().debug(f'Request body', artifact=body, event=event) # added inside handle_push_trigger_for_new_commits
await handle_push_trigger_for_new_commits(body, event, sender,sender_id, action, log_context, agent)
elif event == 'pull_request' and action == 'closed':
if get_settings().get("CONFIG.ANALYTICS_FOLDER", ""):
@ -325,12 +370,16 @@ def _check_pull_request_event(action: str, body: dict, log_context: dict) -> Tup
return pull_request, api_url
async def _perform_auto_commands_github(commands_conf: str, agent: PRAgent, body: dict, api_url: str, log_context: dict):
async def _perform_auto_commands_github(commands_conf: str, agent: PRAgent, body: dict, api_url: str,
log_context: dict):
apply_repo_settings(api_url)
if not should_process_pr_logic(body): # Here we already updated the configuration with the repo settings
return {}
commands = get_settings().get(f"github_app.{commands_conf}")
if not commands:
get_logger().info(f"New PR, but no auto commands configured")
return
get_settings().set("config.is_auto_command", True)
for command in commands:
split_command = command.split(" ")
command = split_command[0]

View File

@ -1,7 +1,10 @@
import asyncio
import multiprocessing
from collections import deque
import traceback
from datetime import datetime, timezone
import time
import requests
import aiohttp
from pr_agent.agent.pr_agent import PRAgent
@ -13,6 +16,15 @@ setup_logger(fmt=LoggingFormat.JSON, level="DEBUG")
NOTIFICATION_URL = "https://api.github.com/notifications"
async def mark_notification_as_read(headers, notification, session):
async with session.patch(
f"https://api.github.com/notifications/threads/{notification['id']}",
headers=headers) as mark_read_response:
if mark_read_response.status != 205:
get_logger().error(
f"Failed to mark notification as read. Status code: {mark_read_response.status}")
def now() -> str:
"""
Get the current UTC time in ISO 8601 format.
@ -24,6 +36,108 @@ def now() -> str:
now_utc = now_utc.replace("+00:00", "Z")
return now_utc
async def async_handle_request(pr_url, rest_of_comment, comment_id, git_provider):
agent = PRAgent()
success = await agent.handle_request(
pr_url,
rest_of_comment,
notify=lambda: git_provider.add_eyes_reaction(comment_id)
)
return success
def run_handle_request(pr_url, rest_of_comment, comment_id, git_provider):
return asyncio.run(async_handle_request(pr_url, rest_of_comment, comment_id, git_provider))
def process_comment_sync(pr_url, rest_of_comment, comment_id):
try:
# Run the async handle_request in a separate function
git_provider = get_git_provider()(pr_url=pr_url)
success = run_handle_request(pr_url, rest_of_comment, comment_id, git_provider)
except Exception as e:
get_logger().error(f"Error processing comment: {e}", artifact={"traceback": traceback.format_exc()})
async def process_comment(pr_url, rest_of_comment, comment_id):
try:
git_provider = get_git_provider()(pr_url=pr_url)
git_provider.set_pr(pr_url)
agent = PRAgent()
success = await agent.handle_request(
pr_url,
rest_of_comment,
notify=lambda: git_provider.add_eyes_reaction(comment_id)
)
get_logger().info(f"Finished processing comment for PR: {pr_url}")
except Exception as e:
get_logger().error(f"Error processing comment: {e}", artifact={"traceback": traceback.format_exc()})
async def is_valid_notification(notification, headers, handled_ids, session, user_id):
try:
if 'reason' in notification and notification['reason'] == 'mention':
if 'subject' in notification and notification['subject']['type'] == 'PullRequest':
pr_url = notification['subject']['url']
latest_comment = notification['subject']['latest_comment_url']
if not latest_comment or not isinstance(latest_comment, str):
get_logger().debug(f"no latest_comment")
return False, handled_ids
async with session.get(latest_comment, headers=headers) as comment_response:
check_prev_comments = False
if comment_response.status == 200:
comment = await comment_response.json()
if 'id' in comment:
if comment['id'] in handled_ids:
get_logger().debug(f"comment['id'] in handled_ids")
return False, handled_ids
else:
handled_ids.add(comment['id'])
if 'user' in comment and 'login' in comment['user']:
if comment['user']['login'] == user_id:
get_logger().debug(f"comment['user']['login'] == user_id")
check_prev_comments = True
comment_body = comment.get('body', '')
if not comment_body:
get_logger().debug(f"no comment_body")
check_prev_comments = True
else:
user_tag = "@" + user_id
if user_tag not in comment_body:
get_logger().debug(f"user_tag not in comment_body")
check_prev_comments = True
else:
get_logger().info(f"Polling, pr_url: {pr_url}",
artifact={"comment": comment_body})
if not check_prev_comments:
return True, handled_ids, comment, comment_body, pr_url, user_tag
else: # we could not find the user tag in the latest comment. Check previous comments
# get all comments in the PR
requests_url = f"{pr_url}/comments".replace("pulls", "issues")
comments_response = requests.get(requests_url, headers=headers)
comments = comments_response.json()[::-1]
max_comment_to_scan = 4
for comment in comments[:max_comment_to_scan]:
if 'user' in comment and 'login' in comment['user']:
if comment['user']['login'] == user_id:
continue
comment_body = comment.get('body', '')
if not comment_body:
continue
if user_tag in comment_body:
get_logger().info("found user tag in previous comments")
get_logger().info(f"Polling, pr_url: {pr_url}",
artifact={"comment": comment_body})
return True, handled_ids, comment, comment_body, pr_url, user_tag
get_logger().error(f"Failed to fetch comments for PR: {pr_url}")
return False, handled_ids
return False, handled_ids
except Exception as e:
get_logger().error(f"Error processing notification: {e}", artifact={"traceback": traceback.format_exc()})
return False, handled_ids
async def polling_loop():
"""
@ -34,7 +148,6 @@ async def polling_loop():
last_modified = [None]
git_provider = get_git_provider()()
user_id = git_provider.get_user_id()
agent = PRAgent()
get_settings().set("CONFIG.PUBLISH_OUTPUT_PROGRESS", False)
get_settings().set("pr_description.publish_description_as_comment", True)
@ -74,43 +187,45 @@ async def polling_loop():
notifications = await response.json()
if not notifications:
continue
get_logger().info(f"Received {len(notifications)} notifications")
task_queue = deque()
for notification in notifications:
if not notification:
continue
# mark notification as read
await mark_notification_as_read(headers, notification, session)
handled_ids.add(notification['id'])
if 'reason' in notification and notification['reason'] == 'mention':
if 'subject' in notification and notification['subject']['type'] == 'PullRequest':
pr_url = notification['subject']['url']
latest_comment = notification['subject']['latest_comment_url']
if not latest_comment or not isinstance(latest_comment, str):
continue
async with session.get(latest_comment, headers=headers) as comment_response:
if comment_response.status == 200:
comment = await comment_response.json()
if 'id' in comment:
if comment['id'] in handled_ids:
continue
else:
handled_ids.add(comment['id'])
if 'user' in comment and 'login' in comment['user']:
if comment['user']['login'] == user_id:
continue
comment_body = comment.get('body', '')
if not comment_body:
continue
commenter_github_user = comment['user']['login'] \
if 'user' in comment else ''
get_logger().info(f"Polling, pr_url: {pr_url}",
artifact={"comment": comment_body})
user_tag = "@" + user_id
if user_tag not in comment_body:
continue
rest_of_comment = comment_body.split(user_tag)[1].strip()
comment_id = comment['id']
git_provider.set_pr(pr_url)
success = await agent.handle_request(pr_url, rest_of_comment,
notify=lambda: git_provider.add_eyes_reaction(
comment_id)) # noqa E501
if not success:
git_provider.set_pr(pr_url)
output = await is_valid_notification(notification, headers, handled_ids, session, user_id)
if output[0]:
_, handled_ids, comment, comment_body, pr_url, user_tag = output
rest_of_comment = comment_body.split(user_tag)[1].strip()
comment_id = comment['id']
# Add to the task queue
get_logger().info(
f"Adding comment processing to task queue for PR, {pr_url}, comment_body: {comment_body}")
task_queue.append((process_comment_sync, (pr_url, rest_of_comment, comment_id)))
get_logger().info(f"Queued comment processing for PR: {pr_url}")
else:
get_logger().debug(f"Skipping comment processing for PR")
max_allowed_parallel_tasks = 10
if task_queue:
processes = []
for i, (func, args) in enumerate(task_queue): # Create parallel tasks
p = multiprocessing.Process(target=func, args=args)
processes.append(p)
p.start()
if i > max_allowed_parallel_tasks:
get_logger().error(
f"Dropping {len(task_queue) - max_allowed_parallel_tasks} tasks from polling session")
break
task_queue.clear()
# Don't wait for all processes to complete. Move on to the next iteration
# for p in processes:
# p.join()
elif response.status != 304:
print(f"Failed to fetch notifications. Status code: {response.status}")
@ -121,4 +236,4 @@ async def polling_loop():
if __name__ == '__main__':
asyncio.run(polling_loop())
asyncio.run(polling_loop())

View File

@ -1,4 +1,5 @@
import copy
import re
import json
from datetime import datetime
@ -58,9 +59,12 @@ async def handle_request(api_url: str, body: str, log_context: dict, sender_id:
async def _perform_commands_gitlab(commands_conf: str, agent: PRAgent, api_url: str,
log_context: dict):
log_context: dict, data: dict):
apply_repo_settings(api_url)
if not should_process_pr_logic(data): # Here we already updated the configurations
return
commands = get_settings().get(f"gitlab.{commands_conf}", {})
get_settings().set("config.is_auto_command", True)
for command in commands:
try:
split_command = command.split(" ")
@ -75,11 +79,67 @@ async def _perform_commands_gitlab(commands_conf: str, agent: PRAgent, api_url:
get_logger().error(f"Failed to perform command {command}: {e}")
def is_bot_user(data) -> bool:
try:
# logic to ignore bot users (unlike Github, no direct flag for bot users in gitlab)
sender_name = data.get("user", {}).get("name", "unknown").lower()
bot_indicators = ['codium', 'bot_', 'bot-', '_bot', '-bot']
if any(indicator in sender_name for indicator in bot_indicators):
get_logger().info(f"Skipping GitLab bot user: {sender_name}")
return True
except Exception as e:
get_logger().error(f"Failed 'is_bot_user' logic: {e}")
return False
def should_process_pr_logic(data) -> bool:
try:
if not data.get('object_attributes', {}):
return False
title = data['object_attributes'].get('title')
# logic to ignore MRs for titles, labels and source, target branches.
ignore_mr_title = get_settings().get("CONFIG.IGNORE_PR_TITLE", [])
ignore_mr_labels = get_settings().get("CONFIG.IGNORE_PR_LABELS", [])
ignore_mr_source_branches = get_settings().get("CONFIG.IGNORE_PR_SOURCE_BRANCHES", [])
ignore_mr_target_branches = get_settings().get("CONFIG.IGNORE_PR_TARGET_BRANCHES", [])
#
if ignore_mr_source_branches:
source_branch = data['object_attributes'].get('source_branch')
if any(re.search(regex, source_branch) for regex in ignore_mr_source_branches):
get_logger().info(
f"Ignoring MR with source branch '{source_branch}' due to gitlab.ignore_mr_source_branches settings")
return False
if ignore_mr_target_branches:
target_branch = data['object_attributes'].get('target_branch')
if any(re.search(regex, target_branch) for regex in ignore_mr_target_branches):
get_logger().info(
f"Ignoring MR with target branch '{target_branch}' due to gitlab.ignore_mr_target_branches settings")
return False
if ignore_mr_labels:
labels = [label['title'] for label in data['object_attributes'].get('labels', [])]
if any(label in ignore_mr_labels for label in labels):
labels_str = ", ".join(labels)
get_logger().info(f"Ignoring MR with labels '{labels_str}' due to gitlab.ignore_mr_labels settings")
return False
if ignore_mr_title:
if any(re.search(regex, title) for regex in ignore_mr_title):
get_logger().info(f"Ignoring MR with title '{title}' due to gitlab.ignore_mr_title settings")
return False
except Exception as e:
get_logger().error(f"Failed 'should_process_pr_logic': {e}")
return True
@router.post("/webhook")
async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
start_time = datetime.now()
request_json = await request.json()
context["settings"] = copy.deepcopy(global_settings)
async def inner(data: dict):
log_context = {"server_type": "gitlab_app"}
@ -95,7 +155,6 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
secret_dict = json.loads(secret)
gitlab_token = secret_dict["gitlab_token"]
log_context["token_id"] = secret_dict.get("token_name", secret_dict.get("id", "unknown"))
context["settings"] = copy.deepcopy(global_settings)
context["settings"].gitlab.personal_access_token = gitlab_token
except Exception as e:
get_logger().error(f"Failed to validate secret {request_token}: {e}")
@ -117,28 +176,30 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
sender = data.get("user", {}).get("username", "unknown")
sender_id = data.get("user", {}).get("id", "unknown")
# logic to ignore bot users (unlike Github, no direct flag for bot users in gitlab)
sender_name = data.get("user", {}).get("name", "unknown").lower()
if 'codium' in sender_name or 'bot_' in sender_name or 'bot-' in sender_name or '_bot' in sender_name or '-bot' in sender_name:
get_logger().info(f"Skipping bot user: {sender_name}")
# ignore bot users
if is_bot_user(data):
return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))
if data.get('event_type') != 'note': # not a comment
# ignore MRs based on title, labels, source and target branches
if not should_process_pr_logic(data):
return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))
log_context["sender"] = sender
if data.get('object_kind') == 'merge_request' and data['object_attributes'].get('action') in ['open', 'reopen']:
title = data['object_attributes'].get('title')
url = data['object_attributes'].get('url')
draft = data['object_attributes'].get('draft')
get_logger().info(f"New merge request: {url}")
if draft:
get_logger().info(f"Skipping draft MR: {url}")
return JSONResponse(status_code=status.HTTP_200_OK, content=jsonable_encoder({"message": "success"}))
await _perform_commands_gitlab("pr_commands", PRAgent(), url, log_context)
await _perform_commands_gitlab("pr_commands", PRAgent(), url, log_context, data)
elif data.get('object_kind') == 'note' and data.get('event_type') == 'note': # comment on MR
if 'merge_request' in data:
mr = data['merge_request']
url = mr.get('url')
get_logger().info(f"A comment has been added to a merge request: {url}")
body = data.get('object_attributes', {}).get('note')
if data.get('object_attributes', {}).get('type') == 'DiffNote' and '/ask' in body: # /ask_line
@ -165,7 +226,7 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
content=jsonable_encoder({"message": "success"}))
get_logger().debug(f'A push event has been received: {url}')
await _perform_commands_gitlab("push_commands", PRAgent(), url, log_context)
await _perform_commands_gitlab("push_commands", PRAgent(), url, log_context, data)
except Exception as e:
get_logger().error(f"Failed to handle push event: {e}")

View File

@ -43,6 +43,9 @@ api_base = "" # the base url for your local Llama 2, Code Llama, and other model
vertex_project = "" # the google cloud platform project name for your vertexai deployment
vertex_location = "" # the google cloud platform location for your vertexai deployment
[google_ai_studio]
gemini_api_key = "" # the google AI Studio API key
[github]
# ---- Set the following only for deployment type == "user"
user_token = "" # A GitHub personal access token with 'repo' scope.
@ -60,6 +63,7 @@ webhook_secret = "<WEBHOOK SECRET>" # Optional, may be commented out.
[gitlab]
# Gitlab personal access token
personal_access_token = ""
shared_secret = "" # webhook secret
[bitbucket]
# For Bitbucket personal/repository bearer token

View File

@ -7,6 +7,7 @@ fallback_models=["gpt-4o-2024-05-13"]
git_provider="github"
publish_output=true
publish_output_progress=true
publish_output_no_suggestions=true
verbosity_level=0 # 0,1,2
use_extra_bad_extensions=false
# Configurations
@ -14,6 +15,7 @@ use_wiki_settings_file=true
use_repo_settings_file=true
use_global_settings_file=true
ai_timeout=120 # 2minutes
skip_keys = []
# token limits
max_description_tokens = 500
max_commits_tokens = 500
@ -21,8 +23,8 @@ max_model_tokens = 32000 # Limits the maximum number of tokens that can be used
custom_model_max_tokens=-1 # for models not in the default list
# patch extension logic
patch_extension_skip_types =[".md",".txt"]
allow_dynamic_context=false
max_extra_lines_before_dynamic_context = 10 # will try to include up to 10 extra lines before the hunk in the patch, until we reach an enclosing function or class
allow_dynamic_context=true
max_extra_lines_before_dynamic_context = 8 # will try to include up to 8 extra lines before the hunk in the patch, until we reach an enclosing function or class
patch_extra_lines_before = 3 # Number of extra lines (+3 default ones) to include before each hunk in the patch
patch_extra_lines_after = 1 # Number of extra lines (+3 default ones) to include after each hunk in the patch
secret_provider=""
@ -31,10 +33,17 @@ ai_disclaimer_title="" # Pro feature, title for a collapsible disclaimer to AI
ai_disclaimer="" # Pro feature, full text for the AI disclaimer
output_relevant_configurations=false
large_patch_policy = "clip" # "clip", "skip"
is_auto_command=false
# seed
seed=-1 # set positive value to fix the seed (and ensure temperature=0)
temperature=0.2
# ignore logic
ignore_pr_title = ["^\\[Auto\\]", "^Auto"] # a list of regular expressions to match against the PR title to ignore the PR agent
ignore_pr_target_branches = [] # a list of regular expressions of target branches to ignore from PR agent when a PR is created
ignore_pr_source_branches = [] # a list of regular expressions of source branches to ignore from PR agent when a PR is created
ignore_pr_labels = [] # labels to ignore from PR agent when a PR is created
#
is_auto_command = false # will be auto-set to true if the command is triggered by an automation
enable_ai_metadata = false # will enable adding ai metadata
[pr_reviewer] # /review #
# enable/disable features
@ -43,10 +52,7 @@ require_tests_review=true
require_estimate_effort_to_review=true
require_can_be_split_review=false
require_security_review=true
extra_issue_links=false
# soc2
require_soc2_ticket=false
soc2_ticket_prompt="Does the PR description include a link to ticket in a project management system (e.g., Jira, Asana, Trello, etc.) ?"
require_ticket_analysis_review=true
# general options
num_code_suggestions=0
inline_code_comments = false
@ -62,6 +68,7 @@ enable_review_labels_effort=true
require_all_thresholds_for_incremental_review=false
minimal_commits_for_incremental_review=0
minimal_minutes_for_incremental_review=0
enable_intro_text=true
enable_help_text=false # Determines whether to include help text in the PR review. Disabled by default.
# auto approval
enable_auto_approval=false
@ -69,7 +76,7 @@ maximal_review_effort=5
[pr_description] # /describe #
publish_labels=true
publish_labels=false
add_original_user_description=true
generate_ai_title=false
use_bullet_points=true
@ -101,18 +108,21 @@ enable_help_text=false
[pr_code_suggestions] # /improve #
max_context_tokens=14000
num_code_suggestions=4
#
commitable_code_suggestions = false
dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
#
extra_instructions = ""
rank_suggestions = false
enable_help_text=false
enable_chat_text=false
enable_intro_text=true
persistent_comment=true
max_history_len=4
# enable to apply suggestion 💎
apply_suggestions_checkbox=true
# suggestions scoring
self_reflect_on_suggestions=true
suggestions_score_threshold=0 # [0-10]. highly recommend not to set this value above 8, since above it may clip highly relevant suggestions
suggestions_score_threshold=0 # [0-10]. Recommend not to set this value above 8, since above that it may clip highly relevant suggestions
# params for '/improve --extended' mode
auto_extended_mode=true
num_code_suggestions_per_chunk=4
@ -124,8 +134,10 @@ final_clip_factor = 0.8
demand_code_suggestions_self_review=false # add a checkbox for the author to self-review the code suggestions
code_suggestions_self_review_text= "**Author self-review**: I have reviewed the PR code suggestions, and addressed the relevant ones."
approve_pr_on_self_review=false # Pro feature. if true, the PR will be auto-approved after the author clicks on the self-review checkbox
# Suggestion impact
fold_suggestions_on_self_review=true # Pro feature. if true, the code suggestions will be folded after the author clicks on the self-review checkbox
# Suggestion impact 💎
publish_post_process_suggestion_impact=true
wiki_page_accepted_suggestions=true
[pr_custom_prompt] # /custom_prompt #
prompt = """\
@ -176,6 +188,8 @@ enable_help_text=true
final_update_message = false
[pr_help] # /help #
force_local_db=false
num_retrieved_snippets=5
[pr_config] # /config #
@ -187,11 +201,13 @@ base_url = "https://api.github.com"
publish_inline_comments_fallback_with_verification = true
try_fix_invalid_inline_comments = true
app_name = "pr-agent"
ignore_bot_pr = true
[github_action_config]
# auto_review = true # set as env var in .github/workflows/pr-agent.yaml
# auto_describe = true # set as env var in .github/workflows/pr-agent.yaml
# auto_improve = true # set as env var in .github/workflows/pr-agent.yaml
# pr_actions = ['opened', 'reopened', 'ready_for_review', 'review_requested']
[github_app]
# these toggles allows running the github app from custom deployments
@ -215,8 +231,6 @@ push_commands = [
"/describe",
"/review --pr_reviewer.num_code_suggestions=0",
]
ignore_pr_title = []
ignore_bot_pr = true
[gitlab]
url = "https://gitlab.com"
@ -237,7 +251,7 @@ pr_commands = [
"/review --pr_reviewer.num_code_suggestions=0",
"/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
]
avoid_full_files = false
[local]
# LocalGitProvider settings - uncomment to use paths other than default
@ -293,7 +307,7 @@ number_of_results = 5
[lancedb]
uri = "./lancedb"
[best_practices]
content = ""
max_lines_allowed = 800
enable_global_best_practices = false
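Among the keys added in this configuration diff, `publish_output_no_suggestions` controls whether the "no code suggestions found" comment is posted when the tool has nothing to suggest. A minimal sketch of that decision, mirroring the condition added to `pr_code_suggestions.py` further down; the function name is illustrative:

```python
from pr_agent.config_loader import get_settings

def should_publish_no_suggestions_message() -> bool:
    # Both flags must be on: the global publish switch and the new, more specific
    # publish_output_no_suggestions toggle introduced in this change.
    config = get_settings().config
    return bool(config.publish_output and config.publish_output_no_suggestions)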

View File

@ -63,6 +63,7 @@ extra = [
]
[language_extension_map_org]
"1C Enterprise" = ["*.bsl", ]
ABAP = [".abap", ]
"AGS Script" = [".ash", ]
AMPL = [".ampl", ]

View File

@ -5,7 +5,7 @@ Your task is to generate {{ docs_for_language }} for code components in the PR D
Example for the PR Diff format:
======
## file: 'src/file1.py'
## File: 'src/file1.py'
@@ -12,3 +12,4 @@ def func1():
__new hunk__
@ -25,7 +25,7 @@ __old hunk__
...
## file: 'src/file2.py'
## File: 'src/file2.py'
...
======

View File

@ -1,18 +1,23 @@
[pr_code_suggestions_prompt]
system="""You are PR-Reviewer, a language model that specializes in suggesting improvements to a Pull Request (PR) code.
Your task is to provide meaningful and actionable code suggestions, to improve the new code presented in a PR code diff (lines starting with '+').
system="""You are PR-Reviewer, an AI specializing in Pull Request (PR) code analysis and suggestions.
Your task is to examine the provided code diff, focusing on new code (lines prefixed with '+'), and offer concise, actionable suggestions to fix possible bugs and problems, and enhance code quality, readability, and performance.
The format we will use to present the PR code diff:
The PR code diff will be in the following structured format:
======
## file: 'src/file1.py'
## File: 'src/file1.py'
{%- if is_ai_metadata %}
### AI-generated changes summary:
* ...
* ...
{%- endif %}
@@ ... @@ def func1():
__new hunk__
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
unchanged code line0 in the PR
unchanged code line1 in the PR
+new code line2 added in the PR
unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
@ -21,35 +26,34 @@ __old hunk__
@@ ... @@ def func2():
__new hunk__
...
__old hunk__
...
unchanged code line4
+new code line5 added in the PR
unchanged code line6
## file: 'src/file2.py'
## File: 'src/file2.py'
...
======
- In this format, we separate each hunk of diff code to '__new hunk__' and '__old hunk__' sections. The '__new hunk__' section contains the new code of the chunk, and the '__old hunk__' section contains the old code, that was removed. If no new code was added in a specific hunk, '__new hunk__' section will not be presented. If no code was removed, '__old hunk__' section will not be presented.
- We also added line numbers for the '__new hunk__' code, to help you refer to the code lines in your suggestions. These line numbers are not part of the actual code, and should only be used for reference.
- Code lines are prefixed with symbols ('+', '-', ' '). The '+' symbol indicates new code added in the PR, the '-' symbol indicates code removed in the PR, and the ' ' symbol indicates unchanged code. \
- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was removed in a specific chunk, the __old hunk__ section will be omitted.
- Code lines are prefixed with symbols: '+' for new code added in the PR, '-' for code removed, and ' ' for unchanged code.
{%- if is_ai_metadata %}
- When available, an AI-generated summary will precede each file's diff, with a high-level overview of the changes. Note that this summary may not be fully accurate or complete.
{%- endif %}
Specific instructions for generating code suggestions:
- Provide up to {{ num_code_suggestions }} code suggestions.
- The suggestions should be diverse and insightful. They should focus on improving only the new code introduced in the PR, meaning lines from '__new hunk__' sections, starting with '+' (after the line numbers).
- Prioritize suggestions that address possible issues, major problems, and bugs in the PR code. Don't repeat changes already present in the PR. If there are no relevant suggestions for the PR, return an empty list.
- Don't suggest to add docstring, type hints, or comments, or to remove unused imports.
- Suggestions should not repeat code already present in the '__new hunk__' sections.
- Provide the exact line numbers range (inclusive) for each suggestion. Use the line numbers from the '__new hunk__' sections.
- Every time you cite variables or names from the code, use backticks ('`'). For example: 'ensure that `variable_name` is ...'
- Take into account that you are reviewing a PR code diff, and that the entire codebase is not available for you as context. Hence, avoid suggestions that might conflict with unseen parts of the codebase.
Specific guidelines for generating code suggestions:
- Provide up to {{ num_code_suggestions }} distinct and insightful code suggestions.
- Focus solely on enhancing new code introduced in the PR, identified by '+' prefixes in '__new hunk__' sections.
- Prioritize suggestions that address potential issues, critical problems, and bugs in the PR code. Avoid repeating changes already implemented in the PR. If no pertinent suggestions are applicable, return an empty list.
- Don't suggest to add docstring, type hints, or comments, to remove unused imports, or to use more specific exception types.
- When referencing variables or names from the code, enclose them in backticks (`). Example: "ensure that `variable_name` is..."
- Be mindful you are viewing a partial PR code diff, not the full codebase. Avoid suggestions that might conflict with unseen code or alter variables not declared in the visible scope, as the context is incomplete.
{%- if extra_instructions %}
Extra instructions from the user, that should be taken into account with high priority:
Extra user-provided instructions (should be addressed with high priority):
======
{{ extra_instructions }}
======
@ -59,15 +63,14 @@ Extra instructions from the user, that should be taken into account with high pr
The output must be a YAML object equivalent to type $PRCodeSuggestions, according to the following Pydantic definitions:
=====
class CodeSuggestion(BaseModel):
relevant_file: str = Field(description="The full file path of the relevant file")
language: str = Field(description="The programming language of the relevant file")
suggestion_content: str = Field(description="an actionable suggestion for meaningfully improving the new code introduced in the PR")
existing_code: str = Field(description="a short code snippet, demonstrating the relevant code lines from a '__new hunk__' section. It must be without line numbers. Quote only full code lines, not partial ones. Use abbreviations ("...") of full lines if needed")
improved_code: str = Field(description="a new code snippet, that can be used to replace the relevant 'existing_code' lines in '__new hunk__' code after applying the suggestion")
one_sentence_summary: str = Field(description="a short summary of the suggestion action, in a single sentence. Focus on the 'what'. Be general, and avoid method or variable names.")
relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above")
relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above")
label: str = Field(description="a single label for the suggestion, to help the user understand the suggestion type. For example: 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', etc. Other labels are also allowed")
relevant_file: str = Field(description="Full path of the relevant file")
language: str = Field(description="Programming language used by the relevant file")
suggestion_content: str = Field(description="An actionable suggestion to enhance, improve or fix the new code introduced in the PR. Don't present here actual code snippets, just the suggestion. Be short and concise")
existing_code: str = Field(description="A short code snippet from a '__new hunk__' section that the suggestion aims to enhance or fix. Include only complete code lines. Use ellipsis (...) for brevity if needed. This snippet should represent the specific PR code targeted for improvement.")
improved_code: str = Field(description="A refined code snippet that replaces the 'existing_code' snippet after implementing the suggestion.")
one_sentence_summary: str = Field(description="A concise, single-sentence overview of the suggested improvement. Focus on the 'what'. Be general, and avoid method or variable names.")
label: str = Field(description="A single, descriptive label that best characterizes the suggestion type. Possible labels include 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', 'typo'. Other relevant labels are also acceptable.")
class PRCodeSuggestions(BaseModel):
code_suggestions: List[CodeSuggestion]
@ -89,8 +92,6 @@ code_suggestions:
...
one_sentence_summary: |
...
relevant_lines_start: 12
relevant_lines_end: 13
label: |
...
```
@ -106,112 +107,10 @@ Title: '{{title}}'
The PR Diff:
======
{{ diff|trim }}
{{ diff_no_line_numbers|trim }}
======
Response (should be a valid YAML, and nothing else):
```yaml
"""
[pr_code_suggestions_prompt_claude]
system="""You are PR-Reviewer, a language model that specializes in suggesting improvements to a Pull Request (PR) code.
Your task is to provide meaningful and actionable code suggestions, to improve the new code presented in a PR code diff (lines starting with '+').
The format we will use to present the PR code diff:
======
## file: 'src/file1.py'
@@ ... @@ def func1():
__new hunk__
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 unchanged code line3 in the PR
__old hunk__
unchanged code line0
unchanged code line1
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():
__new hunk__
...
__old hunk__
...
## file: 'src/file2.py'
...
======
- In this format, we separate each hunk of diff code to '__new hunk__' and '__old hunk__' sections. The '__new hunk__' section contains the new code of the chunk, and the '__old hunk__' section contains the old code, that was removed. If no new code was added in a specific hunk, '__new hunk__' section will not be presented. If no code was removed, '__old hunk__' section will not be presented.
- We also added line numbers for the '__new hunk__' code, to help you refer to the code lines in your suggestions. These line numbers are not part of the actual code, and should only be used for reference.
- Code lines are prefixed with symbols ('+', '-', ' '). The '+' symbol indicates new code added in the PR, the '-' symbol indicates code removed in the PR, and the ' ' symbol indicates unchanged code. \
Specific instructions for generating code suggestions:
- Provide up to {{ num_code_suggestions }} code suggestions.
- The suggestions should be diverse and insightful. They should focus on improving only the new code introduced in the PR, meaning lines from '__new hunk__' sections, starting with '+' (after the line numbers).
- Prioritize suggestions that address possible issues, major problems, and bugs in the PR code. Don't repeat changes already present in the PR. If there are no relevant suggestions for the PR, return an empty list.
- Don't suggest to add docstring, type hints, or comments, or to remove unused imports.
- Provide the exact line numbers range (inclusive) for each suggestion. Use the line numbers from the '__new hunk__' sections.
- Every time you cite variables or names from the code, use backticks ('`'). For example: 'ensure that `variable_name` is ...'
- Take into account that you are receiving as an input only a PR code diff. The entire codebase is not available for you as context. Hence, avoid suggestions that might conflict with unseen parts of the codebase, like imports, global variables, etc.
{%- if extra_instructions %}
Extra instructions from the user, that should be taken into account with high priority:
======
{{ extra_instructions }}
======
{%- endif %}
The output must be a YAML object equivalent to type $PRCodeSuggestions, according to the following Pydantic definitions:
=====
class CodeSuggestion(BaseModel):
relevant_file: str = Field(description="The full file path of the relevant file")
language: str = Field(description="the programming language of the relevant file")
suggestion_content: str = Field(description="an actionable suggestion for meaningfully improving the new code introduced in the PR. Don't present here actual code snippets, just the suggestion. Be short and concise")
existing_code: str = Field(description="a short code snippet, demonstrating the relevant code lines from a '__new hunk__' section. It must be without line numbers. Quote only full code lines, not partial ones. Use abbreviations ("...") of full lines if needed")
improved_code: str = Field(description="a new code snippet, that can be used to replace the relevant 'existing_code' lines in '__new hunk__' code after applying the suggestion")
one_sentence_summary: str = Field(description="a short summary of the suggestion action, in a single sentence. Focus on the 'what'. Be general, and avoid method or variable names.")
relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above")
relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the 'existing code' snippet above")
label: str = Field(description="a single label for the suggestion, to help the user understand the suggestion type. For example: 'security', 'possible bug', 'possible issue', 'performance', 'enhancement', 'best practice', 'maintainability', etc. Other labels are also allowed")
class PRCodeSuggestions(BaseModel):
code_suggestions: List[CodeSuggestion]
=====
Example output:
```yaml
code_suggestions:
- relevant_file: |
src/file1.py
language: |
python
suggestion_content: |
...
existing_code: |
...
improved_code: |
...
one_sentence_summary: |
...
relevant_lines_start: 12
relevant_lines_end: 13
label: |
...
```
Each YAML output MUST be after a newline, indented, with block scalar indicator ('|').
"""

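The rewritten prompt above asks the model for YAML matching the `$PRCodeSuggestions` schema, and, since the user-facing diff now has its line numbers stripped, `relevant_lines_start`/`relevant_lines_end` are no longer requested here (they are recovered later, in the reflection step). A minimal sketch of the schema and of parsing such a response, using plain PyYAML rather than the repository's own `load_yaml` helper; the parsing assumes the model honored the "valid YAML, and nothing else" instruction:

```python
from typing import List

import yaml
from pydantic import BaseModel, Field

class CodeSuggestion(BaseModel):
    relevant_file: str = Field(description="Full path of the relevant file")
    language: str = Field(description="Programming language used by the relevant file")
    suggestion_content: str
    existing_code: str
    improved_code: str
    one_sentence_summary: str
    label: str

class PRCodeSuggestions(BaseModel):
    code_suggestions: List[CodeSuggestion]

def parse_suggestions(model_response: str) -> PRCodeSuggestions:
    # Assumes a bare YAML answer, as the prompt instructs.
    return PRCodeSuggestions(**yaml.safe_load(model_response))
```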
View File

@ -1,32 +1,63 @@
[pr_code_suggestions_reflect_prompt]
system="""You are a language model that specializes in reviewing and evaluating suggestions for a Pull Request (PR) code.
system="""You are an AI language model specialized in reviewing and evaluating code suggestions for a Pull Request (PR).
Your task is to analyze a PR code diff and evaluate a set of AI-generated code suggestions. These suggestions aim to address potential bugs and problems, and enhance the new code introduced in the PR.
Your input is a PR code, and a list of code suggestions that were generated for the PR.
Your goal is to inspect, review and score the suggestions.
Be aware - the suggestions may not always be correct or accurate, and you should evaluate them in relation to the actual PR code diff presented. Sometimes the suggestion may ignore parts of the actual code diff, and in that case, you should give it a score of 0.
Examine each suggestion meticulously, assessing its quality, relevance, and accuracy within the context of PR. Keep in mind that the suggestions may vary in their correctness and accuracy. Your evaluation should be based on a thorough comparison between each suggestion and the actual PR code diff.
Consider the following components of each suggestion:
1. 'one_sentence_summary' - A brief summary of the suggestion's purpose
2. 'suggestion_content' - The detailed suggestion content, explaining the proposed modification
3. 'existing_code' - a code snippet from a __new hunk__ section in the PR code diff that the suggestion addresses
4. 'improved_code' - a code snippet demonstrating how the 'existing_code' should be after the suggestion is applied
Specific instructions:
- Carefully review both the suggestion content, and the related PR code diff. Mistakes in the suggestions can occur. Make sure the suggestions are logical and correct, and properly derived from the PR code diff.
- In addition to the exact code lines mentioned in each suggestion, review the code around them, to ensure that the suggestions are contextually accurate.
- Check that the 'existing_code' field is valid. The 'existing_code' content should match, or be derived, from code lines from a 'new hunk' section in the PR code diff.
- Check that the 'improved_code' section correctly reflects the suggestion content.
- High scores (8 to 10) should be given to correct suggestions that address major bugs and issues, or security concerns. Lower scores (3 to 7) should be for correct suggestions addressing minor issues, code style, code readability, maintainability, etc. Don't give high scores to suggestions that are not crucial, and bring only small improvement or optimization.
- Order the feedback the same way the suggestions are ordered in the input.
Be particularly vigilant for suggestions that:
- Overlook crucial details in the PR
- The 'improved_code' section does not accurately reflect the suggested changes, in relation to the 'existing_code'
- Contradict or ignore parts of the PR's modifications
In such cases, assign the suggestion a score of 0.
Evaluate each valid suggestion by scoring its potential impact on the PR's correctness, quality and functionality.
In addition, you should also detect the line numbers in the '__new hunk__' section that correspond to the 'existing_code' snippet.
Key guidelines for evaluation:
- Thoroughly examine both the suggestion content and the corresponding PR code diff. Be vigilant for potential errors in each suggestion, ensuring they are logically sound, accurate, and directly derived from the PR code diff.
- Extend your review beyond the specifically mentioned code lines to encompass surrounding context, verifying the suggestions' contextual accuracy.
- Validate the 'existing_code' field by confirming it matches or is accurately derived from code lines within a '__new hunk__' section of the PR code diff.
- Ensure the 'improved_code' section accurately reflects the 'existing_code' segment after the suggested modification is applied.
- Apply a nuanced scoring system:
- Reserve high scores (8-10) for suggestions addressing critical issues such as major bugs or security concerns.
- Assign moderate scores (3-7) to suggestions that tackle minor issues, improve code style, enhance readability, or boost maintainability.
- Avoid inflating scores for suggestions that, while correct, offer only marginal improvements or optimizations.
- Maintain the original order of suggestions in your feedback, corresponding to their input sequence.
Additional scoring considerations:
- If the suggestion is not actionable, and only asks the user to verify or ensure a change, reduce its score by 1-2 points.
- Assign a score of 0 to suggestions aiming at:
- Adding docstrings, type hints, or comments
- Removing unused imports or variables
- Using more specific exception types.
The format that is used to present the PR code diff is as follows:
The PR code diff will be presented in the following structured format:
======
## file: 'src/file1.py'
## File: 'src/file1.py'
{%- if is_ai_metadata %}
### AI-generated changes summary:
* ...
* ...
{%- endif %}
@@ ... @@ def func1():
__new hunk__
12 code line1 that remained unchanged in the PR
11 unchanged code line0 in the PR
12 unchanged code line1 in the PR
13 +new code line2 added in the PR
14 code line3 that remained unchanged in the PR
14 unchanged code line3 in the PR
__old hunk__
code line1 that remained unchanged in the PR
-old code line2 that was removed in the PR
code line3 that remained unchanged in the PR
unchanged code line0
unchanged code line1
-old code line2 removed in the PR
unchanged code line3
@@ ... @@ def func2():
__new hunk__
@ -35,22 +66,26 @@ __old hunk__
...
## file: 'src/file2.py'
## File: 'src/file2.py'
...
======
- In this format, we separated each hunk of code to '__new hunk__' and '__old hunk__' sections. The '__new hunk__' section contains the new code of the chunk, and the '__old hunk__' section contains the old code that was removed.
- If no new code was added in a specific hunk, '__new hunk__' section will not be presented. If no code was removed, '__old hunk__' section will not be presented.
- We added line numbers for the '__new hunk__' sections, to help you refer to the code lines in your suggestions. These line numbers are not part of the actual code, and are only used for reference.
- Code lines are prefixed with symbols ('+', '-', ' '). The '+' symbol indicates new code added in the PR, the '-' symbol indicates code removed in the PR, and the ' ' symbol indicates unchanged code.
- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was added or removed in a specific chunk, the corresponding section will be omitted.
- Line numbers are included for the '__new hunk__' sections to enable referencing specific lines in the code suggestions. These numbers are for reference only and are not part of the actual code.
- Code lines are prefixed with symbols: '+' for new code added in the PR, '-' for code removed, and ' ' for unchanged code.
{%- if is_ai_metadata %}
- When available, an AI-generated summary will precede each file's diff, with a high-level overview of the changes. Note that this summary may not be fully accurate or comprehensive.
{%- endif %}
The output must be a YAML object equivalent to type $PRCodeSuggestionsFeedback, according to the following Pydantic definitions:
=====
class CodeSuggestionFeedback(BaseModel):
suggestion_summary: str = Field(description="repeated from the input")
relevant_file: str = Field(description="repeated from the input")
suggestion_score: int = Field(description="The actual output - the score of the suggestion, from 0 to 10. Give 0 if the suggestion is wrong. Otherwise, give a score from 1 to 10 (inclusive), where 1 is the lowest and 10 is the highest.")
why: str = Field(description="Short and concise explanation of why the suggestion received the score (one to two sentences).")
suggestion_summary: str = Field(description="Repeated from the input")
relevant_file: str = Field(description="Repeated from the input")
relevant_lines_start: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion starts (inclusive). Should be derived from the hunk line numbers, and correspond to the beginning of the relevant 'existing code' snippet")
relevant_lines_end: int = Field(description="The relevant line number, from a '__new hunk__' section, where the suggestion ends (inclusive). Should be derived from the hunk line numbers, and correspond to the end of the relevant 'existing code' snippet")
suggestion_score: int = Field(description="Evaluate the suggestion and assign a score from 0 to 10. Give 0 if the suggestion is wrong. For valid suggestions, score from 1 (lowest impact/importance) to 10 (highest impact/importance).")
why: str = Field(description="Briefly explain the score given in 1-2 sentences, focusing on the suggestion's impact, relevance, and accuracy.")
class PRCodeSuggestionsFeedback(BaseModel):
code_suggestions: List[CodeSuggestionFeedback]
@ -63,6 +98,8 @@ code_suggestions:
- suggestion_summary: |
Use a more descriptive variable name here
relevant_file: "src/file1.py"
relevant_lines_start: 13
relevant_lines_end: 14
suggestion_score: 6
why: |
The variable name 't' is not descriptive enough
@ -79,7 +116,7 @@ user="""You are given a Pull Request (PR) code diff:
======
And here is a list of corresponding {{ num_code_suggestions }} code suggestions to improve this Pull Request code:
Below are {{ num_code_suggestions }} AI-generated code suggestions for enhancing the Pull Request:
======
{{ suggestion_str|trim }}
======
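Since line numbers were dropped from the suggestion prompt, the reflection model above now returns `relevant_lines_start`/`relevant_lines_end` together with each score. A minimal sketch of folding that feedback back into the suggestions and filtering by score, following the logic shown in the `pr_code_suggestions.py` diff below; the default threshold is illustrative:

```python
from typing import Dict, List

def merge_reflection_feedback(suggestions: List[Dict], feedback: List[Dict],
                              score_threshold: int = 7) -> List[Dict]:
    # Feedback entries are expected in the same order as the input suggestions.
    kept = []
    for suggestion, fb in zip(suggestions, feedback):
        suggestion["score"] = fb.get("suggestion_score", 0)
        suggestion["score_why"] = fb.get("why", "")
        # Line numbers now come from the reflection output; missing or negative values
        # zero the score, mirroring the behavior added in the code diff below.
        start = fb.get("relevant_lines_start", -1)
        end = fb.get("relevant_lines_end", -1)
        suggestion["relevant_lines_start"] = start
        suggestion["relevant_lines_end"] = end
        if start < 0 or end < 0:
            suggestion["score"] = 0
        if suggestion["score"] >= score_threshold:
            kept.append(suggestion)
    return kept
```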

View File

@ -78,9 +78,9 @@ pr_files:
...
...
{%- endif %}
description: |-
description: |
...
title: |-
title: |
...
{%- if enable_custom_labels %}
labels:
@ -94,7 +94,26 @@ labels:
Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
"""
user="""PR Info:
user="""
{%- if related_tickets %}
Related Ticket Info:
{% for ticket in related_tickets %}
=====
Ticket Title: '{{ ticket.title }}'
{%- if ticket.labels %}
Ticket Labels: {{ ticket.labels }}
{%- endif %}
{%- if ticket.body %}
Ticket Description:
#####
{{ ticket.body }}
#####
{%- endif %}
=====
{% endfor %}
{%- endif %}
PR Info:
Previous title: '{{title}}'
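The description prompt above now opens with an optional block of related-ticket context, emitted only when `related_tickets` is passed to the template. A minimal sketch of rendering such a block with Jinja2, using the same `Environment(StrictUndefined)` setup seen in the code diffs on this page; the template and ticket data are abbreviated, illustrative examples:

```python
from jinja2 import Environment, StrictUndefined

template = """\
{%- if related_tickets %}
Related Ticket Info:
{% for ticket in related_tickets %}
Ticket Title: '{{ ticket.title }}'
{%- if ticket.labels %}
Ticket Labels: {{ ticket.labels }}
{%- endif %}
{% endfor %}
{%- endif %}
PR Info:
Previous title: '{{ title }}'
"""

environment = Environment(undefined=StrictUndefined)
prompt = environment.from_string(template).render(
    related_tickets=[{"title": "Fix login redirect", "labels": ["bug"]}],  # illustrative data
    title="Fix login flow",
)
print(prompt)
```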

View File

@ -0,0 +1,53 @@
[pr_help_prompts]
system="""You are Doc-helper, a language model designed to answer questions about a documentation website for an open-source project called "PR-Agent" (recently renamed to "Qodo Merge").
You will receive a question, and the full documentation website content.
Your goal is to provide the best answer to the question using the documentation provided.
Additional instructions:
- Try to be short and concise in your answers. Try to give examples if needed.
- The main tools of PR-Agent are 'describe', 'review', 'improve'. If there is ambiguity to which tool the user is referring to, prioritize snippets of these tools over others.
- If the question has ambiguity and can relate to different tools or platforms, provide the best answer possible based on what is available, but also state in your answer what additional information would be needed to give a more accurate answer.
The output must be a YAML object equivalent to type $DocHelper, according to the following Pydantic definitions:
=====
class relevant_section(BaseModel):
file_name: str = Field(description="The name of the relevant file")
relevant_section_header_string: str = Field(description="From the relevant file, exact text of the relevant section heading. If no markdown heading is relevant, return empty string")
class DocHelper(BaseModel):
user_question: str = Field(description="The user's question")
response: str = Field(description="The response to the user's question")
relevant_sections: List[relevant_section] = Field(description="A list of the relevant markdown sections in the documentation that answer the user's question, ordered by importance (most relevant first)")
=====
Example output:
```yaml
user_question: |
...
response: |
...
relevant_sections:
- file_name: "src/file1.py"
relevant_section_header_string: |
...
- ...
"""
user="""\
User's Question:
=====
{{ question|trim }}
=====
Documentation website content:
=====
{{ snippets|trim }}
=====
Response (should be a valid YAML, and nothing else):
```yaml
"""

View File

@ -12,7 +12,7 @@ Additional guidelines:
Example Hunk Structure:
======
## file: 'src/file1.py'
## File: 'src/file1.py'
@@ -12,5 +12,5 @@ def func1():
code line 1 that remained unchanged in the PR

View File

@ -10,7 +10,13 @@ The review should focus on new code added in the PR code diff (lines starting wi
The format we will use to present the PR code diff:
======
## file: 'src/file1.py'
## File: 'src/file1.py'
{%- if is_ai_metadata %}
### AI-generated changes summary:
* ...
* ...
{%- endif %}
@@ ... @@ def func1():
__new hunk__
@ -26,19 +32,21 @@ __old hunk__
@@ ... @@ def func2():
__new hunk__
...
__old hunk__
...
unchanged code line4
+new code line5 added in the PR
unchanged code line6
## file: 'src/file2.py'
## File: 'src/file2.py'
...
======
- In this format, we separated each hunk of diff code to '__new hunk__' and '__old hunk__' sections. The '__new hunk__' section contains the new code of the chunk, and the '__old hunk__' section contains the old code, that was removed. If no new code was added in a specific hunk, '__new hunk__' section will not be presented. If no code was removed, '__old hunk__' section will not be presented.
- In the format above, the diff is organized into separate '__new hunk__' and '__old hunk__' sections for each code chunk. '__new hunk__' contains the updated code, while '__old hunk__' shows the removed code. If no code was removed in a specific chunk, the __old hunk__ section will be omitted.
- We also added line numbers for the '__new hunk__' code, to help you refer to the code lines in your suggestions. These line numbers are not part of the actual code, and should only be used for reference.
- Code lines are prefixed with symbols ('+', '-', ' '). The '+' symbol indicates new code added in the PR, the '-' symbol indicates code removed in the PR, and the ' ' symbol indicates unchanged code. \
The review should address new code added in the PR code diff (lines starting with '+')
{%- if is_ai_metadata %}
- If available, an AI-generated summary will appear and provide a high-level overview of the file changes. Note that this summary may not be fully accurate or complete.
{%- endif %}
- When quoting variables or names from the code, use backticks (`) instead of single quote (').
{%- if num_code_suggestions > 0 %}
@ -72,21 +80,25 @@ class SubPR(BaseModel):
class KeyIssuesComponentLink(BaseModel):
relevant_file: str = Field(description="The full file path of the relevant file")
issue_header: str = Field(description="one or two word title for the issue. For example: 'Possible Bug', 'Performance Issue', 'Code Smell', etc.")
issue_content: str = Field(description="a short and concise description of the issue that needs to be reviewed")
start_line: int = Field(description="the start line that corresponds to this issue in the relevant file")
end_line: int = Field(description="the end line that corresponds to this issue in the relevant file")
{%- if extra_issue_links %}
referenced_variables: List[Refs] = Field(description="a list of relevant variables or names that appear in the 'issue_content' output. For each variable, output is name, and the line number where it appears in the relevant file")
{% endif %}
issue_header: str = Field(description="One or two word title for the issue. For example: 'Possible Bug', 'Performance Issue', 'Code Smell', etc.")
issue_content: str = Field(description="A short and concise summary of what should be further inspected and validated during the PR review process for this issue. Don't state line numbers here")
start_line: int = Field(description="The start line that corresponds to this issue in the relevant file")
end_line: int = Field(description="The end line that corresponds to this issue in the relevant file")
{%- if extra_issue_links %}
class Refs(BaseModel):
variable_name: str = Field(description="the name of a variable or name that appears in the relevant 'issue_content' output.")
relevant_line: int = Field(description="the line number where the variable or name appears in the relevant file")
{%- if related_tickets %}
class TicketCompliance(BaseModel):
ticket_url: str = Field(description="Ticket URL or ID")
ticket_requirements: str = Field(description="Repeat, in your own words, all ticket requirements, in bullet points")
fully_compliant_requirements: str = Field(description="A list, in bullet points, of which requirements are met by the PR code. Don't explain how the requirements are met, just list them shortly. Can be empty")
not_compliant_requirements: str = Field(description="A list, in bullet points, of which requirements are not met by the PR code. Don't explain how the requirements are not met, just list them shortly. Can be empty")
overall_compliance_level: str = Field(description="Overall give this PR one of these three values in relation to the ticket: 'Fully compliant', 'Partially compliant', or 'Not compliant'")
{%- endif %}
class Review(BaseModel):
{%- if related_tickets %}
ticket_compliance_check: List[TicketCompliance] = Field(description="A list of compliance checks for the related tickets")
{%- endif %}
{%- if require_estimate_effort_to_review %}
estimated_effort_to_review_[1-5]: int = Field(description="Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review, 5 means long and hard review. Take into account the size, complexity, quality, and the needed changes of the PR code diff.")
{%- endif %}
@ -99,7 +111,7 @@ class Review(BaseModel):
{%- if question_str %}
insights_from_user_answers: str = Field(description="shortly summarize the insights you gained from the user's answers to the questions")
{%- endif %}
key_issues_to_review: List[KeyIssuesComponentLink] = Field("A list of bugs, issue or major performance concerns introduced in this PR, which the PR reviewer should further investigate")
key_issues_to_review: List[KeyIssuesComponentLink] = Field("A diverse list of bugs, issues or major performance concerns introduced in this PR, which the PR reviewer should further investigate")
{%- if require_security_review %}
security_concerns: str = Field(description="Does this PR code introduce possible vulnerabilities such as exposure of sensitive information (e.g., API keys, secrets, passwords), or security concerns like SQL injection, XSS, CSRF, and others ? Answer 'No' (without explaining why) if there are no possible issues. If there are security concerns or issues, start your answer with a short header, such as: 'Sensitive information exposure: ...', 'SQL injection: ...' etc. Explain your answer. Be specific and give examples if possible")
{%- endif %}
@ -131,6 +143,19 @@ class PRReview(BaseModel):
Example output:
```yaml
review:
{%- if related_tickets %}
ticket_compliance_check:
- ticket_url: |
...
ticket_requirements: |
...
fully_compliant_requirements: |
...
not_compliant_requirements: |
...
overall_compliance_level: |
...
{%- endif %}
{%- if require_estimate_effort_to_review %}
estimated_effort_to_review_[1-5]: |
3
@ -149,12 +174,6 @@ review:
...
start_line: 12
end_line: 14
{%- if extra_issue_links %}
referenced_variables:
- variable_name: |
...
relevant_line: 13
{%- endif %}
- ...
security_concerns: |
No
@ -183,7 +202,33 @@ code_feedback:
Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
"""
user="""--PR Info--
user="""
{%- if related_tickets %}
--PR Ticket Info--
{%- for ticket in related_tickets %}
=====
Ticket URL: '{{ ticket.ticket_url }}'
Ticket Title: '{{ ticket.title }}'
{%- if ticket.labels %}
Ticket Labels: {{ ticket.labels }}
{%- endif %}
{%- if ticket.body %}
Ticket Description:
#####
{{ ticket.body }}
#####
{%- endif %}
=====
{% endfor %}
{%- endif %}
--PR Info--
Title: '{{title}}'

View File

@ -1,13 +1,15 @@
import asyncio
import copy
import textwrap
import traceback
from functools import partial
from typing import Dict, List
from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models
from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models, \
add_ai_metadata_to_diff_files
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml, replace_code_tags, ModelType, show_relevant_configurations
from pr_agent.config_loader import get_settings
@ -43,10 +45,8 @@ class PRCodeSuggestions:
self.is_extended = self._get_is_extended(args or [])
except:
self.is_extended = False
if self.is_extended:
num_code_suggestions = get_settings().pr_code_suggestions.num_code_suggestions_per_chunk
else:
num_code_suggestions = get_settings().pr_code_suggestions.num_code_suggestions
num_code_suggestions = int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk)
self.ai_handler = ai_handler()
self.ai_handler.main_pr_language = self.main_language
@ -54,22 +54,30 @@ class PRCodeSuggestions:
self.prediction = None
self.pr_url = pr_url
self.cli_mode = cli_mode
self.pr_description, self.pr_description_files = (
self.git_provider.get_pr_description(split_changes_walkthrough=True))
if (self.pr_description_files and get_settings().get("config.is_auto_command", False) and
get_settings().get("config.enable_ai_metadata", False)):
add_ai_metadata_to_diff_files(self.git_provider, self.pr_description_files)
get_logger().debug(f"AI metadata added to this command")
else:
get_settings().set("config.enable_ai_metadata", False)
get_logger().debug(f"AI metadata is disabled for this command")
self.vars = {
"title": self.git_provider.pr.title,
"branch": self.git_provider.get_pr_branch(),
"description": self.git_provider.get_pr_description(),
"description": self.pr_description,
"language": self.main_language,
"diff": "", # empty diff for initial calculation
"diff_no_line_numbers": "", # empty diff for initial calculation
"num_code_suggestions": num_code_suggestions,
"extra_instructions": get_settings().pr_code_suggestions.extra_instructions,
"commit_messages_str": self.git_provider.get_commit_messages(),
"relevant_best_practices": "",
"is_ai_metadata": get_settings().get("config.enable_ai_metadata", False),
}
if 'claude' in get_settings().config.model:
# prompt for Claude, with minor adjustments
self.pr_code_suggestions_prompt_system = get_settings().pr_code_suggestions_prompt_claude.system
else:
self.pr_code_suggestions_prompt_system = get_settings().pr_code_suggestions_prompt.system
self.pr_code_suggestions_prompt_system = get_settings().pr_code_suggestions_prompt.system
self.token_handler = TokenHandler(self.git_provider.pr,
self.vars,
@ -104,15 +112,17 @@ class PRCodeSuggestions:
if not data:
data = {"code_suggestions": []}
if (data is None or 'code_suggestions' not in data or not data['code_suggestions']
and get_settings().config.publish_output):
get_logger().warning('No code suggestions found for the PR.')
if (data is None or 'code_suggestions' not in data or not data['code_suggestions']):
pr_body = "## PR Code Suggestions ✨\n\nNo code suggestions found for the PR."
get_logger().debug(f"PR output", artifact=pr_body)
if self.progress_response:
self.git_provider.edit_comment(self.progress_response, body=pr_body)
get_logger().warning('No code suggestions found for the PR.')
if get_settings().config.publish_output and get_settings().config.publish_output_no_suggestions:
get_logger().debug(f"PR output", artifact=pr_body)
if self.progress_response:
self.git_provider.edit_comment(self.progress_response, body=pr_body)
else:
self.git_provider.publish_comment(pr_body)
else:
self.git_provider.publish_comment(pr_body)
get_settings().data = {"artifact": ""}
return
if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
@ -137,6 +147,9 @@ class PRCodeSuggestions:
pr_body += ' <!-- approve pr self-review -->'
# add usage guide
if (get_settings().pr_code_suggestions.enable_chat_text and get_settings().config.is_auto_command
and isinstance(self.git_provider, GithubProvider)):
pr_body += "\n\n>💡 Need additional feedback ? start a [PR chat](https://chromewebstore.google.com/detail/ephlnjeghhogofkifjloamocljapahnl) \n\n"
if get_settings().pr_code_suggestions.enable_help_text:
pr_body += "<hr>\n\n<details> <summary><strong>💡 Tool usage guide:</strong></summary><hr> \n\n"
pr_body += HelpMessage.get_improve_usage_guide()
@ -146,6 +159,7 @@ class PRCodeSuggestions:
if get_settings().get('config', {}).get('output_relevant_configurations', False):
pr_body += show_relevant_configurations(relevant_section='pr_code_suggestions')
# publish the PR comment
if get_settings().pr_code_suggestions.persistent_comment:
final_update_message = False
self.publish_persistent_comment_with_history(pr_body,
@ -161,14 +175,35 @@ class PRCodeSuggestions:
else:
self.git_provider.publish_comment(pr_body)
# dual publishing mode
if int(get_settings().pr_code_suggestions.dual_publishing_score_threshold) > 0:
data_above_threshold = {'code_suggestions': []}
try:
for suggestion in data['code_suggestions']:
if int(suggestion.get('score', 0)) >= int(get_settings().pr_code_suggestions.dual_publishing_score_threshold) \
and suggestion.get('improved_code'):
data_above_threshold['code_suggestions'].append(suggestion)
if not data_above_threshold['code_suggestions'][-1]['existing_code']:
get_logger().info(f'Identical existing and improved code for dual publishing found')
data_above_threshold['code_suggestions'][-1]['existing_code'] = suggestion[
'improved_code']
if data_above_threshold['code_suggestions']:
get_logger().info(
f"Publishing {len(data_above_threshold['code_suggestions'])} suggestions in dual publishing mode")
self.push_inline_code_suggestions(data_above_threshold)
except Exception as e:
get_logger().error(f"Failed to publish dual publishing suggestions, error: {e}")
else:
self.push_inline_code_suggestions(data)
if self.progress_response:
self.progress_response.delete()
self.git_provider.remove_comment(self.progress_response)
else:
get_logger().info('Code suggestions generated for PR, but not published since publish_output is False.')
get_settings().data = {"artifact": data}
return
except Exception as e:
get_logger().error(f"Failed to generate code suggestions for PR, error: {e}")
get_logger().error(f"Failed to generate code suggestions for PR, error: {e}",
artifact={"traceback": traceback.format_exc()})
if get_settings().config.publish_output:
if self.progress_response:
self.progress_response.delete()
@ -300,7 +335,7 @@ class PRCodeSuggestions:
if self.patches_diff:
get_logger().debug(f"PR diff", artifact=self.patches_diff)
self.prediction = await self._get_prediction(model, self.patches_diff)
self.prediction = await self._get_prediction(model, self.patches_diff, self.patches_diff_no_line_number)
else:
get_logger().warning(f"Empty PR diff")
self.prediction = None
@ -308,42 +343,76 @@ class PRCodeSuggestions:
data = self.prediction
return data
async def _get_prediction(self, model: str, patches_diff: str) -> dict:
async def _get_prediction(self, model: str, patches_diff: str, patches_diff_no_line_number: str) -> dict:
variables = copy.deepcopy(self.vars)
variables["diff"] = patches_diff # update diff
variables["diff_no_line_numbers"] = patches_diff_no_line_number # update diff
environment = Environment(undefined=StrictUndefined)
system_prompt = environment.from_string(self.pr_code_suggestions_prompt_system).render(variables)
user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
response, finish_reason = await self.ai_handler.chat_completion(
model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
if not get_settings().config.publish_output:
get_settings().system_prompt = system_prompt
get_settings().user_prompt = user_prompt
# load suggestions from the AI response
data = self._prepare_pr_code_suggestions(response)
# self-reflect on suggestions
if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
model_turbo = get_settings().config.model_turbo # use turbo model for self-reflection, since it is an easier task
response_reflect = await self.self_reflect_on_suggestions(data["code_suggestions"],
patches_diff, model=model_turbo)
if response_reflect:
response_reflect_yaml = load_yaml(response_reflect)
code_suggestions_feedback = response_reflect_yaml["code_suggestions"]
if len(code_suggestions_feedback) == len(data["code_suggestions"]):
for i, suggestion in enumerate(data["code_suggestions"]):
try:
suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
suggestion["score_why"] = code_suggestions_feedback[i]["why"]
except Exception as e: #
get_logger().error(f"Error processing suggestion score {i}",
artifact={"suggestion": suggestion,
"code_suggestions_feedback": code_suggestions_feedback[i]})
suggestion["score"] = 7
suggestion["score_why"] = ""
else:
# get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
# self-reflect on suggestions (mandatory, since line numbers are generated now here)
model_reflection = get_settings().config.model
response_reflect = await self.self_reflect_on_suggestions(data["code_suggestions"],
patches_diff, model=model_reflection)
if response_reflect:
response_reflect_yaml = load_yaml(response_reflect)
code_suggestions_feedback = response_reflect_yaml["code_suggestions"]
if len(code_suggestions_feedback) == len(data["code_suggestions"]):
for i, suggestion in enumerate(data["code_suggestions"]):
suggestion["score"] = 7
suggestion["score_why"] = ""
try:
suggestion["score"] = code_suggestions_feedback[i]["suggestion_score"]
suggestion["score_why"] = code_suggestions_feedback[i]["why"]
if 'relevant_lines_start' not in suggestion:
relevant_lines_start = code_suggestions_feedback[i].get('relevant_lines_start', -1)
relevant_lines_end = code_suggestions_feedback[i].get('relevant_lines_end', -1)
suggestion['relevant_lines_start'] = relevant_lines_start
suggestion['relevant_lines_end'] = relevant_lines_end
if relevant_lines_start < 0 or relevant_lines_end < 0:
suggestion["score"] = 0
try:
if get_settings().config.publish_output:
suggestion_statistics_dict = {'score': int(suggestion["score"]),
'label': suggestion["label"].lower().strip()}
get_logger().info(f"PR-Agent suggestions statistics",
statistics=suggestion_statistics_dict, analytics=True)
except Exception as e:
get_logger().error(f"Failed to log suggestion statistics, error: {e}")
pass
except Exception as e: #
get_logger().error(f"Error processing suggestion score {i}",
artifact={"suggestion": suggestion,
"code_suggestions_feedback": code_suggestions_feedback[i]})
suggestion["score"] = 7
suggestion["score_why"] = ""
# if the before and after code is the same, clear one of them
try:
if suggestion['existing_code'] == suggestion['improved_code']:
get_logger().debug(
f"edited improved suggestion {i + 1}, because equal to existing code: {suggestion['existing_code']}")
if get_settings().pr_code_suggestions.commitable_code_suggestions:
suggestion['improved_code'] = "" # we need 'existing_code' to locate the code in the PR
else:
suggestion['existing_code'] = ""
except Exception as e:
get_logger().error(f"Error processing suggestion {i + 1}, error: {e}")
else:
# get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
for i, suggestion in enumerate(data["code_suggestions"]):
suggestion["score"] = 7
suggestion["score_why"] = ""
return data
@ -353,10 +422,10 @@ class PRCodeSuggestions:
suggestion_truncation_message = get_settings().get("PR_CODE_SUGGESTIONS.SUGGESTION_TRUNCATION_MESSAGE", "")
if max_code_suggestion_length > 0:
if len(suggestion['improved_code']) > max_code_suggestion_length:
suggestion['improved_code'] = suggestion['improved_code'][:max_code_suggestion_length]
suggestion['improved_code'] += f"\n{suggestion_truncation_message}"
get_logger().info(f"Truncated suggestion from {len(suggestion['improved_code'])} "
f"characters to {max_code_suggestion_length} characters")
suggestion['improved_code'] = suggestion['improved_code'][:max_code_suggestion_length]
suggestion['improved_code'] += f"\n{suggestion_truncation_message}"
return suggestion
def _prepare_pr_code_suggestions(self, predictions: str) -> Dict:
@ -371,8 +440,7 @@ class PRCodeSuggestions:
one_sentence_summary_list = []
for i, suggestion in enumerate(data['code_suggestions']):
try:
needed_keys = ['one_sentence_summary', 'label', 'relevant_file', 'relevant_lines_start',
'relevant_lines_end']
needed_keys = ['one_sentence_summary', 'label', 'relevant_file']
is_valid_keys = True
for key in needed_keys:
if key not in suggestion:
@ -394,13 +462,6 @@ class PRCodeSuggestions:
continue
if ('existing_code' in suggestion) and ('improved_code' in suggestion):
if suggestion['existing_code'] == suggestion['improved_code']:
get_logger().debug(
f"edited improved suggestion {i + 1}, because equal to existing code: {suggestion['existing_code']}")
if get_settings().pr_code_suggestions.commitable_code_suggestions:
suggestion['improved_code'] = "" # we need 'existing_code' to locate the code in the PR
else:
suggestion['existing_code'] = ""
suggestion = self._truncate_if_needed(suggestion)
one_sentence_summary_list.append(suggestion['one_sentence_summary'])
suggestion_list.append(suggestion)
@ -503,9 +564,33 @@ class PRCodeSuggestions:
return True
return False
def remove_line_numbers(self, patches_diff_list: List[str]) -> List[str]:
# create a copy of the patches_diff_list, without line numbers for '__new hunk__' sections
try:
self.patches_diff_list_no_line_numbers = []
for patches_diff in self.patches_diff_list:
patches_diff_lines = patches_diff.splitlines()
for i, line in enumerate(patches_diff_lines):
if line.strip():
if line[0].isdigit():
# strip the leading line-number prefix (digits followed by a separator character)
for j, char in enumerate(line):
if not char.isdigit():
patches_diff_lines[i] = line[j + 1:]
break
self.patches_diff_list_no_line_numbers.append('\n'.join(patches_diff_lines))
return self.patches_diff_list_no_line_numbers
except Exception as e:
get_logger().error(f"Error removing line numbers from patches_diff_list, error: {e}")
return patches_diff_list
async def _prepare_prediction_extended(self, model: str) -> dict:
self.patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
# create a copy of the patches_diff_list, without line numbers for '__new hunk__' sections
self.patches_diff_list_no_line_numbers = self.remove_line_numbers(self.patches_diff_list)
if self.patches_diff_list:
get_logger().info(f"Number of PR chunk calls: {len(self.patches_diff_list)}")
get_logger().debug(f"PR diff:", artifact=self.patches_diff_list)
@ -513,12 +598,14 @@ class PRCodeSuggestions:
# parallelize calls to AI:
if get_settings().pr_code_suggestions.parallel_calls:
prediction_list = await asyncio.gather(
*[self._get_prediction(model, patches_diff) for patches_diff in self.patches_diff_list])
*[self._get_prediction(model, patches_diff, patches_diff_no_line_numbers) for
patches_diff, patches_diff_no_line_numbers in
zip(self.patches_diff_list, self.patches_diff_list_no_line_numbers)])
self.prediction_list = prediction_list
else:
prediction_list = []
for i, patches_diff in enumerate(self.patches_diff_list):
prediction = await self._get_prediction(model, patches_diff)
for patches_diff, patches_diff_no_line_numbers in zip(self.patches_diff_list, self.patches_diff_list_no_line_numbers):
prediction = await self._get_prediction(model, patches_diff, patches_diff_no_line_numbers)
prediction_list.append(prediction)
data = {"code_suggestions": []}
@ -527,18 +614,16 @@ class PRCodeSuggestions:
score_threshold = max(1, int(get_settings().pr_code_suggestions.suggestions_score_threshold))
for i, prediction in enumerate(predictions["code_suggestions"]):
try:
if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
score = int(prediction.get("score", 1))
if score >= score_threshold:
data["code_suggestions"].append(prediction)
else:
get_logger().info(
f"Removing suggestions {i} from call {j}, because score is {score}, and score_threshold is {score_threshold}",
artifact=prediction)
else:
score = int(prediction.get("score", 1))
if score >= score_threshold:
data["code_suggestions"].append(prediction)
else:
get_logger().info(
f"Removing suggestions {i} from call {j}, because score is {score}, and score_threshold is {score_threshold}",
artifact=prediction)
except Exception as e:
get_logger().error(f"Error getting PR diff for suggestion {i} in call {j}, error: {e}")
get_logger().error(f"Error getting PR diff for suggestion {i} in call {j}, error: {e}",
artifact={"prediction": prediction})
self.data = data
else:
get_logger().warning(f"Empty PR diff list")
@ -589,8 +674,7 @@ class PRCodeSuggestions:
if get_settings().pr_code_suggestions.final_clip_factor != 1:
max_len = max(
len(data_sorted),
get_settings().pr_code_suggestions.num_code_suggestions,
get_settings().pr_code_suggestions.num_code_suggestions_per_chunk,
int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk),
)
new_len = int(0.5 + max_len * get_settings().pr_code_suggestions.final_clip_factor)
if new_len < len(data_sorted):
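For illustration, the clipping arithmetic above with assumed numbers:
data_sorted_len = 12          # suggestions after sorting
per_chunk = 4                 # num_code_suggestions_per_chunk
final_clip_factor = 0.5
max_len = max(data_sorted_len, per_chunk)         # 12
new_len = int(0.5 + max_len * final_clip_factor)  # 6 -> keep only the top 6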
@ -610,22 +694,20 @@ class PRCodeSuggestions:
pr_body += "No suggestions found to improve this PR."
return pr_body
if get_settings().pr_code_suggestions.enable_intro_text and get_settings().config.is_auto_command:
pr_body += "Explore these optional code suggestions:\n\n"
language_extension_map_org = get_settings().language_extension_map_org
extension_to_language = {}
for language, extensions in language_extension_map_org.items():
for ext in extensions:
extension_to_language[ext] = language
pr_body = "## PR Code Suggestions ✨\n\n"
pr_body += "<table>"
header = f"Suggestion"
delta = 66
header += "&nbsp; " * delta
if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td><td align=center>Score</td></tr>"""
else:
pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td></tr>"""
pr_body += f"""<thead><tr><td>Category</td><td align=left>{header}</td><td align=center>Score</td></tr>"""
pr_body += """<tbody>"""
suggestions_labels = dict()
# add all suggestions related to each label
@ -636,12 +718,11 @@ class PRCodeSuggestions:
suggestions_labels[label].append(suggestion)
# sort suggestions_labels by the suggestion with the highest score
if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
suggestions_labels = dict(
sorted(suggestions_labels.items(), key=lambda x: max([s['score'] for s in x[1]]), reverse=True))
# sort the suggestions inside each label group by score
for label, suggestions in suggestions_labels.items():
suggestions_labels[label] = sorted(suggestions, key=lambda x: x['score'], reverse=True)
suggestions_labels = dict(
sorted(suggestions_labels.items(), key=lambda x: max([s['score'] for s in x[1]]), reverse=True))
# sort the suggestions inside each label group by score
for label, suggestions in suggestions_labels.items():
suggestions_labels[label] = sorted(suggestions, key=lambda x: x['score'], reverse=True)
counter_suggestions = 0
for label, suggestions in suggestions_labels.items():
@ -678,7 +759,7 @@ class PRCodeSuggestions:
patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')
example_code = ""
example_code += f"```diff\n{patch}\n```\n"
example_code += f"```diff\n{patch.rstrip()}\n```\n"
if i == 0:
pr_body += f"""<td>\n\n"""
else:
@ -700,16 +781,14 @@ class PRCodeSuggestions:
{example_code.rstrip()}
"""
if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
pr_body += f"<details><summary>Suggestion importance[1-10]: {suggestion['score']}</summary>\n\n"
pr_body += f"Why: {suggestion['score_why']}\n\n"
pr_body += f"</details>"
pr_body += f"<details><summary>Suggestion importance[1-10]: {suggestion['score']}</summary>\n\n"
pr_body += f"Why: {suggestion['score_why']}\n\n"
pr_body += f"</details>"
pr_body += f"</details>"
# # add another column for 'score'
if get_settings().pr_code_suggestions.self_reflect_on_suggestions:
pr_body += f"</td><td align=center>{suggestion['score']}\n\n"
pr_body += f"</td><td align=center>{suggestion['score']}\n\n"
pr_body += f"</td></tr>"
counter_suggestions += 1
@ -734,7 +813,8 @@ class PRCodeSuggestions:
variables = {'suggestion_list': suggestion_list,
'suggestion_str': suggestion_str,
"diff": patches_diff,
'num_code_suggestions': len(suggestion_list)}
'num_code_suggestions': len(suggestion_list),
"is_ai_metadata": get_settings().get("config.enable_ai_metadata", False)}
environment = Environment(undefined=StrictUndefined)
system_prompt_reflect = environment.from_string(
get_settings().pr_code_suggestions_reflect_prompt.system).render(

View File

@ -1,3 +1,5 @@
from dynaconf import Dynaconf
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.log import get_logger
@ -28,20 +30,33 @@ class PRConfig:
return ""
def _prepare_pr_configs(self) -> str:
import tomli
with open(get_settings().find_file("configuration.toml"), "rb") as conf_file:
configuration_headers = [header.lower() for header in tomli.load(conf_file).keys()]
conf_file = get_settings().find_file("configuration.toml")
conf_settings = Dynaconf(settings_files=[conf_file])
configuration_headers = [header.lower() for header in conf_settings.keys()]
relevant_configs = {
header: configs for header, configs in get_settings().to_dict().items()
if header.lower().startswith("pr_") and header.lower() in configuration_headers
if (header.lower().startswith("pr_") or header.lower().startswith("config")) and header.lower() in configuration_headers
}
comment_str = "Possible Configurations:"
skip_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider', "skip_keys",
'trial_prefix_message', 'no_eligible_message', 'identity_provider', 'ALLOWED_REPOS',
'APP_NAME']
extra_skip_keys = get_settings().config.get('config.skip_keys', [])
if extra_skip_keys:
skip_keys.extend(extra_skip_keys)
markdown_text = "<details> <summary><strong>🛠️ PR-Agent Configurations:</strong></summary> \n\n"
markdown_text += f"\n\n```yaml\n\n"
for header, configs in relevant_configs.items():
if configs:
comment_str += "\n"
markdown_text += "\n\n"
markdown_text += f"==================== {header} ===================="
for key, value in configs.items():
comment_str += f"\n{header.lower()}.{key.lower()} = {repr(value) if isinstance(value, str) else value}"
comment_str += " "
if get_settings().config.verbosity_level >= 2:
get_logger().info(f"comment_str:\n{comment_str}")
return comment_str
if key in skip_keys:
continue
markdown_text += f"\n{header.lower()}.{key.lower()} = {repr(value) if isinstance(value, str) else value}"
markdown_text += " "
markdown_text += "\n```"
markdown_text += "\n</details>\n"
get_logger().info(f"Possible Configurations outputted to PR comment", artifact=markdown_text)
return markdown_text
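A hedged sketch of the header-filtering step above, run against an in-memory dict instead of the real configuration.toml:
configuration_headers = ["config", "pr_reviewer", "pr_description", "litellm"]
settings = {
    "config": {"verbosity_level": 0},
    "pr_reviewer": {"require_security_review": True},
    "litellm": {"drop_params": True},
}
relevant_configs = {
    header: configs for header, configs in settings.items()
    if (header.lower().startswith("pr_") or header.lower().startswith("config"))
    and header.lower() in configuration_headers
}
# relevant_configs keeps "config" and "pr_reviewer"; "litellm" is filtered out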

View File

@ -12,7 +12,7 @@ from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_diff_multiple_patchs, \
OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import set_custom_labels
from pr_agent.algo.utils import set_custom_labels, PRDescriptionHeader
from pr_agent.algo.utils import load_yaml, get_user_labels, ModelType, show_relevant_configurations, get_max_tokens, \
clip_tokens
from pr_agent.config_loader import get_settings
@ -20,6 +20,8 @@ from pr_agent.git_providers import get_git_provider, GithubProvider, get_git_pro
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
from pr_agent.servers.help import HelpMessage
from pr_agent.tools.ticket_pr_compliance_check import extract_ticket_links_from_pr_description, extract_tickets, \
extract_and_cache_pr_tickets
class PRDescription:
@ -38,6 +40,7 @@ class PRDescription:
self.git_provider.get_languages(), self.git_provider.get_files()
)
self.pr_id = self.git_provider.get_pr_id()
self.keys_fix = ["filename:", "language:", "changes_summary:", "changes_title:", "description:", "title:"]
if get_settings().pr_description.enable_semantic_files_types and not self.git_provider.is_supported(
"gfm_markdown"):
@ -60,6 +63,7 @@ class PRDescription:
"enable_custom_labels": get_settings().config.enable_custom_labels,
"custom_labels_class": "", # will be filled if necessary in 'set_custom_labels' function
"enable_semantic_files_types": get_settings().pr_description.enable_semantic_files_types,
"related_tickets": "",
}
self.user_description = self.git_provider.get_user_description()
@ -87,6 +91,9 @@ class PRDescription:
if get_settings().config.publish_output and not get_settings().config.get('is_auto_command', False):
self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)
# ticket extraction if exists
await extract_and_cache_pr_tickets(self.git_provider, self.vars)
await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
if self.prediction:
@ -117,9 +124,8 @@ class PRDescription:
pr_body += "<hr>\n\n<details> <summary><strong>✨ Describe tool usage guide:</strong></summary><hr> \n\n"
pr_body += HelpMessage.get_describe_usage_guide()
pr_body += "\n</details>\n"
elif self.git_provider.is_supported("gfm_markdown") and get_settings().pr_description.enable_help_comment:
pr_body += "\n\n___\n\n> 💡 **PR-Agent usage**:"
pr_body += "\n>Comment `/help` on the PR to get a list of all available PR-Agent tools and their descriptions\n\n"
elif get_settings().pr_description.enable_help_comment:
pr_body += '\n\n___\n\n> 💡 **PR-Agent usage**: Comment `/help "your question"` on any pull request to receive relevant information'
# Output the relevant configurations if enabled
if get_settings().get('config', {}).get('output_relevant_configurations', False):
@ -127,7 +133,7 @@ class PRDescription:
if get_settings().config.publish_output:
# publish labels
if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
if get_settings().pr_description.publish_labels and pr_labels and self.git_provider.is_supported("get_labels"):
original_labels = self.git_provider.get_pr_labels(update=True)
get_logger().debug(f"original labels", artifact=original_labels)
user_labels = get_user_labels(original_labels)
@ -227,7 +233,7 @@ class PRDescription:
file_description_str_list = []
for i, result in enumerate(results):
prediction_files = result.strip().removeprefix('```yaml').strip('`').strip()
if load_yaml(prediction_files) and prediction_files.startswith('pr_files'):
if load_yaml(prediction_files, keys_fix_yaml=self.keys_fix) and prediction_files.startswith('pr_files'):
prediction_files = prediction_files.removeprefix('pr_files:').strip()
file_description_str_list.append(prediction_files)
else:
@ -305,16 +311,16 @@ extra_file_yaml =
# final processing
self.prediction = prediction_headers + "\n" + "pr_files:\n" + files_walkthrough
if not load_yaml(self.prediction):
if not load_yaml(self.prediction, keys_fix_yaml=self.keys_fix):
get_logger().error(f"Error getting valid YAML in large PR handling for describe {self.pr_id}")
if load_yaml(prediction_headers):
if load_yaml(prediction_headers, keys_fix_yaml=self.keys_fix):
get_logger().debug(f"Using only headers for describe {self.pr_id}")
self.prediction = prediction_headers
async def extend_additional_files(self, remaining_files_list) -> str:
prediction = self.prediction
try:
original_prediction_dict = load_yaml(self.prediction)
original_prediction_dict = load_yaml(self.prediction, keys_fix_yaml=self.keys_fix)
prediction_extra = "pr_files:"
for file in remaining_files_list:
extra_file_yaml = f"""\
@ -328,12 +334,12 @@ extra_file_yaml =
additional files (token-limit)
"""
prediction_extra = prediction_extra + "\n" + extra_file_yaml.strip()
prediction_extra_dict = load_yaml(prediction_extra)
prediction_extra_dict = load_yaml(prediction_extra, keys_fix_yaml=self.keys_fix)
# merge the two dictionaries
if isinstance(original_prediction_dict, dict) and isinstance(prediction_extra_dict, dict):
original_prediction_dict["pr_files"].extend(prediction_extra_dict["pr_files"])
new_yaml = yaml.dump(original_prediction_dict)
if load_yaml(new_yaml):
if load_yaml(new_yaml, keys_fix_yaml=self.keys_fix):
prediction = new_yaml
return prediction
except Exception as e:
@ -362,7 +368,7 @@ extra_file_yaml =
def _prepare_data(self):
# Load the AI prediction data into a dictionary
self.data = load_yaml(self.prediction.strip())
self.data = load_yaml(self.prediction.strip(), keys_fix_yaml=self.keys_fix)
if get_settings().pr_description.add_original_user_description and self.user_description:
self.data["User Description"] = self.user_description
@ -495,7 +501,7 @@ extra_file_yaml =
pr_body += "</details>\n"
elif 'pr_files' in key.lower() and get_settings().pr_description.enable_semantic_files_types:
changes_walkthrough, pr_file_changes = self.process_pr_files_prediction(changes_walkthrough, value)
changes_walkthrough = f"### **Changes walkthrough** 📝\n{changes_walkthrough}"
changes_walkthrough = f"{PRDescriptionHeader.CHANGES_WALKTHROUGH.value}\n{changes_walkthrough}"
else:
# if the value is a list, join its items by comma
if isinstance(value, list):
@ -638,9 +644,10 @@ def insert_br_after_x_chars(text, x=70):
text = replace_code_tags(text)
# convert list items to <li>
if text.startswith("- "):
if text.startswith("- ") or text.startswith("* "):
text = "<li>" + text[2:]
text = text.replace("\n- ", '<br><li> ').replace("\n - ", '<br><li> ')
text = text.replace("\n* ", '<br><li> ').replace("\n * ", '<br><li> ')
# convert new lines to <br>
text = text.replace("\n", '<br>')
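Isolating just the bullet handling above, a small sketch of how "-" and "*" list items end up as <li>/<br> markup:
def bullets_to_html(text: str) -> str:
    if text.startswith("- ") or text.startswith("* "):
        text = "<li>" + text[2:]
    text = text.replace("\n- ", '<br><li> ').replace("\n - ", '<br><li> ')
    text = text.replace("\n* ", '<br><li> ').replace("\n * ", '<br><li> ')
    return text.replace("\n", '<br>')
print(bullets_to_html("* first item\n* second item"))  # "<li>first item<br><li> second item"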

View File

@ -1,106 +1,291 @@
import copy
from functools import partial
from pathlib import Path
from jinja2 import Environment, StrictUndefined
from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import ModelType, load_yaml, clip_tokens
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider, GithubProvider
from pr_agent.git_providers import GithubProvider, BitbucketServerProvider, \
get_git_provider_with_context
from pr_agent.log import get_logger
def extract_header(snippet):
res = ''
lines = snippet.split('===Snippet content===')[0].split('\n')
highest_header = ''
highest_level = float('inf')
for line in lines[::-1]:
line = line.strip()
if line.startswith('Header '):
highest_header = line.split(': ')[1]
if highest_header:
res = f"#{highest_header.lower().replace(' ', '-')}"
return res
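An illustrative input for extract_header above (the snippet text is made up):
snippet = (
    "Header 1: Configuration Options\n"
    "===Snippet content===\n"
    "Some documentation text..."
)
# extract_header(snippet) -> "#configuration-options"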
class PRHelpMessage:
def __init__(self, pr_url: str, args=None, ai_handler=None):
self.git_provider = get_git_provider()(pr_url)
def __init__(self, pr_url: str, args=None, ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
self.git_provider = get_git_provider_with_context(pr_url)
self.ai_handler = ai_handler()
self.question_str = self.parse_args(args)
self.num_retrieved_snippets = get_settings().get('pr_help.num_retrieved_snippets', 5)
if self.question_str:
self.vars = {
"question": self.question_str,
"snippets": "",
}
self.token_handler = TokenHandler(None,
self.vars,
get_settings().pr_help_prompts.system,
get_settings().pr_help_prompts.user)
async def _prepare_prediction(self, model: str):
try:
variables = copy.deepcopy(self.vars)
environment = Environment(undefined=StrictUndefined)
system_prompt = environment.from_string(get_settings().pr_help_prompts.system).render(variables)
user_prompt = environment.from_string(get_settings().pr_help_prompts.user).render(variables)
response, finish_reason = await self.ai_handler.chat_completion(
model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
return response
except Exception as e:
get_logger().error(f"Error while preparing prediction: {e}")
return ""
def parse_args(self, args):
if args and len(args) > 0:
question_str = " ".join(args)
else:
question_str = ""
return question_str
async def run(self):
try:
if not self.git_provider.is_supported("gfm_markdown"):
self.git_provider.publish_comment(
"The `Help` tool requires gfm markdown, which is not supported by your code platform.")
return
if self.question_str:
get_logger().info(f'Answering a PR question about the PR {self.git_provider.pr_url} ')
get_logger().info('Getting PR Help Message...')
relevant_configs = {'pr_help': dict(get_settings().pr_help),
'config': dict(get_settings().config)}
get_logger().debug("Relevant configs", artifacts=relevant_configs)
pr_comment = "## PR Agent Walkthrough 🤖\n\n"
pr_comment += "Welcome to the PR Agent, an AI-powered tool for automated pull request analysis, feedback, suggestions and more."""
pr_comment += "\n\nHere is a list of tools you can use to interact with the PR Agent:\n"
base_path = "https://pr-agent-docs.codium.ai/tools"
if not get_settings().get('openai.key'):
if get_settings().config.publish_output:
self.git_provider.publish_comment(
"The `Help` tool chat feature requires an OpenAI API key for calculating embeddings")
else:
get_logger().error("The `Help` tool chat feature requires an OpenAI API key for calculating embeddings")
return
tool_names = []
tool_names.append(f"[DESCRIBE]({base_path}/describe/)")
tool_names.append(f"[REVIEW]({base_path}/review/)")
tool_names.append(f"[IMPROVE]({base_path}/improve/)")
tool_names.append(f"[UPDATE CHANGELOG]({base_path}/update_changelog/)")
tool_names.append(f"[ADD DOCS]({base_path}/documentation/) 💎")
tool_names.append(f"[TEST]({base_path}/test/) 💎")
tool_names.append(f"[IMPROVE COMPONENT]({base_path}/improve_component/) 💎")
tool_names.append(f"[ANALYZE]({base_path}/analyze/) 💎")
tool_names.append(f"[ASK]({base_path}/ask/)")
tool_names.append(f"[GENERATE CUSTOM LABELS]({base_path}/custom_labels/) 💎")
tool_names.append(f"[CI FEEDBACK]({base_path}/ci_feedback/) 💎")
tool_names.append(f"[CUSTOM PROMPT]({base_path}/custom_prompt/) 💎")
tool_names.append(f"[SIMILAR ISSUE]({base_path}/similar_issues/)")
# current path
docs_path= Path(__file__).parent.parent.parent / 'docs' / 'docs'
# get all the 'md' files inside docs_path and its subdirectories
md_files = list(docs_path.glob('**/*.md'))
folders_to_exclude = ['/finetuning_benchmark/']
files_to_exclude = {'EXAMPLE_BEST_PRACTICE.md', 'compression_strategy.md', '/docs/overview/index.md'}
md_files = [file for file in md_files if not any(folder in str(file) for folder in folders_to_exclude) and not any(file.name == file_to_exclude for file_to_exclude in files_to_exclude)]
descriptions = []
descriptions.append("Generates PR description - title, type, summary, code walkthrough and labels")
descriptions.append("Adjustable feedback about the PR, possible issues, security concerns, review effort and more")
descriptions.append("Code suggestions for improving the PR")
descriptions.append("Automatically updates the changelog")
descriptions.append("Generates documentation to methods/functions/classes that changed in the PR")
descriptions.append("Generates unit tests for a specific component, based on the PR code change")
descriptions.append("Code suggestions for a specific component that changed in the PR")
descriptions.append("Identifies code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component")
descriptions.append("Answering free-text questions about the PR")
descriptions.append("Generates custom labels for the PR, based on specific guidelines defined by the user")
descriptions.append("Generates feedback and analysis for a failed CI job")
descriptions.append("Generates custom suggestions for improving the PR code, derived only from a specific guidelines prompt defined by the user")
descriptions.append("Automatically retrieves and presents similar issues")
# sort the 'md_files' so that 'priority_files' will be at the top
priority_files_strings = ['/docs/index.md', '/usage-guide', 'tools/describe.md', 'tools/review.md',
'tools/improve.md', '/faq']
md_files_priority = [file for file in md_files if
any(priority_string in str(file) for priority_string in priority_files_strings)]
md_files_not_priority = [file for file in md_files if file not in md_files_priority]
md_files = md_files_priority + md_files_not_priority
commands =[]
commands.append("`/describe`")
commands.append("`/review`")
commands.append("`/improve`")
commands.append("`/update_changelog`")
commands.append("`/add_docs`")
commands.append("`/test`")
commands.append("`/improve_component`")
commands.append("`/analyze`")
commands.append("`/ask`")
commands.append("`/generate_labels`")
commands.append("`/checks`")
commands.append("`/custom_prompt`")
commands.append("`/similar_issue`")
docs_prompt = ""
for file in md_files:
try:
with open(file, 'r') as f:
file_path = str(file).replace(str(docs_path), '')
docs_prompt += f"\n==file name==\n\n{file_path}\n\n==file content==\n\n{f.read().strip()}\n=========\n\n"
except Exception as e:
get_logger().error(f"Error while reading the file {file}: {e}")
token_count = self.token_handler.count_tokens(docs_prompt)
get_logger().debug(f"Token count of full documentation website: {token_count}")
checkbox_list = []
checkbox_list.append(" - [ ] Run <!-- /describe -->")
checkbox_list.append(" - [ ] Run <!-- /review -->")
checkbox_list.append(" - [ ] Run <!-- /improve -->")
checkbox_list.append(" - [ ] Run <!-- /update_changelog -->")
checkbox_list.append(" - [ ] Run <!-- /add_docs -->")
checkbox_list.append(" - [ ] Run <!-- /test -->")
checkbox_list.append(" - [ ] Run <!-- /improve_component -->")
checkbox_list.append(" - [ ] Run <!-- /analyze -->")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
model = get_settings().config.model
max_tokens_full = MAX_TOKENS[model] # note - here we take the actual max tokens, without any reductions. we do aim to get the full documentation website in the prompt
delta_output = 2000
if token_count > max_tokens_full - delta_output:
get_logger().info(f"Token count {token_count} exceeds the limit {max_tokens_full - delta_output}. Skipping the PR Help message.")
docs_prompt = clip_tokens(docs_prompt, max_tokens_full - delta_output)
self.vars['snippets'] = docs_prompt.strip()
if isinstance(self.git_provider, GithubProvider) and not get_settings().config.get('disable_checkboxes', False):
pr_comment += f"<table><tr align='left'><th align='left'>Tool</th><th align='left'>Description</th><th align='left'>Trigger Interactively :gem:</th></tr>"
for i in range(len(tool_names)):
pr_comment += f"\n<tr><td align='left'>\n\n<strong>{tool_names[i]}</strong></td>\n<td>{descriptions[i]}</td>\n<td>\n\n{checkbox_list[i]}\n</td></tr>"
pr_comment += "</table>\n\n"
pr_comment += f"""\n\n(1) Note that each tool be [triggered automatically](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#github-app-automatic-tools-when-a-new-pr-is-opened) when a new PR is opened, or called manually by [commenting on a PR](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#online-usage)."""
pr_comment += f"""\n\n(2) Tools marked with [*] require additional parameters to be passed. For example, to invoke the `/ask` tool, you need to comment on a PR: `/ask "<question content>"`. See the relevant documentation for each tool for more details."""
# run the AI model
response = await retry_with_fallback_models(self._prepare_prediction, model_type=ModelType.REGULAR)
response_yaml = load_yaml(response)
response_str = response_yaml.get('response')
relevant_sections = response_yaml.get('relevant_sections')
if not relevant_sections:
get_logger().info(f"Could not find relevant answer for the question: {self.question_str}")
if get_settings().config.publish_output:
answer_str = f"### Question: \n{self.question_str}\n\n"
answer_str += f"### Answer:\n\n"
answer_str += f"Could not find relevant information to answer the question. Please provide more details and try again."
self.git_provider.publish_comment(answer_str)
return ""
# prepare the answer
answer_str = ""
if response_str:
answer_str += f"### Question: \n{self.question_str}\n\n"
answer_str += f"### Answer:\n{response_str.strip()}\n\n"
answer_str += f"#### Relevant Sources:\n\n"
base_path = "https://qodo-merge-docs.qodo.ai/"
for section in relevant_sections:
file = section.get('file_name').strip().removesuffix('.md')
if str(section['relevant_section_header_string']).strip():
markdown_header = section['relevant_section_header_string'].strip().strip('#').strip().lower().replace(' ', '-').replace("'", '').replace('(', '').replace(')', '').replace(',', '').replace('.', '').replace('?', '').replace('!', '')
answer_str += f"> - {base_path}{file}#{markdown_header}\n"
else:
answer_str += f"> - {base_path}{file}\n"
# publish the answer
if get_settings().config.publish_output:
self.git_provider.publish_comment(answer_str)
else:
get_logger().info(f"Answer:\n{answer_str}")
else:
pr_comment += f"<table><tr align='left'><th align='left'>Tool</th><th align='left'>Command</th><th align='left'>Description</th></tr>"
for i in range(len(tool_names)):
pr_comment += f"\n<tr><td align='left'>\n\n<strong>{tool_names[i]}</strong></td><td>{commands[i]}</td><td>{descriptions[i]}</td></tr>"
pr_comment += "</table>\n\n"
pr_comment += f"""\n\nNote that each tool be [invoked automatically](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/) when a new PR is opened, or called manually by [commenting on a PR](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#online-usage)."""
if get_settings().config.publish_output:
self.git_provider.publish_comment(pr_comment)
if not isinstance(self.git_provider, BitbucketServerProvider) and not self.git_provider.is_supported("gfm_markdown"):
self.git_provider.publish_comment(
"The `Help` tool requires gfm markdown, which is not supported by your code platform.")
return
get_logger().info('Getting PR Help Message...')
relevant_configs = {'pr_help': dict(get_settings().pr_help),
'config': dict(get_settings().config)}
get_logger().debug("Relevant configs", artifacts=relevant_configs)
pr_comment = "## PR Agent Walkthrough 🤖\n\n"
pr_comment += "Welcome to the PR Agent, an AI-powered tool for automated pull request analysis, feedback, suggestions and more."""
pr_comment += "\n\nHere is a list of tools you can use to interact with the PR Agent:\n"
base_path = "https://pr-agent-docs.codium.ai/tools"
tool_names = []
tool_names.append(f"[DESCRIBE]({base_path}/describe/)")
tool_names.append(f"[REVIEW]({base_path}/review/)")
tool_names.append(f"[IMPROVE]({base_path}/improve/)")
tool_names.append(f"[UPDATE CHANGELOG]({base_path}/update_changelog/)")
tool_names.append(f"[ADD DOCS]({base_path}/documentation/) 💎")
tool_names.append(f"[TEST]({base_path}/test/) 💎")
tool_names.append(f"[IMPROVE COMPONENT]({base_path}/improve_component/) 💎")
tool_names.append(f"[ANALYZE]({base_path}/analyze/) 💎")
tool_names.append(f"[ASK]({base_path}/ask/)")
tool_names.append(f"[GENERATE CUSTOM LABELS]({base_path}/custom_labels/) 💎")
tool_names.append(f"[CI FEEDBACK]({base_path}/ci_feedback/) 💎")
tool_names.append(f"[CUSTOM PROMPT]({base_path}/custom_prompt/) 💎")
tool_names.append(f"[SIMILAR ISSUE]({base_path}/similar_issues/)")
descriptions = []
descriptions.append("Generates PR description - title, type, summary, code walkthrough and labels")
descriptions.append("Adjustable feedback about the PR, possible issues, security concerns, review effort and more")
descriptions.append("Code suggestions for improving the PR")
descriptions.append("Automatically updates the changelog")
descriptions.append("Generates documentation to methods/functions/classes that changed in the PR")
descriptions.append("Generates unit tests for a specific component, based on the PR code change")
descriptions.append("Code suggestions for a specific component that changed in the PR")
descriptions.append("Identifies code components that changed in the PR, and enables to interactively generate tests, docs, and code suggestions for each component")
descriptions.append("Answering free-text questions about the PR")
descriptions.append("Generates custom labels for the PR, based on specific guidelines defined by the user")
descriptions.append("Generates feedback and analysis for a failed CI job")
descriptions.append("Generates custom suggestions for improving the PR code, derived only from a specific guidelines prompt defined by the user")
descriptions.append("Automatically retrieves and presents similar issues")
commands =[]
commands.append("`/describe`")
commands.append("`/review`")
commands.append("`/improve`")
commands.append("`/update_changelog`")
commands.append("`/add_docs`")
commands.append("`/test`")
commands.append("`/improve_component`")
commands.append("`/analyze`")
commands.append("`/ask`")
commands.append("`/generate_labels`")
commands.append("`/checks`")
commands.append("`/custom_prompt`")
commands.append("`/similar_issue`")
checkbox_list = []
checkbox_list.append(" - [ ] Run <!-- /describe -->")
checkbox_list.append(" - [ ] Run <!-- /review -->")
checkbox_list.append(" - [ ] Run <!-- /improve -->")
checkbox_list.append(" - [ ] Run <!-- /update_changelog -->")
checkbox_list.append(" - [ ] Run <!-- /add_docs -->")
checkbox_list.append(" - [ ] Run <!-- /test -->")
checkbox_list.append(" - [ ] Run <!-- /improve_component -->")
checkbox_list.append(" - [ ] Run <!-- /analyze -->")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
checkbox_list.append("[*]")
if isinstance(self.git_provider, GithubProvider) and not get_settings().config.get('disable_checkboxes', False):
pr_comment += f"<table><tr align='left'><th align='left'>Tool</th><th align='left'>Description</th><th align='left'>Trigger Interactively :gem:</th></tr>"
for i in range(len(tool_names)):
pr_comment += f"\n<tr><td align='left'>\n\n<strong>{tool_names[i]}</strong></td>\n<td>{descriptions[i]}</td>\n<td>\n\n{checkbox_list[i]}\n</td></tr>"
pr_comment += "</table>\n\n"
pr_comment += f"""\n\n(1) Note that each tool be [triggered automatically](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#github-app-automatic-tools-when-a-new-pr-is-opened) when a new PR is opened, or called manually by [commenting on a PR](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#online-usage)."""
pr_comment += f"""\n\n(2) Tools marked with [*] require additional parameters to be passed. For example, to invoke the `/ask` tool, you need to comment on a PR: `/ask "<question content>"`. See the relevant documentation for each tool for more details."""
elif isinstance(self.git_provider, BitbucketServerProvider):
# only support basic commands in BBDC
pr_comment = generate_bbdc_table(tool_names[:4], descriptions[:4])
else:
pr_comment += f"<table><tr align='left'><th align='left'>Tool</th><th align='left'>Command</th><th align='left'>Description</th></tr>"
for i in range(len(tool_names)):
pr_comment += f"\n<tr><td align='left'>\n\n<strong>{tool_names[i]}</strong></td><td>{commands[i]}</td><td>{descriptions[i]}</td></tr>"
pr_comment += "</table>\n\n"
pr_comment += f"""\n\nNote that each tool be [invoked automatically](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/) when a new PR is opened, or called manually by [commenting on a PR](https://pr-agent-docs.codium.ai/usage-guide/automations_and_usage/#online-usage)."""
if get_settings().config.publish_output:
self.git_provider.publish_comment(pr_comment)
except Exception as e:
get_logger().error(f"Error while running PRHelpMessage: {e}")
return ""
get_logger().exception(f"Error while running PRHelpMessage: {e}")
return ""
async def prepare_relevant_snippets(self, sim_results):
# Get relevant snippets
relevant_snippets_full = []
relevant_pages_full = []
relevant_snippets_full_header = []
th = 0.75
for s in sim_results:
page = s[0].metadata['source']
content = s[0].page_content
score = s[1]
relevant_snippets_full.append(content)
relevant_snippets_full_header.append(extract_header(content))
relevant_pages_full.append(page)
# build the snippets string
relevant_snippets_str = ""
for i, s in enumerate(relevant_snippets_full):
relevant_snippets_str += f"Snippet {i+1}:\n\n{s}\n\n"
relevant_snippets_str += "-------------------\n\n"
return relevant_pages_full, relevant_snippets_full_header, relevant_snippets_str
def generate_bbdc_table(column_arr_1, column_arr_2):
# Generating header row
header_row = "| Tool | Description | \n"
# Generating separator row
separator_row = "|--|--|\n"
# Generating data rows
data_rows = ""
max_len = max(len(column_arr_1), len(column_arr_2))
for i in range(max_len):
col1 = column_arr_1[i] if i < len(column_arr_1) else ""
col2 = column_arr_2[i] if i < len(column_arr_2) else ""
data_rows += f"| {col1} | {col2} |\n"
# Combine all parts to form the complete table
markdown_table = header_row + separator_row + data_rows
return markdown_table
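A sample invocation of generate_bbdc_table above, with two illustrative rows:
table = generate_bbdc_table(
    ["[DESCRIBE](https://pr-agent-docs.codium.ai/tools/describe/)",
     "[REVIEW](https://pr-agent-docs.codium.ai/tools/review/)"],
    ["Generates PR description", "Adjustable feedback about the PR"],
)
# | Tool | Description |
# |--|--|
# | [DESCRIBE](https://pr-agent-docs.codium.ai/tools/describe/) | Generates PR description |
# | [REVIEW](https://pr-agent-docs.codium.ai/tools/review/) | Adjustable feedback about the PR |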

View File

@ -1,12 +1,13 @@
import copy
import datetime
import traceback
from collections import OrderedDict
from functools import partial
from typing import List, Tuple
from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, add_ai_metadata_to_diff_files
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import github_action_output, load_yaml, ModelType, \
show_relevant_configurations, convert_to_markdown_v2, PRReviewHeader
@ -15,6 +16,7 @@ from pr_agent.git_providers import get_git_provider, get_git_provider_with_conte
from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
from pr_agent.log import get_logger
from pr_agent.servers.help import HelpMessage
from pr_agent.tools.ticket_pr_compliance_check import extract_tickets, extract_and_cache_pr_tickets
class PRReviewer:
@ -51,15 +53,23 @@ class PRReviewer:
raise Exception(f"Answer mode is not supported for {get_settings().config.git_provider} for now")
self.ai_handler = ai_handler()
self.ai_handler.main_pr_language = self.main_language
self.patches_diff = None
self.prediction = None
answer_str, question_str = self._get_user_answers()
self.pr_description, self.pr_description_files = (
self.git_provider.get_pr_description(split_changes_walkthrough=True))
if (self.pr_description_files and get_settings().get("config.is_auto_command", False) and
get_settings().get("config.enable_ai_metadata", False)):
add_ai_metadata_to_diff_files(self.git_provider, self.pr_description_files)
get_logger().debug(f"AI metadata added to the this command")
else:
get_settings().set("config.enable_ai_metadata", False)
get_logger().debug(f"AI metadata is disabled for this command")
self.vars = {
"title": self.git_provider.pr.title,
"branch": self.git_provider.get_pr_branch(),
"description": self.git_provider.get_pr_description(),
"description": self.pr_description,
"language": self.main_language,
"diff": "", # empty diff for initial calculation
"num_pr_files": self.git_provider.get_num_of_files(),
@ -75,7 +85,8 @@ class PRReviewer:
"commit_messages_str": self.git_provider.get_commit_messages(),
"custom_labels": "",
"enable_custom_labels": get_settings().config.enable_custom_labels,
"extra_issue_links": get_settings().pr_reviewer.extra_issue_links,
"is_ai_metadata": get_settings().get("config.enable_ai_metadata", False),
"related_tickets": get_settings().get('related_tickets', []),
}
self.token_handler = TokenHandler(
@ -113,6 +124,9 @@ class PRReviewer:
'config': dict(get_settings().config)}
get_logger().debug("Relevant configs", artifacts=relevant_configs)
# ticket extraction if exists
await extract_and_cache_pr_tickets(self.git_provider, self.vars)
if self.incremental.is_incremental and hasattr(self.git_provider, "unreviewed_files_set") and not self.git_provider.unreviewed_files_set:
get_logger().info(f"Incremental review is enabled for {self.pr_url} but there are no new files")
previous_review_url = ""
@ -156,7 +170,7 @@ class PRReviewer:
self.token_handler,
model,
add_line_numbers_to_hunks=True,
disable_extra_lines=True,)
disable_extra_lines=False,)
if self.patches_diff:
get_logger().debug(f"PR diff", diff=self.patches_diff)
@ -199,7 +213,7 @@ class PRReviewer:
first_key = 'review'
last_key = 'security_concerns'
data = load_yaml(self.prediction.strip(),
keys_fix_yaml=["estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
keys_fix_yaml=["ticket_compliance_check", "estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
"relevant_file:", "relevant_line:", "suggestion:"],
first_key=first_key, last_key=last_key)
github_action_output(data, 'review')
@ -274,7 +288,7 @@ class PRReviewer:
first_key = 'review'
last_key = 'security_concerns'
data = load_yaml(self.prediction.strip(),
keys_fix_yaml=["estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
keys_fix_yaml=["ticket_compliance_check", "estimated_effort_to_review_[1-5]:", "security_concerns:", "key_issues_to_review:",
"relevant_file:", "relevant_line:", "suggestion:"],
first_key=first_key, last_key=last_key)
comments: List[str] = []
@ -393,7 +407,16 @@ class PRReviewer:
review_labels = []
if get_settings().pr_reviewer.enable_review_labels_effort:
estimated_effort = data['review']['estimated_effort_to_review_[1-5]']
estimated_effort_number = int(estimated_effort.split(',')[0])
estimated_effort_number = 0
if isinstance(estimated_effort, str):
try:
estimated_effort_number = int(estimated_effort.split(',')[0])
except ValueError:
get_logger().warning(f"Invalid estimated_effort value: {estimated_effort}")
elif isinstance(estimated_effort, int):
estimated_effort_number = estimated_effort
else:
get_logger().warning(f"Unexpected type for estimated_effort: {type(estimated_effort)}")
if 1 <= estimated_effort_number <= 5: # 1, because ...
review_labels.append(f'Review effort [1-5]: {estimated_effort_number}')
if get_settings().pr_reviewer.enable_review_labels_security and get_settings().pr_reviewer.require_security_review:
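The tolerant parsing above accepts both the "N, because ..." string form and a bare integer; a small stand-alone sketch (warning logs omitted for brevity):
def parse_effort(estimated_effort) -> int:
    if isinstance(estimated_effort, str):
        try:
            return int(estimated_effort.split(',')[0])
        except ValueError:
            return 0  # original code logs a warning here
    if isinstance(estimated_effort, int):
        return estimated_effort
    return 0  # unexpected type
assert parse_effort("3, because the diff touches two modules") == 3
assert parse_effort(4) == 4
assert parse_effort("unknown") == 0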

View File

@ -0,0 +1,126 @@
import re
import traceback
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import GithubProvider
from pr_agent.log import get_logger
# Compile the regex pattern once, outside the function
GITHUB_TICKET_PATTERN = re.compile(
r'(https://github[^/]+/[^/]+/[^/]+/issues/\d+)|(\b(\w+)/(\w+)#(\d+)\b)|(#\d+)'
)
def find_jira_tickets(text):
# Regular expression patterns for JIRA tickets
patterns = [
r'\b[A-Z]{2,10}-\d{1,7}\b', # Standard JIRA ticket format (e.g., PROJ-123)
r'(?:https?://[^\s/]+/browse/)?([A-Z]{2,10}-\d{1,7})\b' # JIRA URL or just the ticket
]
tickets = set()
for pattern in patterns:
matches = re.findall(pattern, text)
for match in matches:
if isinstance(match, tuple):
# If it's a tuple (from the URL pattern), take the last non-empty group
ticket = next((m for m in reversed(match) if m), None)
else:
ticket = match
if ticket:
tickets.add(ticket)
return list(tickets)
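An example run of find_jira_tickets above (the sample text is illustrative):
text = "Fixes PROJ-123, see https://issues.example.com/browse/INFRA-42 for context"
print(sorted(find_jira_tickets(text)))  # ['INFRA-42', 'PROJ-123']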
def extract_ticket_links_from_pr_description(pr_description, repo_path, base_url_html='https://github.com'):
"""
Extract all ticket links from PR description
"""
github_tickets = set()
try:
# Use the updated pattern to find matches
matches = GITHUB_TICKET_PATTERN.findall(pr_description)
for match in matches:
if match[0]: # Full URL match
github_tickets.add(match[0])
elif match[1]: # Shorthand notation match: owner/repo#issue_number
owner, repo, issue_number = match[2], match[3], match[4]
github_tickets.add(f'{base_url_html.strip("/")}/{owner}/{repo}/issues/{issue_number}')
else: # #123 format
issue_number = match[5][1:] # remove #
if issue_number.isdigit() and len(issue_number) < 5 and repo_path:
github_tickets.add(f'{base_url_html.strip("/")}/{repo_path}/issues/{issue_number}')
except Exception as e:
get_logger().error(f"Error extracting tickets error= {e}",
artifact={"traceback": traceback.format_exc()})
return list(github_tickets)
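A worked example of the extraction above; the description text and repo names are made up, and the expected links assume the default https://github.com base URL:
description = ("Closes #12, relates to octocat/hello#345 and "
               "https://github.com/Codium-ai/pr-agent/issues/678")
links = extract_ticket_links_from_pr_description(description, repo_path="Codium-ai/pr-agent")
# returns, in some order:
#   https://github.com/Codium-ai/pr-agent/issues/12   (plain #12, resolved against repo_path)
#   https://github.com/octocat/hello/issues/345        (owner/repo#number shorthand)
#   https://github.com/Codium-ai/pr-agent/issues/678   (full URL kept as-is)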
async def extract_tickets(git_provider):
MAX_TICKET_CHARACTERS = 10000
try:
if isinstance(git_provider, GithubProvider):
user_description = git_provider.get_user_description()
tickets = extract_ticket_links_from_pr_description(user_description, git_provider.repo, git_provider.base_url_html)
tickets_content = []
if tickets:
for ticket in tickets:
# extract ticket number and repo name
repo_name, original_issue_number = git_provider._parse_issue_url(ticket)
# get the ticket object
try:
issue_main = git_provider.repo_obj.get_issue(original_issue_number)
except Exception as e:
get_logger().error(f"Error getting issue_main error= {e}",
artifact={"traceback": traceback.format_exc()})
continue
# clip issue_main.body max length
issue_body_str = issue_main.body
if not issue_body_str:
issue_body_str = ""
if len(issue_body_str) > MAX_TICKET_CHARACTERS:
issue_body_str = issue_body_str[:MAX_TICKET_CHARACTERS] + "..."
# extract labels
labels = []
try:
for label in issue_main.labels:
if isinstance(label, str):
labels.append(label)
else:
labels.append(label.name)
except Exception as e:
get_logger().error(f"Error extracting labels error= {e}",
artifact={"traceback": traceback.format_exc()})
tickets_content.append(
{'ticket_id': issue_main.number,
'ticket_url': ticket, 'title': issue_main.title, 'body': issue_body_str,
'labels': ", ".join(labels)})
return tickets_content
except Exception as e:
get_logger().error(f"Error extracting tickets error= {e}",
artifact={"traceback": traceback.format_exc()})
async def extract_and_cache_pr_tickets(git_provider, vars):
if get_settings().get('config.require_ticket_analysis_review', False):
return
related_tickets = get_settings().get('related_tickets', [])
if not related_tickets:
tickets_content = await extract_tickets(git_provider)
if tickets_content:
get_logger().info("Extracted tickets from PR description", artifact={"tickets": tickets_content})
vars['related_tickets'] = tickets_content
get_settings().set('related_tickets', tickets_content)
else: # if tickets are already cached
get_logger().info("Using cached tickets", artifact={"tickets": related_tickets})
vars['related_tickets'] = related_tickets
def check_tickets_relevancy():
return True

View File

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "pr-agent"
version = "0.2.2"
version = "0.2.4"
authors = [{name= "CodiumAI", email = "tal.r@codium.ai"}]

View File

@ -1,19 +1,21 @@
aiohttp==3.9.4
anthropic[vertex]==0.21.3
aiohttp==3.9.5
anthropic[vertex]==0.37.1
atlassian-python-api==3.41.4
azure-devops==7.1.0b3
azure-identity==1.15.0
boto3==1.33.6
certifi==2024.8.30
dynaconf==3.2.4
fastapi==0.111.0
GitPython==3.1.41
google-cloud-aiplatform==1.38.0
google-generativeai==0.8.3
google-cloud-storage==2.10.0
Jinja2==3.1.2
litellm==1.43.13
litellm==1.50.2
loguru==0.7.2
msrest==0.7.1
openai==1.40.6
openai==1.52.1
pytest==7.4.0
PyGithub==1.59.*
PyYAML==6.0.1
@ -27,6 +29,7 @@ tenacity==8.2.3
gunicorn==22.0.0
pytest-cov==5.0.0
pydantic==2.8.2
html2text==2024.2.26
# Uncomment the following lines to enable the 'similar issue' tool
# pinecone-client
# pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main

View File

@ -0,0 +1,15 @@
from pr_agent.git_providers import AzureDevopsProvider
class TestAzureDevOpsParsing():
def test_regular_address(self):
pr_url = "https://dev.azure.com/organization/project/_git/repo/pullrequest/1"
# workspace_slug, repo_slug, pr_number
assert AzureDevopsProvider._parse_pr_url(pr_url) == ("project", "repo", 1)
def test_visualstudio_address(self):
pr_url = "https://organization.visualstudio.com/project/_git/repo/pullrequest/1"
# workspace_slug, repo_slug, pr_number
assert AzureDevopsProvider._parse_pr_url(pr_url) == ("project", "repo", 1)

View File

@ -53,7 +53,7 @@ class TestConvertToMarkdown:
'relevant_line': '[return ""](https://github.com/Codium-ai/pr-agent-pro/pull/102/files#diff-52d45f12b836f77ed1aef86e972e65404634ea4e2a6083fb71a9b0f9bb9e062fR199)'}]}
expected_output = f'{PRReviewHeader.REGULAR.value} 🔍\n\n<table>\n<tr><td>⏱️&nbsp;<strong>Estimated effort to review</strong>: 1 🔵⚪⚪⚪⚪</td></tr>\n<tr><td>🧪&nbsp;<strong>No relevant tests</strong></td></tr>\n<tr><td>&nbsp;<strong>Possible issues</strong>: No\n</td></tr>\n<tr><td>🔒&nbsp;<strong>No security concerns identified</strong></td></tr>\n</table>\n\n\n<details><summary> <strong>Code feedback:</strong></summary>\n\n<hr><table><tr><td>relevant file</td><td>pr_agent/git_providers/git_provider.py\n</td></tr><tr><td>suggestion &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>\n\n<strong>\n\nConsider raising an exception or logging a warning when \'pr_url\' attribute is not found. This can help in debugging issues related to the absence of \'pr_url\' in instances where it\'s expected. [important]\n\n</strong>\n</td></tr><tr><td>relevant line</td><td><a href=\'https://github.com/Codium-ai/pr-agent-pro/pull/102/files#diff-52d45f12b836f77ed1aef86e972e65404634ea4e2a6083fb71a9b0f9bb9e062fR199\'>return ""</a></td></tr></table><hr>\n\n</details>'
expected_output = f'{PRReviewHeader.REGULAR.value} 🔍\n\nHere are some key observations to aid the review process:\n\n<table>\n<tr><td>⏱️&nbsp;<strong>Estimated effort to review</strong>: 1 🔵⚪⚪⚪⚪</td></tr>\n<tr><td>🧪&nbsp;<strong>No relevant tests</strong></td></tr>\n<tr><td>&nbsp;<strong>Possible issues</strong>: No\n</td></tr>\n<tr><td>🔒&nbsp;<strong>No security concerns identified</strong></td></tr>\n</table>\n\n\n<details><summary> <strong>Code feedback:</strong></summary>\n\n<hr><table><tr><td>relevant file</td><td>pr_agent/git_providers/git_provider.py\n</td></tr><tr><td>suggestion &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td><td>\n\n<strong>\n\nConsider raising an exception or logging a warning when \'pr_url\' attribute is not found. This can help in debugging issues related to the absence of \'pr_url\' in instances where it\'s expected. [important]\n\n</strong>\n</td></tr><tr><td>relevant line</td><td><a href=\'https://github.com/Codium-ai/pr-agent-pro/pull/102/files#diff-52d45f12b836f77ed1aef86e972e65404634ea4e2a6083fb71a9b0f9bb9e062fR199\'>return ""</a></td></tr></table><hr>\n\n</details>'
assert convert_to_markdown_v2(input_data).strip() == expected_output.strip()

View File

@ -60,11 +60,22 @@ class TestExtendPatch:
original_file_str = 'line1\nline2\nline3\nline4\nline5\nline6'
patch_str = '@@ -2,3 +2,3 @@ init()\n-line2\n+new_line2\n line3\n line4\n@@ -4,1 +4,1 @@ init2()\n-line4\n+new_line4' # noqa: E501
num_lines = 1
original_allow_dynamic_context = get_settings().config.allow_dynamic_context
get_settings().config.allow_dynamic_context = False
expected_output = '\n@@ -1,5 +1,5 @@ init()\n line1\n-line2\n+new_line2\n line3\n line4\n line5\n\n@@ -3,3 +3,3 @@ init2()\n line3\n-line4\n+new_line4\n line5' # noqa: E501
actual_output = extend_patch(original_file_str, patch_str,
patch_extra_lines_before=num_lines, patch_extra_lines_after=num_lines)
assert actual_output == expected_output
get_settings().config.allow_dynamic_context = True
expected_output = '\n@@ -1,5 +1,5 @@ init()\n line1\n-line2\n+new_line2\n line3\n line4\n line5\n\n@@ -3,3 +3,3 @@ init2()\n line3\n-line4\n+new_line4\n line5' # noqa: E501
actual_output = extend_patch(original_file_str, patch_str,
patch_extra_lines_before=num_lines, patch_extra_lines_after=num_lines)
assert actual_output == expected_output
get_settings().config.allow_dynamic_context = original_allow_dynamic_context
def test_dynamic_context(self):
get_settings().config.max_extra_lines_before_dynamic_context = 10
original_file_str = "def foo():"
@ -94,10 +105,11 @@ class TestExtendedPatchMoreLines:
get_settings().config.allow_dynamic_context = False
class File:
def __init__(self, base_file, patch, filename):
def __init__(self, base_file, patch, filename, ai_file_summary=None):
self.base_file = base_file
self.patch = patch
self.filename = filename
self.ai_file_summary = ai_file_summary
@pytest.fixture
def token_handler(self):