Merge pull request #65 from Codium-ai/tr/agent_logic

pr_information_from_user_prompts
Authored by Ori Kotek, committed via GitHub on 2023-07-17 02:08:56 +03:00
5 changed files with 113 additions and 2 deletions

View File

@@ -5,6 +5,7 @@ import os
 from pr_agent.tools.pr_code_suggestions import PRCodeSuggestions
 from pr_agent.tools.pr_description import PRDescription
+from pr_agent.tools.pr_information_from_user import PRInformationFromUser
 from pr_agent.tools.pr_questions import PRQuestions
 from pr_agent.tools.pr_reviewer import PRReviewer
@@ -28,7 +29,8 @@ improve / improve_code - Suggest improvements to the code in the PR as pull requ
     parser.add_argument('command', type=str, help='The', choices=['review', 'review_pr',
                                                                    'ask', 'ask_question',
                                                                    'describe', 'describe_pr',
-                                                                   'improve', 'improve_code'], default='review')
+                                                                   'improve', 'improve_code',
+                                                                   'user_questions'], default='review')
     parser.add_argument('rest', nargs=argparse.REMAINDER, default=[])
     args = parser.parse_args()
     logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
@@ -54,6 +56,10 @@ improve / improve_code - Suggest improvements to the code in the PR as pull requ
         print(f"Reviewing PR: {args.pr_url}")
         reviewer = PRReviewer(args.pr_url, cli_mode=True)
         asyncio.run(reviewer.review())
+    elif command in ['user_questions']:
+        print(f"Asking the PR author questions: {args.pr_url}")
+        reviewer = PRInformationFromUser(args.pr_url)
+        asyncio.run(reviewer.generate_questions())
     else:
         print(f"Unknown command: {command}")
         parser.print_help()
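For orientation, the new `user_questions` branch added above boils down to the call chain in this minimal sketch; the PR URL is a placeholder, and provider credentials and settings are assumed to be configured already.

```python
# Rough programmatic equivalent of running the CLI with the new `user_questions` command.
import asyncio

from pr_agent.tools.pr_information_from_user import PRInformationFromUser

pr_url = "https://github.com/owner/repo/pull/1"  # placeholder URL, not a real PR
asyncio.run(PRInformationFromUser(pr_url).generate_questions())
```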

View File

@@ -13,6 +13,7 @@ settings = Dynaconf(
         "settings/pr_questions_prompts.toml",
         "settings/pr_description_prompts.toml",
         "settings/pr_code_suggestions_prompts.toml",
+        "settings/pr_information_from_user_prompts.toml",
         "settings_prod/.secrets.toml"
         ]]
 )
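Registering the new TOML file here is what makes its `[pr_information_from_user_prompt]` table reachable through the shared Dynaconf `settings` object. A minimal sketch of the access pattern (it assumes the pr_agent package and its settings files are importable):

```python
from pr_agent.config_loader import settings

# The prompt templates registered above become plain attribute lookups,
# which is exactly how the new tool reads them.
system_template = settings.pr_information_from_user_prompt.system
user_template = settings.pr_information_from_user_prompt.user
```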

View File

@@ -2,7 +2,7 @@
 model="gpt-4-0613"
 git_provider="github"
 publish_review=true
-verbosity_level=0 # 0,1,2
+verbosity_level=2 # 0,1,2
 [pr_reviewer]
 require_focused_review=true
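Bumping `verbosity_level` to 2 enables the extra logging that the tools gate on this setting; a minimal sketch of the pattern, the same check that appears in the new tool further down:

```python
import logging

from pr_agent.config_loader import settings

# At verbosity_level >= 2, rendered prompts and raw model answers are logged in full.
if settings.config.verbosity_level >= 2:
    logging.info("Verbose mode: prompts and model answers will be logged")
```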

View File

@@ -0,0 +1,33 @@
+[pr_information_from_user_prompt]
+system="""You are CodiumAI-PR-Reviewer, a language model designed to review git pull requests.
+Given the PR Info and the PR Git Diff, generate 4 questions about the PR for the PR author.
+The goal of the questions is to help the language model understand the PR better, so the questions should be insightful, informative, non-trivial, and relevant to the PR.
+Prefer yes\\no or multiple choice questions. If you have to ask open-ended questions, make sure they are not too difficult, and can be answered in a sentence or two.
+Example output:
+'
+Questions to better understand the PR:
+1. ...
+2. ...
+...
+"""
+user="""PR Info:
+Title: '{{title}}'
+Branch: '{{branch}}'
+Description: '{{description}}'
+{%- if language %}
+Main language: {{language}}
+{%- endif %}
+The PR Git Diff:
+```
+{{diff}}
+```
+Note that lines in the diff body are prefixed with a symbol that represents the type of change: '-' for deletions, '+' for additions, and ' ' (a space) for unchanged lines
+Response:
+"""

View File

@@ -0,0 +1,71 @@
+import copy
+import logging
+
+from jinja2 import Environment, StrictUndefined
+
+from pr_agent.algo.ai_handler import AiHandler
+from pr_agent.algo.pr_processing import get_pr_diff
+from pr_agent.algo.token_handler import TokenHandler
+from pr_agent.config_loader import settings
+from pr_agent.git_providers import get_git_provider
+from pr_agent.git_providers.git_provider import get_main_pr_language
+
+
+class PRInformationFromUser:
+    def __init__(self, pr_url: str):
+        self.git_provider = get_git_provider()(pr_url)
+        self.main_pr_language = get_main_pr_language(
+            self.git_provider.get_languages(), self.git_provider.get_files()
+        )
+        self.ai_handler = AiHandler()
+        self.vars = {
+            "title": self.git_provider.pr.title,
+            "branch": self.git_provider.get_pr_branch(),
+            "description": self.git_provider.get_description(),
+            "language": self.main_pr_language,
+            "diff": "",  # empty diff for initial calculation
+        }
+        self.token_handler = TokenHandler(self.git_provider.pr,
+                                          self.vars,
+                                          settings.pr_information_from_user_prompt.system,
+                                          settings.pr_information_from_user_prompt.user)
+        self.patches_diff = None
+        self.prediction = None
+
+    async def generate_questions(self):
+        logging.info('Generating question to the user...')
+        if settings.config.publish_review:
+            self.git_provider.publish_comment("Preparing answer...", is_temporary=True)
+        logging.info('Getting PR diff...')
+        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler)
+        logging.info('Getting AI prediction...')
+        self.prediction = await self._get_prediction()
+        logging.info('Preparing questions...')
+        pr_comment = self._prepare_pr_answer()
+        if settings.config.publish_review:
+            logging.info('Pushing questions...')
+            self.git_provider.publish_comment(pr_comment)
+            self.git_provider.remove_initial_comment()
+        return ""
+
+    async def _get_prediction(self):
+        variables = copy.deepcopy(self.vars)
+        variables["diff"] = self.patches_diff  # update diff
+        environment = Environment(undefined=StrictUndefined)
+        system_prompt = environment.from_string(settings.pr_information_from_user_prompt.system).render(variables)
+        user_prompt = environment.from_string(settings.pr_information_from_user_prompt.user).render(variables)
+        if settings.config.verbosity_level >= 2:
+            logging.info(f"\nSystem prompt:\n{system_prompt}")
+            logging.info(f"\nUser prompt:\n{user_prompt}")
+        model = settings.config.model
+        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
+                                                                        system=system_prompt, user=user_prompt)
+        return response
+
+    def _prepare_pr_answer(self) -> str:
+        model_output = self.prediction.strip()
+        if settings.config.verbosity_level >= 2:
+            logging.info(f"answer_str:\n{model_output}")
+        answer_str = f"{model_output}\n\n Please respond to the question above in the following format:\n\n" + \
+                     f"/answer <question_id> <answer>\n\n" + f"Example:\n'\n/answer\n1. Yes, because ...\n2. No, because ...\n'"
+        return answer_str
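To make the round trip concrete, this is roughly the comment `_prepare_pr_answer` publishes for a given model reply, together with the `/answer` format the PR author is asked to use; the question text here is invented for illustration only.

```python
# Invented model output, run through the same formatting used in _prepare_pr_answer above.
model_output = "Questions to better understand the PR:\n1. Is the user_questions command meant for CLI use only?"
answer_str = (f"{model_output}\n\n Please respond to the question above in the following format:\n\n"
              "/answer <question_id> <answer>\n\n"
              "Example:\n'\n/answer\n1. Yes, because ...\n2. No, because ...\n'")
print(answer_str)
```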