mirror of
https://github.com/qodo-ai/pr-agent.git
synced 2025-07-02 11:50:37 +08:00
Add ask line feature
This commit is contained in:
105
pr_agent/tools/pr_line_questions.py
Normal file
105
pr_agent/tools/pr_line_questions.py
Normal file
@ -0,0 +1,105 @@
|
||||
import argparse
|
||||
import copy
|
||||
from functools import partial
|
||||
|
||||
from jinja2 import Environment, StrictUndefined
|
||||
|
||||
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
|
||||
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
|
||||
from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers, \
|
||||
extract_hunk_lines_from_patch
|
||||
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
|
||||
from pr_agent.algo.token_handler import TokenHandler
|
||||
from pr_agent.algo.utils import ModelType
|
||||
from pr_agent.config_loader import get_settings
|
||||
from pr_agent.git_providers import get_git_provider
|
||||
from pr_agent.git_providers.git_provider import get_main_pr_language
|
||||
from pr_agent.log import get_logger
|
||||
from pr_agent.servers.ai_disclaimer import AiDisclaimer
|
||||
from pr_agent.servers.help import HelpMessage
|
||||
|
||||
|
||||
class PR_LineQuestions:
    """Answer a free-form user question about specific lines of a PR diff.

    The target location is read from settings (populated by the server layer):
    ``ask_diff_hunk`` (an explicit hunk), ``file_name``, ``line_start``,
    ``line_end``, ``side`` and optionally ``comment_id`` to reply to.
    """

    def __init__(self, pr_url: str, args=None, ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
        # The question text is everything after the command, joined by spaces.
        self.question_str = self.parse_args(args)
        self.git_provider = get_git_provider()(pr_url)

        self.ai_handler = ai_handler()

        # Template variables for the Jinja2 prompts; the hunk-related fields
        # are filled in later, once the relevant patch lines are extracted.
        self.vars = {
            "title": self.git_provider.pr.title,
            "branch": self.git_provider.get_pr_branch(),
            "diff": "",  # empty diff for initial token calculation
            "question": self.question_str,
            "full_hunk": "",
            "selected_lines": "",
        }
        self.token_handler = TokenHandler(self.git_provider.pr,
                                          self.vars,
                                          get_settings().pr_line_questions_prompt.system,
                                          get_settings().pr_line_questions_prompt.user)
        self.patches_diff = None
        self.prediction = None

    def parse_args(self, args):
        """Join CLI-style args into a single question string ("" when absent)."""
        return " ".join(args) if args else ""

    async def run(self):
        """Locate the requested hunk, query the model, and publish the answer.

        Always returns "" (the comment is published as a side effect).
        """
        get_logger().info('Answering a PR lines question...')

        self.patch_with_lines = ""
        ask_diff = get_settings().get('ask_diff_hunk', "")
        line_start = get_settings().get('line_start', '')
        line_end = get_settings().get('line_end', '')
        side = get_settings().get('side', 'RIGHT')
        file_name = get_settings().get('file_name', '')
        comment_id = get_settings().get('comment_id', '')
        if ask_diff:
            # A diff hunk was supplied directly - extract the selected lines from it.
            self.patch_with_lines, self.selected_lines = extract_hunk_lines_from_patch(ask_diff,
                                                                                       file_name,
                                                                                       line_start=line_start,
                                                                                       line_end=line_end,
                                                                                       side=side)
        else:
            # Otherwise, locate the file's patch within the full PR diff.
            diff_files = self.git_provider.get_diff_files()
            for file in diff_files:
                if file.filename == file_name:
                    self.patch_with_lines, self.selected_lines = extract_hunk_lines_from_patch(
                        file.patch, file.filename,
                        line_start=line_start,
                        line_end=line_end,
                        side=side)
                    break  # filenames are unique within a diff; stop at the first match

        if self.patch_with_lines:
            response = await retry_with_fallback_models(self._get_prediction, model_type=ModelType.TURBO)

            get_logger().info('Preparing answer...')
            if comment_id:
                self.git_provider.reply_to_comment_from_comment_id(comment_id, response)
            else:
                self.git_provider.publish_comment(response)
        else:
            # Previously a silent no-op; surface the failure so operators can debug it.
            get_logger().warning(f"Could not extract a diff hunk for file '{file_name}'; no answer published")

        return ""

    async def _get_prediction(self, model: str):
        """Render the line-question prompts and return one model completion.

        Args:
            model: model identifier chosen by retry_with_fallback_models.
        """
        variables = copy.deepcopy(self.vars)
        variables["full_hunk"] = self.patch_with_lines  # update diff
        variables["selected_lines"] = self.selected_lines
        environment = Environment(undefined=StrictUndefined)
        system_prompt = environment.from_string(get_settings().pr_line_questions_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_line_questions_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
            # Use the project logger rather than bare print for debug output.
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")

        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)
        return response
|
Reference in New Issue
Block a user