Merge remote-tracking branch 'upstream/main' into abstract-BaseAiHandler

Brian Pham
2023-12-09 16:47:13 +00:00
104 changed files with 3813 additions and 1068 deletions


@@ -1,5 +1,4 @@
 import copy
-import logging
 from jinja2 import Environment, StrictUndefined
@@ -9,6 +8,7 @@ from pr_agent.algo.token_handler import TokenHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import get_main_pr_language
+from pr_agent.log import get_logger
 
 
 class PRInformationFromUser:
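
The change above (and every hunk that follows) swaps the stdlib logging module for a project-level get_logger() imported from pr_agent.log. That module's internals are not part of this diff; as a rough sketch only, a centralized helper of the following shape would support the get_logger().info(...) calls seen in this file (the setup_logger name, format string, and stdlib-logging backend are assumptions, not the project's confirmed implementation):

    import logging
    import sys
    from typing import Optional

    _logger: Optional[logging.Logger] = None

    def setup_logger(level: int = logging.INFO) -> logging.Logger:
        # Hypothetical one-time setup: one shared logger, one stdout sink,
        # so every tool logs through the same handler and format.
        global _logger
        logger = logging.getLogger("pr_agent")
        logger.setLevel(level)
        if not logger.handlers:
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
            logger.addHandler(handler)
        _logger = logger
        return logger

    def get_logger() -> logging.Logger:
        # Callers import this instead of touching the logging module directly,
        # keeping sinks and formatting centralized in one module.
        return _logger if _logger is not None else setup_logger()
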
@@ -34,22 +34,22 @@ class PRInformationFromUser:
         self.prediction = None
 
     async def run(self):
-        logging.info('Generating question to the user...')
+        get_logger().info('Generating question to the user...')
         if get_settings().config.publish_output:
             self.git_provider.publish_comment("Preparing questions...", is_temporary=True)
         await retry_with_fallback_models(self._prepare_prediction)
-        logging.info('Preparing questions...')
+        get_logger().info('Preparing questions...')
         pr_comment = self._prepare_pr_answer()
         if get_settings().config.publish_output:
-            logging.info('Pushing questions...')
+            get_logger().info('Pushing questions...')
             self.git_provider.publish_comment(pr_comment)
             self.git_provider.remove_initial_comment()
         return ""
 
     async def _prepare_prediction(self, model):
-        logging.info('Getting PR diff...')
+        get_logger().info('Getting PR diff...')
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-        logging.info('Getting AI prediction...')
+        get_logger().info('Getting AI prediction...')
         self.prediction = await self._get_prediction(model)
 
     async def _get_prediction(self, model: str):
@@ -59,8 +59,8 @@ class PRInformationFromUser:
         system_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.user).render(variables)
         if get_settings().config.verbosity_level >= 2:
-            logging.info(f"\nSystem prompt:\n{system_prompt}")
-            logging.info(f"\nUser prompt:\n{user_prompt}")
+            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
+            get_logger().info(f"\nUser prompt:\n{user_prompt}")
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
         return response
@@ -68,7 +68,7 @@ class PRInformationFromUser:
     def _prepare_pr_answer(self) -> str:
         model_output = self.prediction.strip()
         if get_settings().config.verbosity_level >= 2:
-            logging.info(f"answer_str:\n{model_output}")
+            get_logger().info(f"answer_str:\n{model_output}")
         answer_str = f"{model_output}\n\n Please respond to the questions above in the following format:\n\n" +\
                      "\n>/answer\n>1) ...\n>2) ...\n>...\n"
         return answer_str
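
The response, finish_reason = await self.ai_handler.chat_completion(...) call two hunks up is the seam this branch (abstract-BaseAiHandler) builds on. The abstract base class itself is not shown in this file's diff; inferred from the call site alone, the interface would look roughly like this (the method signature and return tuple are read off the usage above; everything else is an assumption):

    from abc import ABC, abstractmethod
    from typing import Tuple

    class BaseAiHandler(ABC):
        # Hypothetical abstract interface; concrete handlers (an OpenAI- or
        # LiteLLM-backed implementation, say) would subclass it.
        @abstractmethod
        async def chat_completion(self, model: str, system: str, user: str,
                                  temperature: float = 0.2) -> Tuple[str, str]:
            # Expected to return (response_text, finish_reason), matching the
            # two-value unpacking at the call site in _get_prediction.
            ...

Hiding the backend behind an interface like this lets tools such as PRInformationFromUser stay agnostic about which LLM provider produces the completion.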