Refactor logging system to use custom logger across the codebase

Ori Kotek
2023-10-16 14:56:00 +03:00
parent 41166dc271
commit c324d88be3
32 changed files with 340 additions and 311 deletions
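
In short, every call site drops the stdlib logging module (and the per-module logging.getLogger() instances) in favor of one shared loguru-backed logger exposed by the new pr_agent/log package, and each entry point calls setup_logger() once instead of logging.basicConfig(). A minimal sketch of the pattern, condensed from the hunks below (the error text is illustrative):

    # Before: stdlib logging, configured ad hoc in each entry point
    import logging
    import os
    logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
    logging.error("Failed to extend patch: <details>")

    # After: one shared loguru-backed logger from pr_agent.log
    from pr_agent.log import LoggingFormat, get_logger, setup_logger
    setup_logger()  # server entry points pass fmt=LoggingFormat.JSON
    get_logger().error("Failed to extend patch: <details>")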

View File

@@ -1,21 +1,17 @@
-import logging
-import os
 import shlex
-import tempfile
 from pr_agent.algo.utils import update_settings_from_args
 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.utils import apply_repo_settings
 from pr_agent.tools.pr_add_docs import PRAddDocs
 from pr_agent.tools.pr_code_suggestions import PRCodeSuggestions
+from pr_agent.tools.pr_config import PRConfig
 from pr_agent.tools.pr_description import PRDescription
 from pr_agent.tools.pr_information_from_user import PRInformationFromUser
-from pr_agent.tools.pr_similar_issue import PRSimilarIssue
 from pr_agent.tools.pr_questions import PRQuestions
 from pr_agent.tools.pr_reviewer import PRReviewer
+from pr_agent.tools.pr_similar_issue import PRSimilarIssue
 from pr_agent.tools.pr_update_changelog import PRUpdateChangelog
-from pr_agent.tools.pr_config import PRConfig

 command2class = {
     "auto_review": PRReviewer,

View File

@@ -1,4 +1,3 @@
-import logging
 import os

 import litellm
@@ -7,6 +6,8 @@ from litellm import acompletion
 from openai.error import APIError, RateLimitError, Timeout, TryAgain
 from retry import retry

 from pr_agent.config_loader import get_settings
+from pr_agent.log import get_logger

 OPENAI_RETRIES = 5
@@ -88,34 +89,34 @@ class AiHandler:
         try:
             deployment_id = self.deployment_id
             if get_settings().config.verbosity_level >= 2:
-                logging.debug(
+                get_logger().debug(
                     f"Generating completion with {model}"
                     f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
                 )
             if self.azure:
                 model = 'azure/' + model
+            messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
             response = await acompletion(
                 model=model,
                 deployment_id=deployment_id,
-                messages=[
-                    {"role": "system", "content": system},
-                    {"role": "user", "content": user}
-                ],
+                messages=messages,
                 temperature=temperature,
                 force_timeout=get_settings().config.ai_timeout
             )
         except (APIError, Timeout, TryAgain) as e:
-            logging.error("Error during OpenAI inference: ", e)
+            get_logger().error("Error during OpenAI inference: ", e)
             raise
         except (RateLimitError) as e:
-            logging.error("Rate limit error during OpenAI inference: ", e)
+            get_logger().error("Rate limit error during OpenAI inference: ", e)
             raise
         except (Exception) as e:
-            logging.error("Unknown error during OpenAI inference: ", e)
+            get_logger().error("Unknown error during OpenAI inference: ", e)
             raise TryAgain from e
         if response is None or len(response["choices"]) == 0:
             raise TryAgain
         resp = response["choices"][0]['message']['content']
         finish_reason = response["choices"][0]["finish_reason"]
-        print(resp, finish_reason)
+        usage = response.get("usage")
+        get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
+                          model=model, usage=usage)
         return resp, finish_reason

View File

@@ -1,8 +1,9 @@
 from __future__ import annotations

-import logging
 import re

 from pr_agent.config_loader import get_settings
+from pr_agent.log import get_logger


 def extend_patch(original_file_str, patch_str, num_lines) -> str:
@@ -63,7 +64,7 @@ def extend_patch(original_file_str, patch_str, num_lines) -> str:
                 extended_patch_lines.append(line)
     except Exception as e:
         if get_settings().config.verbosity_level >= 2:
-            logging.error(f"Failed to extend patch: {e}")
+            get_logger().error(f"Failed to extend patch: {e}")
         return patch_str

     # finish previous hunk
@@ -134,14 +135,14 @@ def handle_patch_deletions(patch: str, original_file_content_str: str,
     if not new_file_content_str:
         # logic for handling deleted files - don't show patch, just show that the file was deleted
         if get_settings().config.verbosity_level > 0:
-            logging.info(f"Processing file: {file_name}, minimizing deletion file")
+            get_logger().info(f"Processing file: {file_name}, minimizing deletion file")
         patch = None  # file was deleted
     else:
         patch_lines = patch.splitlines()
         patch_new = omit_deletion_hunks(patch_lines)
         if patch != patch_new:
             if get_settings().config.verbosity_level > 0:
-                logging.info(f"Processing file: {file_name}, hunks were deleted")
+                get_logger().info(f"Processing file: {file_name}, hunks were deleted")
             patch = patch_new
     return patch

View File

@@ -1,7 +1,6 @@
 from __future__ import annotations

 import difflib
-import logging
 import re
 import traceback
 from typing import Any, Callable, List, Tuple
@@ -15,6 +14,7 @@ from pr_agent.algo.file_filter import filter_ignored
 from pr_agent.algo.token_handler import TokenHandler, get_token_encoder
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider
+from pr_agent.log import get_logger

 DELETED_FILES_ = "Deleted files:\n"
@@ -51,7 +51,7 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
     try:
         diff_files = git_provider.get_diff_files()
     except RateLimitExceededException as e:
-        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
+        get_logger().error(f"Rate limit exceeded for git provider API. original message {e}")
         raise

     diff_files = filter_ignored(diff_files)
@@ -180,7 +180,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
         # Hard Stop, no more tokens
         if total_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
-            logging.warning(f"File was fully skipped, no more tokens: {file.filename}.")
+            get_logger().warning(f"File was fully skipped, no more tokens: {file.filename}.")
             continue

         # If the patch is too large, just show the file name
@@ -189,7 +189,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
             # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
             #  until we meet the requirements
             if get_settings().config.verbosity_level >= 2:
-                logging.warning(f"Patch too large, minimizing it, {file.filename}")
+                get_logger().warning(f"Patch too large, minimizing it, {file.filename}")
             if not modified_files_list:
                 total_tokens += token_handler.count_tokens(MORE_MODIFIED_FILES_)
             modified_files_list.append(file.filename)
@@ -204,7 +204,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
         patches.append(patch_final)
         total_tokens += token_handler.count_tokens(patch_final)
         if get_settings().config.verbosity_level >= 2:
-            logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")
+            get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")

     return patches, modified_files_list, deleted_files_list
@@ -218,7 +218,7 @@ async def retry_with_fallback_models(f: Callable):
             get_settings().set("openai.deployment_id", deployment_id)
             return await f(model)
         except Exception as e:
-            logging.warning(
+            get_logger().warning(
                 f"Failed to generate prediction with {model}"
                 f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                 f"{traceback.format_exc()}"
@@ -340,7 +340,7 @@ def clip_tokens(text: str, max_tokens: int) -> str:
         clipped_text = text[:num_output_chars]
         return clipped_text
     except Exception as e:
-        logging.warning(f"Failed to clip tokens: {e}")
+        get_logger().warning(f"Failed to clip tokens: {e}")
         return text
@@ -367,7 +367,7 @@ def get_pr_multi_diffs(git_provider: GitProvider,
     try:
         diff_files = git_provider.get_diff_files()
     except RateLimitExceededException as e:
-        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
+        get_logger().error(f"Rate limit exceeded for git provider API. original message {e}")
         raise

     diff_files = filter_ignored(diff_files)
@@ -387,7 +387,7 @@ def get_pr_multi_diffs(git_provider: GitProvider,
     for file in sorted_files:
         if call_number > max_calls:
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Reached max calls ({max_calls})")
+                get_logger().info(f"Reached max calls ({max_calls})")
             break

         original_file_content_str = file.base_file
@@ -410,13 +410,13 @@ def get_pr_multi_diffs(git_provider: GitProvider,
             total_tokens = token_handler.prompt_tokens
             call_number += 1
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Call number: {call_number}")
+                get_logger().info(f"Call number: {call_number}")

         if patch:
             patches.append(patch)
             total_tokens += new_patch_tokens
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")
+                get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")

     # Add the last chunk
     if patches:

View File

@@ -2,7 +2,6 @@ from __future__ import annotations

 import difflib
 import json
-import logging
 import re
 import textwrap
 from datetime import datetime
@@ -11,6 +10,7 @@ from typing import Any, List
 import yaml
 from starlette_context import context

 from pr_agent.config_loader import get_settings, global_settings
+from pr_agent.log import get_logger


 def get_setting(key: str) -> Any:
@@ -159,7 +159,7 @@ def try_fix_json(review, max_iter=10, code_suggestions=False):
             iter_count += 1

     if not valid_json:
-        logging.error("Unable to decode JSON response from AI")
+        get_logger().error("Unable to decode JSON response from AI")
         data = {}

     return data
@@ -230,7 +230,7 @@ def load_large_diff(filename, new_file_content_str: str, original_file_content_s
         diff = difflib.unified_diff(original_file_content_str.splitlines(keepends=True),
                                     new_file_content_str.splitlines(keepends=True))
         if get_settings().config.verbosity_level >= 2:
-            logging.warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
+            get_logger().warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
         patch = ''.join(diff)
     except Exception:
         pass
@@ -262,12 +262,12 @@ def update_settings_from_args(args: List[str]) -> List[str]:
             vals = arg.split('=', 1)
             if len(vals) != 2:
                 if len(vals) > 2:  # --extended is a valid argument
-                    logging.error(f'Invalid argument format: {arg}')
+                    get_logger().error(f'Invalid argument format: {arg}')
                 other_args.append(arg)
                 continue
             key, value = _fix_key_value(*vals)
             get_settings().set(key, value)
-            logging.info(f'Updated setting {key} to: "{value}"')
+            get_logger().info(f'Updated setting {key} to: "{value}"')
         else:
             other_args.append(arg)
     return other_args
@@ -279,7 +279,7 @@ def _fix_key_value(key: str, value: str):
     try:
         value = yaml.safe_load(value)
     except Exception as e:
-        logging.error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
+        get_logger().error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
     return key, value
@@ -288,7 +288,7 @@ def load_yaml(review_text: str) -> dict:
     try:
         data = yaml.safe_load(review_text)
     except Exception as e:
-        logging.error(f"Failed to parse AI prediction: {e}")
+        get_logger().error(f"Failed to parse AI prediction: {e}")
         data = try_fix_yaml(review_text)
     return data
@@ -299,7 +299,7 @@ def try_fix_yaml(review_text: str) -> dict:
         review_text_lines_tmp = '\n'.join(review_text_lines[:-i])
         try:
             data = yaml.load(review_text_lines_tmp, Loader=yaml.SafeLoader)
-            logging.info(f"Successfully parsed AI prediction after removing {i} lines")
+            get_logger().info(f"Successfully parsed AI prediction after removing {i} lines")
             break
         except:
             pass

View File

@@ -1,11 +1,12 @@
@ -1,11 +1,12 @@
import argparse import argparse
import asyncio import asyncio
import logging
import os import os
from pr_agent.agent.pr_agent import PRAgent, commands from pr_agent.agent.pr_agent import PRAgent, commands
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.log import setup_logger
setup_logger()
def run(inargs=None): def run(inargs=None):
parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage= parser = argparse.ArgumentParser(description='AI based pull request analyzer', usage=
@ -47,7 +48,6 @@ For example: 'python cli.py --pr_url=... review --pr_reviewer.extra_instructions
parser.print_help() parser.print_help()
return return
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
command = args.command.lower() command = args.command.lower()
get_settings().set("CONFIG.CLI_MODE", True) get_settings().set("CONFIG.CLI_MODE", True)
if args.issue_url: if args.issue_url:

View File

@@ -1,10 +1,11 @@
 import json
-import logging
 from typing import Optional, Tuple
 from urllib.parse import urlparse

 import os

+from ..log import get_logger
+
 AZURE_DEVOPS_AVAILABLE = True
 try:
     from msrest.authentication import BasicAuthentication
@@ -55,7 +56,7 @@ class AzureDevopsProvider:
                                                                path=".pr_agent.toml")
             return contents
         except Exception as e:
-            logging.exception("get repo settings error")
+            get_logger().exception("get repo settings error")
             return ""

     def get_files(self):
@@ -110,7 +111,7 @@ class AzureDevopsProvider:
                     new_file_content_str = new_file_content_str.content
                 except Exception as error:
-                    logging.error("Failed to retrieve new file content of %s at version %s. Error: %s", file, version, str(error))
+                    get_logger().error("Failed to retrieve new file content of %s at version %s. Error: %s", file, version, str(error))
                     new_file_content_str = ""

                 edit_type = EDIT_TYPE.MODIFIED
@@ -131,7 +132,7 @@ class AzureDevopsProvider:
                                                                        include_content=True)
                     original_file_content_str = original_file_content_str.content
                 except Exception as error:
-                    logging.error("Failed to retrieve original file content of %s at version %s. Error: %s", file, version, str(error))
+                    get_logger().error("Failed to retrieve original file content of %s at version %s. Error: %s", file, version, str(error))
                     original_file_content_str = ""

                 patch = load_large_diff(file, new_file_content_str, original_file_content_str)
@@ -166,7 +167,7 @@ class AzureDevopsProvider:
                                                           pull_request_id=self.pr_num,
                                                           git_pull_request_to_update=updated_pr)
         except Exception as e:
-            logging.exception(f"Could not update pull request {self.pr_num} description: {e}")
+            get_logger().exception(f"Could not update pull request {self.pr_num} description: {e}")

     def remove_initial_comment(self):
         return ""  # not implemented yet

View File

@@ -1,5 +1,4 @@
 import json
-import logging
 from typing import Optional, Tuple
 from urllib.parse import urlparse
@@ -7,8 +6,9 @@ import requests
 from atlassian.bitbucket import Cloud
 from starlette_context import context

-from ..algo.pr_processing import clip_tokens, find_line_number_of_relevant_line_in_file
+from ..algo.pr_processing import find_line_number_of_relevant_line_in_file
 from ..config_loader import get_settings
+from ..log import get_logger
 from .git_provider import FilePatchInfo, GitProvider
@@ -61,14 +61,14 @@ class BitbucketProvider(GitProvider):
             if not relevant_lines_start or relevant_lines_start == -1:
                 if get_settings().config.verbosity_level >= 2:
-                    logging.exception(
+                    get_logger().exception(
                         f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}"
                     )
                 continue

             if relevant_lines_end < relevant_lines_start:
                 if get_settings().config.verbosity_level >= 2:
-                    logging.exception(
+                    get_logger().exception(
                         f"Failed to publish code suggestion, "
                         f"relevant_lines_end is {relevant_lines_end} and "
                         f"relevant_lines_start is {relevant_lines_start}"
@@ -97,7 +97,7 @@ class BitbucketProvider(GitProvider):
             return True
         except Exception as e:
             if get_settings().config.verbosity_level >= 2:
-                logging.error(f"Failed to publish code suggestion, error: {e}")
+                get_logger().error(f"Failed to publish code suggestion, error: {e}")
             return False

     def is_supported(self, capability: str) -> bool:
@@ -144,7 +144,7 @@ class BitbucketProvider(GitProvider):
             for comment in self.temp_comments:
                 self.pr.delete(f"comments/{comment}")
         except Exception as e:
-            logging.exception(f"Failed to remove temp comments, error: {e}")
+            get_logger().exception(f"Failed to remove temp comments, error: {e}")

     # funtion to create_inline_comment
@@ -152,7 +152,7 @@ class BitbucketProvider(GitProvider):
         position, absolute_position = find_line_number_of_relevant_line_in_file(self.get_diff_files(), relevant_file.strip('`'), relevant_line_in_file)
         if position == -1:
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
+                get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
             subject_type = "FILE"
         else:
             subject_type = "LINE"

View File

@@ -1,17 +1,16 @@
-import logging
 import os
 import re
 from collections import Counter
 from typing import List, Optional, Tuple
 from urllib.parse import urlparse

-from ..algo.language_handler import is_valid_file, language_extension_map
-from ..algo.pr_processing import clip_tokens
-from ..algo.utils import load_large_diff
-from ..config_loader import get_settings
-from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider, IncrementalPR
 from pr_agent.git_providers.codecommit_client import CodeCommitClient
+from ..algo.language_handler import is_valid_file, language_extension_map
+from ..algo.utils import load_large_diff
+from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
+from ..log import get_logger


 class PullRequestCCMimic:
     """
@@ -166,7 +165,7 @@ class CodeCommitProvider(GitProvider):
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
         if is_temporary:
-            logging.info(pr_comment)
+            get_logger().info(pr_comment)
             return

         pr_comment = CodeCommitProvider._remove_markdown_html(pr_comment)
@@ -188,12 +187,12 @@ class CodeCommitProvider(GitProvider):
         for suggestion in code_suggestions:
             # Verify that each suggestion has the required keys
             if not all(key in suggestion for key in ["body", "relevant_file", "relevant_lines_start"]):
-                logging.warning(f"Skipping code suggestion #{counter}: Each suggestion must have 'body', 'relevant_file', 'relevant_lines_start' keys")
+                get_logger().warning(f"Skipping code suggestion #{counter}: Each suggestion must have 'body', 'relevant_file', 'relevant_lines_start' keys")
                 continue

             # Publish the code suggestion to CodeCommit
             try:
-                logging.debug(f"Code Suggestion #{counter} in file: {suggestion['relevant_file']}: {suggestion['relevant_lines_start']}")
+                get_logger().debug(f"Code Suggestion #{counter} in file: {suggestion['relevant_file']}: {suggestion['relevant_lines_start']}")
                 self.codecommit_client.publish_comment(
                     repo_name=self.repo_name,
                     pr_number=self.pr_num,
@@ -296,11 +295,11 @@ class CodeCommitProvider(GitProvider):
         return self.codecommit_client.get_file(self.repo_name, settings_filename, self.pr.source_commit, optional=True)

     def add_eyes_reaction(self, issue_comment_id: int) -> Optional[int]:
-        logging.info("CodeCommit provider does not support eyes reaction yet")
+        get_logger().info("CodeCommit provider does not support eyes reaction yet")
         return True

     def remove_reaction(self, issue_comment_id: int, reaction_id: int) -> bool:
-        logging.info("CodeCommit provider does not support removing reactions yet")
+        get_logger().info("CodeCommit provider does not support removing reactions yet")
         return True

     @staticmethod
@@ -366,7 +365,7 @@ class CodeCommitProvider(GitProvider):
         # TODO: implement support for multiple targets in one CodeCommit PR
         #       for now, we are only using the first target in the PR
         if len(response.targets) > 1:
-            logging.warning(
+            get_logger().warning(
                 "Multiple targets in one PR is not supported for CodeCommit yet. Continuing, using the first target only..."
             )

View File

@@ -1,5 +1,4 @@
 import json
-import logging
 import os
 import pathlib
 import shutil
@@ -7,18 +6,16 @@ import subprocess
 import uuid
 from collections import Counter, namedtuple
 from pathlib import Path
-from tempfile import mkdtemp, NamedTemporaryFile
+from tempfile import NamedTemporaryFile, mkdtemp

 import requests
 import urllib3.util
 from git import Repo

 from pr_agent.config_loader import get_settings
-from pr_agent.git_providers.git_provider import GitProvider, FilePatchInfo, \
-    EDIT_TYPE
+from pr_agent.git_providers.git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
 from pr_agent.git_providers.local_git_provider import PullRequestMimic
+from pr_agent.log import get_logger

-logger = logging.getLogger(__name__)

 def _call(*command, **kwargs) -> (int, str, str):
@@ -33,42 +30,42 @@ def _call(*command, **kwargs) -> (int, str, str):
 def clone(url, directory):
-    logger.info("Cloning %s to %s", url, directory)
+    get_logger().info("Cloning %s to %s", url, directory)
     stdout = _call('git', 'clone', "--depth", "1", url, directory)
-    logger.info(stdout)
+    get_logger().info(stdout)


 def fetch(url, refspec, cwd):
-    logger.info("Fetching %s %s", url, refspec)
+    get_logger().info("Fetching %s %s", url, refspec)
     stdout = _call(
         'git', 'fetch', '--depth', '2', url, refspec,
         cwd=cwd
     )
-    logger.info(stdout)
+    get_logger().info(stdout)


 def checkout(cwd):
-    logger.info("Checking out")
+    get_logger().info("Checking out")
     stdout = _call('git', 'checkout', "FETCH_HEAD", cwd=cwd)
-    logger.info(stdout)
+    get_logger().info(stdout)


 def show(*args, cwd=None):
-    logger.info("Show")
+    get_logger().info("Show")
     return _call('git', 'show', *args, cwd=cwd)


 def diff(*args, cwd=None):
-    logger.info("Diff")
+    get_logger().info("Diff")
     patch = _call('git', 'diff', *args, cwd=cwd)
     if not patch:
-        logger.warning("No changes found")
+        get_logger().warning("No changes found")
         return
     return patch


 def reset_local_changes(cwd):
-    logger.info("Reset local changes")
+    get_logger().info("Reset local changes")
     _call('git', 'checkout', "--force", cwd=cwd)

View File

@@ -1,4 +1,3 @@
-import logging
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
@@ -6,6 +5,8 @@ from dataclasses import dataclass
 from enum import Enum
 from typing import Optional

+from pr_agent.log import get_logger
+

 class EDIT_TYPE(Enum):
     ADDED = 1
@@ -136,7 +137,7 @@ def get_main_pr_language(languages, files) -> str:
     """
     main_language_str = ""
     if not languages:
-        logging.info("No languages detected")
+        get_logger().info("No languages detected")
         return main_language_str

     try:
@@ -172,7 +173,7 @@ def get_main_pr_language(languages, files) -> str:
             main_language_str = top_language
     except Exception as e:
-        logging.exception(e)
+        get_logger().exception(e)
         pass

     return main_language_str

View File

@@ -1,20 +1,19 @@
-import logging
 import hashlib
 from datetime import datetime
-from typing import Optional, Tuple, Any
+from typing import Optional, Tuple
 from urllib.parse import urlparse

-from github import AppAuthentication, Auth, Github, GithubException, Reaction
+from github import AppAuthentication, Auth, Github, GithubException
 from retry import retry
 from starlette_context import context

-from .git_provider import FilePatchInfo, GitProvider, IncrementalPR
 from ..algo.language_handler import is_valid_file
-from ..algo.pr_processing import clip_tokens, find_line_number_of_relevant_line_in_file
 from ..algo.utils import load_large_diff
+from ..algo.pr_processing import find_line_number_of_relevant_line_in_file, clip_tokens
 from ..config_loader import get_settings
+from ..log import get_logger
 from ..servers.utils import RateLimitExceeded
+from .git_provider import FilePatchInfo, GitProvider, IncrementalPR


 class GithubProvider(GitProvider):
@@ -58,7 +57,7 @@ class GithubProvider(GitProvider):
         self.file_set = dict()
         for commit in self.incremental.commits_range:
             if commit.commit.message.startswith(f"Merge branch '{self._get_repo().default_branch}'"):
-                logging.info(f"Skipping merge commit {commit.commit.message}")
+                get_logger().info(f"Skipping merge commit {commit.commit.message}")
                 continue
             self.file_set.update({file.filename: file for file in commit.files})
@@ -130,7 +129,7 @@ class GithubProvider(GitProvider):
             return diff_files

         except GithubException.RateLimitExceededException as e:
-            logging.error(f"Rate limit exceeded for GitHub API. Original message: {e}")
+            get_logger().error(f"Rate limit exceeded for GitHub API. Original message: {e}")
             raise RateLimitExceeded("Rate limit exceeded for GitHub API.") from e

     def publish_description(self, pr_title: str, pr_body: str):
@@ -138,7 +137,7 @@ class GithubProvider(GitProvider):
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
         if is_temporary and not get_settings().config.publish_output_progress:
-            logging.debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            get_logger().debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
             return
         response = self.pr.create_issue_comment(pr_comment)
         if hasattr(response, "user") and hasattr(response.user, "login"):
@@ -156,7 +155,7 @@ class GithubProvider(GitProvider):
         position, absolute_position = find_line_number_of_relevant_line_in_file(self.diff_files, relevant_file.strip('`'), relevant_line_in_file)
         if position == -1:
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
+                get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
             subject_type = "FILE"
         else:
             subject_type = "LINE"
@@ -179,13 +178,13 @@ class GithubProvider(GitProvider):
             if not relevant_lines_start or relevant_lines_start == -1:
                 if get_settings().config.verbosity_level >= 2:
-                    logging.exception(
+                    get_logger().exception(
                         f"Failed to publish code suggestion, relevant_lines_start is {relevant_lines_start}")
                 continue

             if relevant_lines_end < relevant_lines_start:
                 if get_settings().config.verbosity_level >= 2:
-                    logging.exception(f"Failed to publish code suggestion, "
-                                      f"relevant_lines_end is {relevant_lines_end} and "
-                                      f"relevant_lines_start is {relevant_lines_start}")
+                    get_logger().exception(f"Failed to publish code suggestion, "
+                                           f"relevant_lines_end is {relevant_lines_end} and "
+                                           f"relevant_lines_start is {relevant_lines_start}")
                 continue
@@ -212,7 +211,7 @@ class GithubProvider(GitProvider):
             return True
         except Exception as e:
             if get_settings().config.verbosity_level >= 2:
-                logging.error(f"Failed to publish code suggestion, error: {e}")
+                get_logger().error(f"Failed to publish code suggestion, error: {e}")
             return False

     def remove_initial_comment(self):
@@ -221,7 +220,7 @@ class GithubProvider(GitProvider):
                 if comment.is_temporary:
                     comment.delete()
         except Exception as e:
-            logging.exception(f"Failed to remove initial comment, error: {e}")
+            get_logger().exception(f"Failed to remove initial comment, error: {e}")

     def get_title(self):
         return self.pr.title
@@ -269,7 +268,7 @@ class GithubProvider(GitProvider):
             reaction = self.pr.get_issue_comment(issue_comment_id).create_reaction("eyes")
             return reaction.id
         except Exception as e:
-            logging.exception(f"Failed to add eyes reaction, error: {e}")
+            get_logger().exception(f"Failed to add eyes reaction, error: {e}")
             return None

     def remove_reaction(self, issue_comment_id: int, reaction_id: int) -> bool:
@@ -277,7 +276,7 @@ class GithubProvider(GitProvider):
             self.pr.get_issue_comment(issue_comment_id).delete_reaction(reaction_id)
             return True
         except Exception as e:
-            logging.exception(f"Failed to remove eyes reaction, error: {e}")
+            get_logger().exception(f"Failed to remove eyes reaction, error: {e}")
             return False
@@ -396,13 +395,13 @@ class GithubProvider(GitProvider):
                 "PUT", f"{self.pr.issue_url}/labels", input=post_parameters
             )
         except Exception as e:
-            logging.exception(f"Failed to publish labels, error: {e}")
+            get_logger().exception(f"Failed to publish labels, error: {e}")

     def get_labels(self):
         try:
             return [label.name for label in self.pr.labels]
         except Exception as e:
-            logging.exception(f"Failed to get labels, error: {e}")
+            get_logger().exception(f"Failed to get labels, error: {e}")
             return []

     def get_commit_messages(self):
@@ -444,7 +443,7 @@ class GithubProvider(GitProvider):
             return link
         except Exception as e:
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Failed adding line link, error: {e}")
+                get_logger().info(f"Failed adding line link, error: {e}")

         return ""

View File

@@ -1,5 +1,4 @@
 import hashlib
-import logging
 import re
 from typing import Optional, Tuple
 from urllib.parse import urlparse
@@ -12,8 +11,8 @@ from ..algo.pr_processing import clip_tokens, find_line_number_of_relevant_line_
 from ..algo.utils import load_large_diff
 from ..config_loader import get_settings
 from .git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
+from ..log import get_logger

-logger = logging.getLogger()

 class DiffNotFoundError(Exception):
     """Raised when the diff for a merge request cannot be found."""
@@ -59,7 +58,7 @@ class GitLabProvider(GitProvider):
         try:
             self.last_diff = self.mr.diffs.list(get_all=True)[-1]
         except IndexError as e:
-            logger.error(f"Could not get diff for merge request {self.id_mr}")
+            get_logger().error(f"Could not get diff for merge request {self.id_mr}")
             raise DiffNotFoundError(f"Could not get diff for merge request {self.id_mr}") from e
@@ -99,7 +98,7 @@ class GitLabProvider(GitProvider):
                 if isinstance(new_file_content_str, bytes):
                     new_file_content_str = bytes.decode(new_file_content_str, 'utf-8')
             except UnicodeDecodeError:
-                logging.warning(
+                get_logger().warning(
                     f"Cannot decode file {diff['old_path']} or {diff['new_path']} in merge request {self.id_mr}")

             edit_type = EDIT_TYPE.MODIFIED
@@ -135,7 +134,7 @@ class GitLabProvider(GitProvider):
             self.mr.description = pr_body
             self.mr.save()
         except Exception as e:
-            logging.exception(f"Could not update merge request {self.id_mr} description: {e}")
+            get_logger().exception(f"Could not update merge request {self.id_mr} description: {e}")

     def publish_comment(self, mr_comment: str, is_temporary: bool = False):
         comment = self.mr.notes.create({'body': mr_comment})
@@ -157,12 +156,12 @@ class GitLabProvider(GitProvider):
     def send_inline_comment(self,body: str,edit_type: str,found: bool,relevant_file: str,relevant_line_in_file: int,
                             source_line_no: int, target_file: str,target_line_no: int) -> None:
         if not found:
-            logging.info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
+            get_logger().info(f"Could not find position for {relevant_file} {relevant_line_in_file}")
         else:
             # in order to have exact sha's we have to find correct diff for this change
             diff = self.get_relevant_diff(relevant_file, relevant_line_in_file)
             if diff is None:
-                logger.error(f"Could not get diff for merge request {self.id_mr}")
+                get_logger().error(f"Could not get diff for merge request {self.id_mr}")
                 raise DiffNotFoundError(f"Could not get diff for merge request {self.id_mr}")
             pos_obj = {'position_type': 'text',
                        'new_path': target_file.filename,
@@ -175,23 +174,23 @@ class GitLabProvider(GitProvider):
             else:
                 pos_obj['new_line'] = target_line_no - 1
                 pos_obj['old_line'] = source_line_no - 1
-            logging.debug(f"Creating comment in {self.id_mr} with body {body} and position {pos_obj}")
+            get_logger().debug(f"Creating comment in {self.id_mr} with body {body} and position {pos_obj}")
             self.mr.discussions.create({'body': body, 'position': pos_obj})

     def get_relevant_diff(self, relevant_file: str, relevant_line_in_file: int) -> Optional[dict]:
         changes = self.mr.changes()  # Retrieve the changes for the merge request once
         if not changes:
-            logging.error('No changes found for the merge request.')
+            get_logger().error('No changes found for the merge request.')
             return None
         all_diffs = self.mr.diffs.list(get_all=True)
         if not all_diffs:
-            logging.error('No diffs found for the merge request.')
+            get_logger().error('No diffs found for the merge request.')
             return None
         for diff in all_diffs:
             for change in changes['changes']:
                 if change['new_path'] == relevant_file and relevant_line_in_file in change['diff']:
                     return diff
-        logging.debug(
+        get_logger().debug(
             f'No relevant diff found for {relevant_file} {relevant_line_in_file}. Falling back to last diff.')
         return self.last_diff  # fallback to last_diff if no relevant diff is found
@@ -226,7 +225,7 @@ class GitLabProvider(GitProvider):
                 self.send_inline_comment(body, edit_type, found, relevant_file, relevant_line_in_file, source_line_no,
                                          target_file, target_line_no)
             except Exception as e:
-                logging.exception(f"Could not publish code suggestion:\nsuggestion: {suggestion}\nerror: {e}")
+                get_logger().exception(f"Could not publish code suggestion:\nsuggestion: {suggestion}\nerror: {e}")

         # note that we publish suggestions one-by-one. so, if one fails, the rest will still be published
         return True
@@ -290,7 +289,7 @@ class GitLabProvider(GitProvider):
             for comment in self.temp_comments:
                 comment.delete()
         except Exception as e:
-            logging.exception(f"Failed to remove temp comments, error: {e}")
+            get_logger().exception(f"Failed to remove temp comments, error: {e}")

     def get_title(self):
         return self.mr.title
@@ -358,7 +357,7 @@ class GitLabProvider(GitProvider):
             self.mr.labels = list(set(pr_types))
             self.mr.save()
         except Exception as e:
-            logging.exception(f"Failed to publish labels, error: {e}")
+            get_logger().exception(f"Failed to publish labels, error: {e}")

     def publish_inline_comments(self, comments: list[dict]):
         pass
@@ -410,6 +409,6 @@ class GitLabProvider(GitProvider):
             return link
         except Exception as e:
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Failed adding line link, error: {e}")
+                get_logger().info(f"Failed adding line link, error: {e}")

         return ""

View File

@@ -1,4 +1,3 @@
-import logging
 from collections import Counter
 from pathlib import Path
 from typing import List
@@ -7,6 +6,7 @@ from git import Repo
 from pr_agent.config_loader import _find_repository_root, get_settings
 from pr_agent.git_providers.git_provider import EDIT_TYPE, FilePatchInfo, GitProvider
+from pr_agent.log import get_logger


 class PullRequestMimic:
@@ -49,7 +49,7 @@ class LocalGitProvider(GitProvider):
         """
         Prepare the repository for PR-mimic generation.
         """
-        logging.debug('Preparing repository for PR-mimic generation...')
+        get_logger().debug('Preparing repository for PR-mimic generation...')
         if self.repo.is_dirty():
             raise ValueError('The repository is not in a clean state. Please commit or stash pending changes.')
         if self.target_branch_name not in self.repo.heads:

View File

@@ -1,5 +1,4 @@
 import copy
-import logging
 import os
 import tempfile
@@ -7,6 +6,7 @@ from dynaconf import Dynaconf
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
+from pr_agent.log import get_logger


 def apply_repo_settings(pr_url):
@@ -32,4 +32,4 @@ def apply_repo_settings(pr_url):
             try:
                 os.remove(repo_settings_file)
             except Exception as e:
-                logging.error(f"Failed to remove temporary settings file {repo_settings_file}", e)
+                get_logger().error(f"Failed to remove temporary settings file {repo_settings_file}", e)

pr_agent/log/__init__.py Normal file
View File

@@ -0,0 +1,40 @@
+import json
+import logging
+import sys
+from enum import Enum
+
+from loguru import logger
+
+
+class LoggingFormat(str, Enum):
+    CONSOLE = "CONSOLE"
+    JSON = "JSON"
+
+
+def json_format(record: dict) -> str:
+    return record["message"]
+
+
+def setup_logger(level: str = "INFO", fmt: LoggingFormat = LoggingFormat.CONSOLE):
+    level: int = logging.getLevelName(level.upper())
+    if type(level) is not int:
+        level = logging.INFO
+
+    if fmt == LoggingFormat.JSON:
+        logger.remove(None)
+        logger.add(
+            sys.stdout,
+            level=level,
+            format="{message}",
+            colorize=False,
+            serialize=True,
+        )
+    elif fmt == LoggingFormat.CONSOLE:
+        logger.remove(None)
+        logger.add(sys.stdout, level=level, colorize=True)
+
+    return logger
+
+
+def get_logger(*args, **kwargs):
+    return logger
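
For reference, a minimal usage sketch of this new module (one assumption here: loguru attaches extra keyword arguments to record["extra"], so with serialize=True they come out as structured JSON fields; the field values below are illustrative, not from this commit):

    from pr_agent.log import LoggingFormat, get_logger, setup_logger

    # Server entry points use JSON so each log line is one serialized record;
    # the CLI keeps the default colorized CONSOLE format.
    setup_logger(level="DEBUG", fmt=LoggingFormat.JSON)

    # Extra kwargs ride along in record["extra"] and appear in the JSON output,
    # mirroring calls like get_logger().info("AI response", finish_reason=..., model=...).
    get_logger().info("AI response", finish_reason="stop", model="gpt-4")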

View File

@@ -1,9 +1,7 @@
 import copy
 import hashlib
 import json
-import logging
 import os
-import sys
 import time

 import jwt
@@ -18,9 +16,10 @@ from starlette_context.middleware import RawContextMiddleware
 from pr_agent.agent.pr_agent import PRAgent
 from pr_agent.config_loader import get_settings, global_settings
+from pr_agent.log import LoggingFormat, get_logger, setup_logger
 from pr_agent.secret_providers import get_secret_provider

-logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+setup_logger(fmt=LoggingFormat.JSON)
 router = APIRouter()
 secret_provider = get_secret_provider()
@@ -49,7 +48,7 @@ async def get_bearer_token(shared_secret: str, client_key: str):
             bearer_token = response.json()["access_token"]
             return bearer_token
     except Exception as e:
-        logging.error(f"Failed to get bearer token: {e}")
+        get_logger().error(f"Failed to get bearer token: {e}")
         raise e

 @router.get("/")
@@ -60,7 +59,7 @@ async def handle_manifest(request: Request, response: Response):
         manifest = manifest.replace("app_key", get_settings().bitbucket.app_key)
         manifest = manifest.replace("base_url", get_settings().bitbucket.base_url)
     except:
-        logging.error("Failed to replace api_key in Bitbucket manifest, trying to continue")
+        get_logger().error("Failed to replace api_key in Bitbucket manifest, trying to continue")
     manifest_obj = json.loads(manifest)
     return JSONResponse(manifest_obj)
@@ -92,7 +91,7 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Req
                 comment_body = data["data"]["comment"]["content"]["raw"]
                 await agent.handle_request(pr_url, comment_body)
         except Exception as e:
-            logging.error(f"Failed to handle webhook: {e}")
+            get_logger().error(f"Failed to handle webhook: {e}")
     background_tasks.add_task(inner)
     return "OK"
@@ -115,7 +114,7 @@ async def handle_installed_webhooks(request: Request, response: Response):
         }
         secret_provider.store_secret(username, json.dumps(secrets))
     except Exception as e:
-        logging.error(f"Failed to register user: {e}")
+        get_logger().error(f"Failed to register user: {e}")
         return JSONResponse({"error": "Unable to register user"}, status_code=500)

 @router.post("/uninstalled")

View File

@@ -1,6 +1,4 @@
 import copy
-import logging
-import sys
 from enum import Enum
 from json import JSONDecodeError
@@ -12,9 +10,10 @@ from starlette_context import context
 from starlette_context.middleware import RawContextMiddleware

 from pr_agent.agent.pr_agent import PRAgent
-from pr_agent.config_loader import global_settings, get_settings
+from pr_agent.config_loader import get_settings, global_settings
+from pr_agent.log import get_logger, setup_logger

-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+setup_logger()

 router = APIRouter()
@@ -35,7 +34,7 @@ class Item(BaseModel):

 @router.post("/api/v1/gerrit/{action}")
 async def handle_gerrit_request(action: Action, item: Item):
-    logging.debug("Received a Gerrit request")
+    get_logger().debug("Received a Gerrit request")
     context["settings"] = copy.deepcopy(global_settings)

     if action == Action.ask:
@@ -54,7 +53,7 @@ async def get_body(request):
     try:
         body = await request.json()
     except JSONDecodeError as e:
-        logging.error("Error parsing request body", e)
+        get_logger().error("Error parsing request body", e)
         return {}
     return body

View File

@@ -1,6 +1,4 @@
 import copy
-import logging
-import sys
 import os
 import time
 from typing import Any, Dict
@@ -16,9 +14,11 @@ from pr_agent.algo.utils import update_settings_from_args
 from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.utils import apply_repo_settings
+from pr_agent.log import LoggingFormat, get_logger, setup_logger
 from pr_agent.servers.utils import verify_signature

-logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+setup_logger(fmt=LoggingFormat.JSON)

 router = APIRouter()
@@ -29,11 +29,11 @@ async def handle_github_webhooks(request: Request, response: Response):
     Verifies the request signature, parses the request body, and passes it to the handle_request function for further
     processing.
     """
-    logging.debug("Received a GitHub webhook")
+    get_logger().debug("Received a GitHub webhook")

     body = await get_body(request)

-    logging.debug(f'Request body:\n{body}')
+    get_logger().debug(f'Request body:\n{body}')
     installation_id = body.get("installation", {}).get("id")
     context["installation_id"] = installation_id
     context["settings"] = copy.deepcopy(global_settings)
@@ -45,13 +45,13 @@ async def handle_github_webhooks(request: Request, response: Response):
 @router.post("/api/v1/marketplace_webhooks")
 async def handle_marketplace_webhooks(request: Request, response: Response):
     body = await get_body(request)
-    logging.info(f'Request body:\n{body}')
+    get_logger().info(f'Request body:\n{body}')


 async def get_body(request):
     try:
         body = await request.json()
     except Exception as e:
-        logging.error("Error parsing request body", e)
+        get_logger().error("Error parsing request body", e)
         raise HTTPException(status_code=400, detail="Error parsing request body") from e
     webhook_secret = getattr(get_settings().github, 'webhook_secret', None)
     if webhook_secret:
@@ -77,8 +77,8 @@ async def handle_request(body: Dict[str, Any], event: str):
         return {}
     agent = PRAgent()
     bot_user = get_settings().github_app.bot_user
-    logging.info(f"action: '{action}'")
-    logging.info(f"event: '{event}'")
+    sender = body.get("sender", {}).get("login")
+    log_context = {"action": action, "event": event, "sender": sender}

     if get_settings().github_app.duplicate_requests_cache and _is_duplicate_request(body):
         return {}
@@ -88,21 +88,22 @@ async def handle_request(body: Dict[str, Any], event: str):
         if "comment" not in body:
             return {}
         comment_body = body.get("comment", {}).get("body")
-        sender = body.get("sender", {}).get("login")
         if sender and bot_user in sender:
-            logging.info(f"Ignoring comment from {bot_user} user")
+            get_logger().info(f"Ignoring comment from {bot_user} user")
             return {}
-        logging.info(f"Processing comment from {sender} user")
+        get_logger().info(f"Processing comment from {sender} user")
         if "issue" in body and "pull_request" in body["issue"] and "url" in body["issue"]["pull_request"]:
             api_url = body["issue"]["pull_request"]["url"]
         elif "comment" in body and "pull_request_url" in body["comment"]:
             api_url = body["comment"]["pull_request_url"]
         else:
             return {}
-        logging.info(body)
-        logging.info(f"Handling comment because of event={event} and action={action}")
+        log_context["api_url"] = api_url
+        get_logger().info(body)
+        get_logger().info(f"Handling comment because of event={event} and action={action}")
         comment_id = body.get("comment", {}).get("id")
         provider = get_git_provider()(pr_url=api_url)
with get_logger().contextualize(**log_context):
await agent.handle_request(api_url, comment_body, notify=lambda: provider.add_eyes_reaction(comment_id)) await agent.handle_request(api_url, comment_body, notify=lambda: provider.add_eyes_reaction(comment_id))
# handle pull_request event: # handle pull_request event:
@ -115,6 +116,7 @@ async def handle_request(body: Dict[str, Any], event: str):
api_url = pull_request.get("url") api_url = pull_request.get("url")
if not api_url: if not api_url:
return {} return {}
log_context["api_url"] = api_url
if pull_request.get("draft", True) or pull_request.get("state") != "open" or pull_request.get("user", {}).get("login", "") == bot_user: if pull_request.get("draft", True) or pull_request.get("state") != "open" or pull_request.get("user", {}).get("login", "") == bot_user:
return {} return {}
if action in get_settings().github_app.handle_pr_actions: if action in get_settings().github_app.handle_pr_actions:
@ -124,7 +126,7 @@ async def handle_request(body: Dict[str, Any], event: str):
if pull_request.get("created_at") == pull_request.get("updated_at"): if pull_request.get("created_at") == pull_request.get("updated_at"):
# avoid double reviews when opening a PR for the first time # avoid double reviews when opening a PR for the first time
return {} return {}
logging.info(f"Performing review because of event={event} and action={action}") get_logger().info(f"Performing review because of event={event} and action={action}")
apply_repo_settings(api_url) apply_repo_settings(api_url)
for command in get_settings().github_app.pr_commands: for command in get_settings().github_app.pr_commands:
split_command = command.split(" ") split_command = command.split(" ")
@ -132,11 +134,12 @@ async def handle_request(body: Dict[str, Any], event: str):
args = split_command[1:] args = split_command[1:]
other_args = update_settings_from_args(args) other_args = update_settings_from_args(args)
new_command = ' '.join([command] + other_args) new_command = ' '.join([command] + other_args)
logging.info(body) get_logger().info(body)
logging.info(f"Performing command: {new_command}") get_logger().info(f"Performing command: {new_command}")
with get_logger().contextualize(**log_context):
await agent.handle_request(api_url, new_command) await agent.handle_request(api_url, new_command)
logging.info("event or action does not require handling") get_logger().info("event or action does not require handling")
return {} return {}
@ -146,7 +149,7 @@ def _is_duplicate_request(body: Dict[str, Any]) -> bool:
This function checks if the request is duplicate and if so - ignores it. This function checks if the request is duplicate and if so - ignores it.
""" """
request_hash = hash(str(body)) request_hash = hash(str(body))
logging.info(f"request_hash: {request_hash}") get_logger().info(f"request_hash: {request_hash}")
request_time = time.monotonic() request_time = time.monotonic()
ttl = get_settings().github_app.duplicate_requests_cache_ttl # in seconds ttl = get_settings().github_app.duplicate_requests_cache_ttl # in seconds
to_delete = [key for key, key_time in _duplicate_requests_cache.items() if request_time - key_time > ttl] to_delete = [key for key, key_time in _duplicate_requests_cache.items() if request_time - key_time > ttl]
@ -155,7 +158,7 @@ def _is_duplicate_request(body: Dict[str, Any]) -> bool:
is_duplicate = request_hash in _duplicate_requests_cache is_duplicate = request_hash in _duplicate_requests_cache
_duplicate_requests_cache[request_hash] = request_time _duplicate_requests_cache[request_hash] = request_time
if is_duplicate: if is_duplicate:
logging.info(f"Ignoring duplicate request {request_hash}") get_logger().info(f"Ignoring duplicate request {request_hash}")
return is_duplicate return is_duplicate
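_is_duplicate_request above is a small TTL cache keyed by a hash of the webhook body. The same logic as a standalone sketch, assuming _duplicate_requests_cache is a plain dict mapping hash to monotonic timestamp:

import time

_duplicate_requests_cache = {}


def is_duplicate_request(body: dict, ttl: float = 60.0) -> bool:
    request_hash = hash(str(body))
    request_time = time.monotonic()
    # evict entries older than ttl seconds
    to_delete = [k for k, t in _duplicate_requests_cache.items() if request_time - t > ttl]
    for key in to_delete:
        del _duplicate_requests_cache[key]
    is_duplicate = request_hash in _duplicate_requests_cache
    _duplicate_requests_cache[request_hash] = request_time
    return is_duplicate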
View File
@ -1,6 +1,4 @@
import asyncio import asyncio
import logging
import sys
from datetime import datetime, timezone from datetime import datetime, timezone
import aiohttp import aiohttp
@ -8,9 +6,10 @@ import aiohttp
from pr_agent.agent.pr_agent import PRAgent from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.log import LoggingFormat, get_logger, setup_logger
from pr_agent.servers.help import bot_help_text from pr_agent.servers.help import bot_help_text
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) setup_logger(fmt=LoggingFormat.JSON)
NOTIFICATION_URL = "https://api.github.com/notifications" NOTIFICATION_URL = "https://api.github.com/notifications"
@ -94,7 +93,7 @@ async def polling_loop():
comment_body = comment['body'] if 'body' in comment else '' comment_body = comment['body'] if 'body' in comment else ''
commenter_github_user = comment['user']['login'] \ commenter_github_user = comment['user']['login'] \
if 'user' in comment else '' if 'user' in comment else ''
logging.info(f"Commenter: {commenter_github_user}\nComment: {comment_body}") get_logger().info(f"Commenter: {commenter_github_user}\nComment: {comment_body}")
user_tag = "@" + user_id user_tag = "@" + user_id
if user_tag not in comment_body: if user_tag not in comment_body:
continue continue
@ -112,7 +111,7 @@ async def polling_loop():
print(f"Failed to fetch notifications. Status code: {response.status}") print(f"Failed to fetch notifications. Status code: {response.status}")
except Exception as e: except Exception as e:
logging.error(f"Exception during processing of a notification: {e}") get_logger().error(f"Exception during processing of a notification: {e}")
if __name__ == '__main__': if __name__ == '__main__':
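Most of polling_loop is elided from this hunk. A rough skeleton of a single polling step, assuming the usual aiohttp flow (poll_once and the headers argument are illustrative names, not taken from the source):

import aiohttp

NOTIFICATION_URL = "https://api.github.com/notifications"


async def poll_once(session: aiohttp.ClientSession, headers: dict) -> list:
    async with session.get(NOTIFICATION_URL, headers=headers) as response:
        if response.status != 200:
            print(f"Failed to fetch notifications. Status code: {response.status}")
            return []
        return await response.json()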
View File
@ -1,7 +1,5 @@
import copy import copy
import json import json
import logging
import sys
import uvicorn import uvicorn
from fastapi import APIRouter, FastAPI, Request, status from fastapi import APIRouter, FastAPI, Request, status
@ -14,9 +12,10 @@ from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent from pr_agent.agent.pr_agent import PRAgent
from pr_agent.config_loader import get_settings, global_settings from pr_agent.config_loader import get_settings, global_settings
from pr_agent.log import get_logger, setup_logger
from pr_agent.secret_providers import get_secret_provider from pr_agent.secret_providers import get_secret_provider
logging.basicConfig(stream=sys.stdout, level=logging.INFO) setup_logger()
router = APIRouter() router = APIRouter()
secret_provider = get_secret_provider() if get_settings().get("CONFIG.SECRET_PROVIDER") else None secret_provider = get_secret_provider() if get_settings().get("CONFIG.SECRET_PROVIDER") else None
@ -33,7 +32,7 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
context["settings"] = copy.deepcopy(global_settings) context["settings"] = copy.deepcopy(global_settings)
context["settings"].gitlab.personal_access_token = gitlab_token context["settings"].gitlab.personal_access_token = gitlab_token
except Exception as e: except Exception as e:
logging.error(f"Failed to validate secret {request_token}: {e}") get_logger().error(f"Failed to validate secret {request_token}: {e}")
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"})) return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"}))
elif get_settings().get("GITLAB.SHARED_SECRET"): elif get_settings().get("GITLAB.SHARED_SECRET"):
secret = get_settings().get("GITLAB.SHARED_SECRET") secret = get_settings().get("GITLAB.SHARED_SECRET")
@ -45,9 +44,9 @@ async def gitlab_webhook(background_tasks: BackgroundTasks, request: Request):
if not gitlab_token: if not gitlab_token:
return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"})) return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"message": "unauthorized"}))
data = await request.json() data = await request.json()
logging.info(json.dumps(data)) get_logger().info(json.dumps(data))
if data.get('object_kind') == 'merge_request' and data['object_attributes'].get('action') in ['open', 'reopen']: if data.get('object_kind') == 'merge_request' and data['object_attributes'].get('action') in ['open', 'reopen']:
logging.info(f"A merge request has been opened: {data['object_attributes'].get('title')}") get_logger().info(f"A merge request has been opened: {data['object_attributes'].get('title')}")
url = data['object_attributes'].get('url') url = data['object_attributes'].get('url')
background_tasks.add_task(PRAgent().handle_request, url, "/review") background_tasks.add_task(PRAgent().handle_request, url, "/review")
elif data.get('object_kind') == 'note' and data['event_type'] == 'note': elif data.get('object_kind') == 'note' and data['event_type'] == 'note':
View File
@ -1,12 +1,10 @@
import logging
from fastapi import FastAPI from fastapi import FastAPI
from mangum import Mangum from mangum import Mangum
from pr_agent.log import setup_logger
from pr_agent.servers.github_app import router from pr_agent.servers.github_app import router
logger = logging.getLogger() setup_logger()
logger.setLevel(logging.DEBUG)
app = FastAPI() app = FastAPI()
app.include_router(router) app.include_router(router)
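The Lambda entrypoint itself falls outside this hunk; presumably the module ends by wrapping the FastAPI app with Mangum, along these lines:

from fastapi import FastAPI
from mangum import Mangum

app = FastAPI()
handler = Mangum(app)  # AWS Lambda invokes handler(event, context)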
View File
@ -1,16 +1,17 @@
import copy import copy
import logging
import textwrap import textwrap
from typing import List, Dict from typing import Dict
from jinja2 import Environment, StrictUndefined from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handler import AiHandler from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_multi_diffs from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml from pr_agent.algo.utils import load_yaml
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import BitbucketProvider, get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
class PRAddDocs: class PRAddDocs:
@ -43,34 +44,34 @@ class PRAddDocs:
async def run(self): async def run(self):
try: try:
logging.info('Generating code Docs for PR...') get_logger().info('Generating code Docs for PR...')
if get_settings().config.publish_output: if get_settings().config.publish_output:
self.git_provider.publish_comment("Generating Documentation...", is_temporary=True) self.git_provider.publish_comment("Generating Documentation...", is_temporary=True)
logging.info('Preparing PR documentation...') get_logger().info('Preparing PR documentation...')
await retry_with_fallback_models(self._prepare_prediction) await retry_with_fallback_models(self._prepare_prediction)
data = self._prepare_pr_code_docs() data = self._prepare_pr_code_docs()
if (not data) or (not 'Code Documentation' in data): if (not data) or (not 'Code Documentation' in data):
logging.info('No code documentation found for PR.') get_logger().info('No code documentation found for PR.')
return return
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info('Pushing PR documentation...') get_logger().info('Pushing PR documentation...')
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
logging.info('Pushing inline code documentation...') get_logger().info('Pushing inline code documentation...')
self.push_inline_docs(data) self.push_inline_docs(data)
except Exception as e: except Exception as e:
logging.error(f"Failed to generate code documentation for PR, error: {e}") get_logger().error(f"Failed to generate code documentation for PR, error: {e}")
async def _prepare_prediction(self, model: str): async def _prepare_prediction(self, model: str):
logging.info('Getting PR diff...') get_logger().info('Getting PR diff...')
self.patches_diff = get_pr_diff(self.git_provider, self.patches_diff = get_pr_diff(self.git_provider,
self.token_handler, self.token_handler,
model, model,
add_line_numbers_to_hunks=True, add_line_numbers_to_hunks=True,
disable_extra_lines=False) disable_extra_lines=False)
logging.info('Getting AI prediction...') get_logger().info('Getting AI prediction...')
self.prediction = await self._get_prediction(model) self.prediction = await self._get_prediction(model)
async def _get_prediction(self, model: str): async def _get_prediction(self, model: str):
@ -80,8 +81,8 @@ class PRAddDocs:
system_prompt = environment.from_string(get_settings().pr_add_docs_prompt.system).render(variables) system_prompt = environment.from_string(get_settings().pr_add_docs_prompt.system).render(variables)
user_prompt = environment.from_string(get_settings().pr_add_docs_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_add_docs_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2, response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
system=system_prompt, user=user_prompt) system=system_prompt, user=user_prompt)
@ -103,7 +104,7 @@ class PRAddDocs:
for d in data['Code Documentation']: for d in data['Code Documentation']:
try: try:
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"add_docs: {d}") get_logger().info(f"add_docs: {d}")
relevant_file = d['relevant file'].strip() relevant_file = d['relevant file'].strip()
relevant_line = int(d['relevant line']) # absolute position relevant_line = int(d['relevant line']) # absolute position
documentation = d['documentation'] documentation = d['documentation']
@ -118,11 +119,11 @@ class PRAddDocs:
'relevant_lines_end': relevant_line}) 'relevant_lines_end': relevant_line})
except Exception: except Exception:
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"Could not parse code docs: {d}") get_logger().info(f"Could not parse code docs: {d}")
is_successful = self.git_provider.publish_code_suggestions(docs) is_successful = self.git_provider.publish_code_suggestions(docs)
if not is_successful: if not is_successful:
logging.info("Failed to publish code docs, trying to publish each docs separately") get_logger().info("Failed to publish code docs, trying to publish each docs separately")
for doc_suggestion in docs: for doc_suggestion in docs:
self.git_provider.publish_code_suggestions([doc_suggestion]) self.git_provider.publish_code_suggestions([doc_suggestion])
@ -154,7 +155,7 @@ class PRAddDocs:
new_code_snippet = new_code_snippet.rstrip() + "\n" + original_initial_line new_code_snippet = new_code_snippet.rstrip() + "\n" + original_initial_line
except Exception as e: except Exception as e:
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"Could not dedent code snippet for file {relevant_file}, error: {e}") get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
return new_code_snippet return new_code_snippet
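retry_with_fallback_models (from pr_agent.algo.pr_processing) is used by every tool in this commit, but its body is not shown here. A plausible sketch of the contract it appears to satisfy; the model names are placeholders, and the real helper presumably reads the fallback list from settings:

async def retry_with_fallback_models(f, models=("gpt-4", "gpt-3.5-turbo-16k")):
    last_exc = None
    for model in models:
        try:
            # call the tool's _prepare_prediction-style coroutine with one model
            return await f(model)
        except Exception as e:
            last_exc = e  # fall through to the next configured model
    raise last_exc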
View File
@ -1,16 +1,17 @@
import copy import copy
import logging
import textwrap import textwrap
from typing import List, Dict from typing import Dict, List
from jinja2 import Environment, StrictUndefined from jinja2 import Environment, StrictUndefined
from pr_agent.algo.ai_handler import AiHandler from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_multi_diffs from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml from pr_agent.algo.utils import load_yaml
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import BitbucketProvider, get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
class PRCodeSuggestions: class PRCodeSuggestions:
@ -52,42 +53,42 @@ class PRCodeSuggestions:
async def run(self): async def run(self):
try: try:
logging.info('Generating code suggestions for PR...') get_logger().info('Generating code suggestions for PR...')
if get_settings().config.publish_output: if get_settings().config.publish_output:
self.git_provider.publish_comment("Preparing review...", is_temporary=True) self.git_provider.publish_comment("Preparing review...", is_temporary=True)
logging.info('Preparing PR review...') get_logger().info('Preparing PR review...')
if not self.is_extended: if not self.is_extended:
await retry_with_fallback_models(self._prepare_prediction) await retry_with_fallback_models(self._prepare_prediction)
data = self._prepare_pr_code_suggestions() data = self._prepare_pr_code_suggestions()
else: else:
data = await retry_with_fallback_models(self._prepare_prediction_extended) data = await retry_with_fallback_models(self._prepare_prediction_extended)
if (not data) or (not 'Code suggestions' in data): if (not data) or (not 'Code suggestions' in data):
logging.info('No code suggestions found for PR.') get_logger().info('No code suggestions found for PR.')
return return
if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \ if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
(self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions): (self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions):
logging.info('Ranking Suggestions...') get_logger().info('Ranking Suggestions...')
data['Code suggestions'] = await self.rank_suggestions(data['Code suggestions']) data['Code suggestions'] = await self.rank_suggestions(data['Code suggestions'])
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info('Pushing PR review...') get_logger().info('Pushing PR review...')
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
logging.info('Pushing inline code suggestions...') get_logger().info('Pushing inline code suggestions...')
self.push_inline_code_suggestions(data) self.push_inline_code_suggestions(data)
except Exception as e: except Exception as e:
logging.error(f"Failed to generate code suggestions for PR, error: {e}") get_logger().error(f"Failed to generate code suggestions for PR, error: {e}")
async def _prepare_prediction(self, model: str): async def _prepare_prediction(self, model: str):
logging.info('Getting PR diff...') get_logger().info('Getting PR diff...')
self.patches_diff = get_pr_diff(self.git_provider, self.patches_diff = get_pr_diff(self.git_provider,
self.token_handler, self.token_handler,
model, model,
add_line_numbers_to_hunks=True, add_line_numbers_to_hunks=True,
disable_extra_lines=True) disable_extra_lines=True)
logging.info('Getting AI prediction...') get_logger().info('Getting AI prediction...')
self.prediction = await self._get_prediction(model) self.prediction = await self._get_prediction(model)
async def _get_prediction(self, model: str): async def _get_prediction(self, model: str):
@ -97,8 +98,8 @@ class PRCodeSuggestions:
system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables) system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables)
user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2, response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
system=system_prompt, user=user_prompt) system=system_prompt, user=user_prompt)
@ -120,7 +121,7 @@ class PRCodeSuggestions:
for d in data['Code suggestions']: for d in data['Code suggestions']:
try: try:
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"suggestion: {d}") get_logger().info(f"suggestion: {d}")
relevant_file = d['relevant file'].strip() relevant_file = d['relevant file'].strip()
relevant_lines_start = int(d['relevant lines start']) # absolute position relevant_lines_start = int(d['relevant lines start']) # absolute position
relevant_lines_end = int(d['relevant lines end']) relevant_lines_end = int(d['relevant lines end'])
@ -136,11 +137,11 @@ class PRCodeSuggestions:
'relevant_lines_end': relevant_lines_end}) 'relevant_lines_end': relevant_lines_end})
except Exception: except Exception:
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"Could not parse suggestion: {d}") get_logger().info(f"Could not parse suggestion: {d}")
is_successful = self.git_provider.publish_code_suggestions(code_suggestions) is_successful = self.git_provider.publish_code_suggestions(code_suggestions)
if not is_successful: if not is_successful:
logging.info("Failed to publish code suggestions, trying to publish each suggestion separately") get_logger().info("Failed to publish code suggestions, trying to publish each suggestion separately")
for code_suggestion in code_suggestions: for code_suggestion in code_suggestions:
self.git_provider.publish_code_suggestions([code_suggestion]) self.git_provider.publish_code_suggestions([code_suggestion])
@ -162,19 +163,19 @@ class PRCodeSuggestions:
new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n') new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
except Exception as e: except Exception as e:
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"Could not dedent code snippet for file {relevant_file}, error: {e}") get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
return new_code_snippet return new_code_snippet
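The snippet re-indentation above, in isolation: the model's suggestion is indented to match the original line's leading whitespace before publishing. The values and the delta_spaces computation below are illustrative assumptions:

import textwrap

original_line = "        return x"
new_code_snippet = "if x is None:\n    raise ValueError\nreturn x"

# assumed: delta_spaces is derived from the original line's indentation
delta_spaces = len(original_line) - len(original_line.lstrip())
print(textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n'))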
async def _prepare_prediction_extended(self, model: str) -> dict: async def _prepare_prediction_extended(self, model: str) -> dict:
logging.info('Getting PR diff...') get_logger().info('Getting PR diff...')
patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model, patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
max_calls=get_settings().pr_code_suggestions.max_number_of_calls) max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
logging.info('Getting multi AI predictions...') get_logger().info('Getting multi AI predictions...')
prediction_list = [] prediction_list = []
for i, patches_diff in enumerate(patches_diff_list): for i, patches_diff in enumerate(patches_diff_list):
logging.info(f"Processing chunk {i + 1} of {len(patches_diff_list)}") get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
self.patches_diff = patches_diff self.patches_diff = patches_diff
prediction = await self._get_prediction(model) prediction = await self._get_prediction(model)
prediction_list.append(prediction) prediction_list.append(prediction)
@ -222,8 +223,8 @@ class PRCodeSuggestions:
variables) variables)
user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt, response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
user=user_prompt) user=user_prompt)
@ -238,7 +239,7 @@ class PRCodeSuggestions:
data_sorted = data_sorted[:new_len] data_sorted = data_sorted[:new_len]
except Exception as e: except Exception as e:
if get_settings().config.verbosity_level >= 1: if get_settings().config.verbosity_level >= 1:
logging.info(f"Could not sort suggestions, error: {e}") get_logger().info(f"Could not sort suggestions, error: {e}")
data_sorted = suggestion_list data_sorted = suggestion_list
return data_sorted return data_sorted
View File
@ -1,7 +1,6 @@
import logging
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.log import get_logger
class PRConfig: class PRConfig:
@ -19,11 +18,11 @@ class PRConfig:
self.git_provider = get_git_provider()(pr_url) self.git_provider = get_git_provider()(pr_url)
async def run(self): async def run(self):
logging.info('Getting configuration settings...') get_logger().info('Getting configuration settings...')
logging.info('Preparing configs...') get_logger().info('Preparing configs...')
pr_comment = self._prepare_pr_configs() pr_comment = self._prepare_pr_configs()
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info('Pushing configs...') get_logger().info('Pushing configs...')
self.git_provider.publish_comment(pr_comment) self.git_provider.publish_comment(pr_comment)
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
return "" return ""
@ -44,5 +43,5 @@ class PRConfig:
comment_str += f"\n{header.lower()}.{key.lower()} = {repr(value) if isinstance(value, str) else value}" comment_str += f"\n{header.lower()}.{key.lower()} = {repr(value) if isinstance(value, str) else value}"
comment_str += " " comment_str += " "
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"comment_str:\n{comment_str}") get_logger().info(f"comment_str:\n{comment_str}")
return comment_str return comment_str
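The comment_str logged above flattens configuration sections into "section.key = value" lines, for example (settings values made up):

settings = {"config": {"publish_output": True, "model": "gpt-4"}}
comment_str = ""
for header, section in settings.items():
    for key, value in section.items():
        comment_str += f"\n{header.lower()}.{key.lower()} = {repr(value) if isinstance(value, str) else value}"
print(comment_str)
# config.publish_output = True
# config.model = 'gpt-4'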
View File
@ -1,7 +1,5 @@
import copy import copy
import json
import re import re
import logging
from typing import List, Tuple from typing import List, Tuple
from jinja2 import Environment, StrictUndefined from jinja2 import Environment, StrictUndefined
@ -13,6 +11,7 @@ from pr_agent.algo.utils import load_yaml
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
class PRDescription: class PRDescription:
@ -65,13 +64,13 @@ class PRDescription:
""" """
try: try:
logging.info(f"Generating a PR description {self.pr_id}") get_logger().info(f"Generating a PR description {self.pr_id}")
if get_settings().config.publish_output: if get_settings().config.publish_output:
self.git_provider.publish_comment("Preparing PR description...", is_temporary=True) self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)
await retry_with_fallback_models(self._prepare_prediction) await retry_with_fallback_models(self._prepare_prediction)
logging.info(f"Preparing answer {self.pr_id}") get_logger().info(f"Preparing answer {self.pr_id}")
if self.prediction: if self.prediction:
self._prepare_data() self._prepare_data()
else: else:
@ -88,7 +87,7 @@ class PRDescription:
full_markdown_description = f"## Title\n\n{pr_title}\n\n___\n{pr_body}" full_markdown_description = f"## Title\n\n{pr_title}\n\n___\n{pr_body}"
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info(f"Pushing answer {self.pr_id}") get_logger().info(f"Pushing answer {self.pr_id}")
if get_settings().pr_description.publish_description_as_comment: if get_settings().pr_description.publish_description_as_comment:
self.git_provider.publish_comment(full_markdown_description) self.git_provider.publish_comment(full_markdown_description)
else: else:
@ -100,7 +99,7 @@ class PRDescription:
self.git_provider.publish_labels(pr_labels + current_labels) self.git_provider.publish_labels(pr_labels + current_labels)
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
except Exception as e: except Exception as e:
logging.error(f"Error generating PR description {self.pr_id}: {e}") get_logger().error(f"Error generating PR description {self.pr_id}: {e}")
return "" return ""
@ -121,9 +120,9 @@ class PRDescription:
if get_settings().pr_description.use_description_markers and 'pr_agent:' not in self.user_description: if get_settings().pr_description.use_description_markers and 'pr_agent:' not in self.user_description:
return None return None
logging.info(f"Getting PR diff {self.pr_id}") get_logger().info(f"Getting PR diff {self.pr_id}")
self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model) self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
logging.info(f"Getting AI prediction {self.pr_id}") get_logger().info(f"Getting AI prediction {self.pr_id}")
self.prediction = await self._get_prediction(model) self.prediction = await self._get_prediction(model)
async def _get_prediction(self, model: str) -> str: async def _get_prediction(self, model: str) -> str:
@ -144,8 +143,8 @@ class PRDescription:
user_prompt = environment.from_string(get_settings().pr_description_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_description_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion( response, finish_reason = await self.ai_handler.chat_completion(
model=model, model=model,
@ -178,7 +177,7 @@ class PRDescription:
return pr_types return pr_types
def _prepare_pr_answer_with_markers(self) -> Tuple[str, str]: def _prepare_pr_answer_with_markers(self) -> Tuple[str, str]:
logging.info(f"Using description marker replacements {self.pr_id}") get_logger().info(f"Using description marker replacements {self.pr_id}")
title = self.vars["title"] title = self.vars["title"]
body = self.user_description body = self.user_description
if get_settings().pr_description.include_generated_by_header: if get_settings().pr_description.include_generated_by_header:
@ -252,6 +251,6 @@ class PRDescription:
pr_body += "\n___\n" pr_body += "\n___\n"
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"title:\n{title}\n{pr_body}") get_logger().info(f"title:\n{title}\n{pr_body}")
return title, pr_body return title, pr_body
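_prepare_pr_answer_with_markers (largely elided here) fills "pr_agent:" placeholders inside the user's own description rather than overwriting it. A toy version under that assumption; the exact marker names are guesses:

user_description = "## Motivation\n\npr_agent:summary\n"
generated_summary = "Refactor logging system to use a custom logger."

title = "Refactor logging"
pr_body = user_description.replace("pr_agent:summary", generated_summary)
print(title, pr_body, sep="\n")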
View File
@ -1,5 +1,4 @@
import copy import copy
import logging
from jinja2 import Environment, StrictUndefined from jinja2 import Environment, StrictUndefined
@ -9,6 +8,7 @@ from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
class PRInformationFromUser: class PRInformationFromUser:
@ -34,22 +34,22 @@ class PRInformationFromUser:
self.prediction = None self.prediction = None
async def run(self): async def run(self):
logging.info('Generating question to the user...') get_logger().info('Generating question to the user...')
if get_settings().config.publish_output: if get_settings().config.publish_output:
self.git_provider.publish_comment("Preparing questions...", is_temporary=True) self.git_provider.publish_comment("Preparing questions...", is_temporary=True)
await retry_with_fallback_models(self._prepare_prediction) await retry_with_fallback_models(self._prepare_prediction)
logging.info('Preparing questions...') get_logger().info('Preparing questions...')
pr_comment = self._prepare_pr_answer() pr_comment = self._prepare_pr_answer()
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info('Pushing questions...') get_logger().info('Pushing questions...')
self.git_provider.publish_comment(pr_comment) self.git_provider.publish_comment(pr_comment)
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
return "" return ""
async def _prepare_prediction(self, model): async def _prepare_prediction(self, model):
logging.info('Getting PR diff...') get_logger().info('Getting PR diff...')
self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model) self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
logging.info('Getting AI prediction...') get_logger().info('Getting AI prediction...')
self.prediction = await self._get_prediction(model) self.prediction = await self._get_prediction(model)
async def _get_prediction(self, model: str): async def _get_prediction(self, model: str):
@ -59,8 +59,8 @@ class PRInformationFromUser:
system_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.system).render(variables) system_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.system).render(variables)
user_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_information_from_user_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2, response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
system=system_prompt, user=user_prompt) system=system_prompt, user=user_prompt)
return response return response
@ -68,7 +68,7 @@ class PRInformationFromUser:
def _prepare_pr_answer(self) -> str: def _prepare_pr_answer(self) -> str:
model_output = self.prediction.strip() model_output = self.prediction.strip()
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"answer_str:\n{model_output}") get_logger().info(f"answer_str:\n{model_output}")
answer_str = f"{model_output}\n\n Please respond to the questions above in the following format:\n\n" +\ answer_str = f"{model_output}\n\n Please respond to the questions above in the following format:\n\n" +\
"\n>/answer\n>1) ...\n>2) ...\n>...\n" "\n>/answer\n>1) ...\n>2) ...\n>...\n"
return answer_str return answer_str
View File
@ -1,5 +1,4 @@
import copy import copy
import logging
from jinja2 import Environment, StrictUndefined from jinja2 import Environment, StrictUndefined
@ -9,6 +8,7 @@ from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
class PRQuestions: class PRQuestions:
@ -44,22 +44,22 @@ class PRQuestions:
return question_str return question_str
async def run(self): async def run(self):
logging.info('Answering a PR question...') get_logger().info('Answering a PR question...')
if get_settings().config.publish_output: if get_settings().config.publish_output:
self.git_provider.publish_comment("Preparing answer...", is_temporary=True) self.git_provider.publish_comment("Preparing answer...", is_temporary=True)
await retry_with_fallback_models(self._prepare_prediction) await retry_with_fallback_models(self._prepare_prediction)
logging.info('Preparing answer...') get_logger().info('Preparing answer...')
pr_comment = self._prepare_pr_answer() pr_comment = self._prepare_pr_answer()
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info('Pushing answer...') get_logger().info('Pushing answer...')
self.git_provider.publish_comment(pr_comment) self.git_provider.publish_comment(pr_comment)
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
return "" return ""
async def _prepare_prediction(self, model: str): async def _prepare_prediction(self, model: str):
logging.info('Getting PR diff...') get_logger().info('Getting PR diff...')
self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model) self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
logging.info('Getting AI prediction...') get_logger().info('Getting AI prediction...')
self.prediction = await self._get_prediction(model) self.prediction = await self._get_prediction(model)
async def _get_prediction(self, model: str): async def _get_prediction(self, model: str):
@ -69,8 +69,8 @@ class PRQuestions:
system_prompt = environment.from_string(get_settings().pr_questions_prompt.system).render(variables) system_prompt = environment.from_string(get_settings().pr_questions_prompt.system).render(variables)
user_prompt = environment.from_string(get_settings().pr_questions_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_questions_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2, response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
system=system_prompt, user=user_prompt) system=system_prompt, user=user_prompt)
return response return response
@ -79,5 +79,5 @@ class PRQuestions:
answer_str = f"Question: {self.question_str}\n\n" answer_str = f"Question: {self.question_str}\n\n"
answer_str += f"Answer:\n{self.prediction.strip()}\n\n" answer_str += f"Answer:\n{self.prediction.strip()}\n\n"
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"answer_str:\n{answer_str}") get_logger().info(f"answer_str:\n{answer_str}")
return answer_str return answer_str
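Every tool in this commit renders its prompts the same way; the pattern in isolation, with a made-up template. StrictUndefined makes a missing template variable raise instead of silently rendering empty, which is the safer behavior for prompt templates:

from jinja2 import Environment, StrictUndefined

environment = Environment(undefined=StrictUndefined)
variables = {"question_str": "Why was logging refactored?"}
user_prompt = environment.from_string(
    "Answer this question about the PR: {{ question_str }}").render(variables)
print(user_prompt)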
View File
@ -1,6 +1,4 @@
import copy import copy
import json
import logging
from collections import OrderedDict from collections import OrderedDict
from typing import List, Tuple from typing import List, Tuple
@ -9,13 +7,13 @@ from jinja2 import Environment, StrictUndefined
from yaml import SafeLoader from yaml import SafeLoader
from pr_agent.algo.ai_handler import AiHandler from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, \ from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
find_line_number_of_relevant_line_in_file, clip_tokens
from pr_agent.algo.token_handler import TokenHandler from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import convert_to_markdown, try_fix_json, try_fix_yaml, load_yaml from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
from pr_agent.log import get_logger
from pr_agent.servers.help import actions_help_text, bot_help_text from pr_agent.servers.help import actions_help_text, bot_help_text
@ -98,29 +96,29 @@ class PRReviewer:
try: try:
if self.is_auto and not get_settings().pr_reviewer.automatic_review: if self.is_auto and not get_settings().pr_reviewer.automatic_review:
logging.info(f'Automatic review is disabled {self.pr_url}') get_logger().info(f'Automatic review is disabled {self.pr_url}')
return None return None
logging.info(f'Reviewing PR: {self.pr_url} ...') get_logger().info(f'Reviewing PR: {self.pr_url} ...')
if get_settings().config.publish_output: if get_settings().config.publish_output:
self.git_provider.publish_comment("Preparing review...", is_temporary=True) self.git_provider.publish_comment("Preparing review...", is_temporary=True)
await retry_with_fallback_models(self._prepare_prediction) await retry_with_fallback_models(self._prepare_prediction)
logging.info('Preparing PR review...') get_logger().info('Preparing PR review...')
pr_comment = self._prepare_pr_review() pr_comment = self._prepare_pr_review()
if get_settings().config.publish_output: if get_settings().config.publish_output:
logging.info('Pushing PR review...') get_logger().info('Pushing PR review...')
self.git_provider.publish_comment(pr_comment) self.git_provider.publish_comment(pr_comment)
self.git_provider.remove_initial_comment() self.git_provider.remove_initial_comment()
if get_settings().pr_reviewer.inline_code_comments: if get_settings().pr_reviewer.inline_code_comments:
logging.info('Pushing inline code comments...') get_logger().info('Pushing inline code comments...')
self._publish_inline_code_comments() self._publish_inline_code_comments()
except Exception as e: except Exception as e:
logging.error(f"Failed to review PR: {e}") get_logger().error(f"Failed to review PR: {e}")
async def _prepare_prediction(self, model: str) -> None: async def _prepare_prediction(self, model: str) -> None:
""" """
@ -132,9 +130,9 @@ class PRReviewer:
Returns: Returns:
None None
""" """
logging.info('Getting PR diff...') get_logger().info('Getting PR diff...')
self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model) self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
logging.info('Getting AI prediction...') get_logger().info('Getting AI prediction...')
self.prediction = await self._get_prediction(model) self.prediction = await self._get_prediction(model)
async def _get_prediction(self, model: str) -> str: async def _get_prediction(self, model: str) -> str:
@ -155,8 +153,8 @@ class PRReviewer:
user_prompt = environment.from_string(get_settings().pr_review_prompt.user).render(variables) user_prompt = environment.from_string(get_settings().pr_review_prompt.user).render(variables)
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"\nSystem prompt:\n{system_prompt}") get_logger().info(f"\nSystem prompt:\n{system_prompt}")
logging.info(f"\nUser prompt:\n{user_prompt}") get_logger().info(f"\nUser prompt:\n{user_prompt}")
response, finish_reason = await self.ai_handler.chat_completion( response, finish_reason = await self.ai_handler.chat_completion(
model=model, model=model,
@ -249,7 +247,7 @@ class PRReviewer:
# Log markdown response if verbosity level is high # Log markdown response if verbosity level is high
if get_settings().config.verbosity_level >= 2: if get_settings().config.verbosity_level >= 2:
logging.info(f"Markdown response:\n{markdown_text}") get_logger().info(f"Markdown response:\n{markdown_text}")
if markdown_text == None or len(markdown_text) == 0: if markdown_text == None or len(markdown_text) == 0:
markdown_text = "" markdown_text = ""
@ -268,7 +266,7 @@ class PRReviewer:
try: try:
data = yaml.load(review_text, Loader=SafeLoader) data = yaml.load(review_text, Loader=SafeLoader)
except Exception as e: except Exception as e:
logging.error(f"Failed to parse AI prediction: {e}") get_logger().error(f"Failed to parse AI prediction: {e}")
data = try_fix_yaml(review_text) data = try_fix_yaml(review_text)
comments: List[str] = [] comments: List[str] = []
@ -277,7 +275,7 @@ class PRReviewer:
relevant_line_in_file = suggestion.get('relevant line', '').strip() relevant_line_in_file = suggestion.get('relevant line', '').strip()
content = suggestion.get('suggestion', '') content = suggestion.get('suggestion', '')
if not relevant_file or not relevant_line_in_file or not content: if not relevant_file or not relevant_line_in_file or not content:
logging.info("Skipping inline comment with missing file/line/content") get_logger().info("Skipping inline comment with missing file/line/content")
continue continue
if self.git_provider.is_supported("create_inline_comment"): if self.git_provider.is_supported("create_inline_comment"):
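The parse step above tries strict YAML first and only falls back to a repair helper on failure. As a standalone function (try_fix_yaml is imported from pr_agent.algo.utils earlier in this file; its internals are not shown):

import yaml
from yaml import SafeLoader

from pr_agent.algo.utils import try_fix_yaml


def parse_review(review_text: str) -> dict:
    try:
        return yaml.load(review_text, Loader=SafeLoader)
    except Exception:
        # best-effort repair of the model's YAML output
        return try_fix_yaml(review_text)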
View File
@ -1,18 +1,17 @@
import copy
import json
import logging
from enum import Enum from enum import Enum
from typing import List, Tuple from typing import List
import pinecone
import openai import openai
import pandas as pd import pandas as pd
import pinecone
from pinecone_datasets import Dataset, DatasetMetadata
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from pr_agent.algo import MAX_TOKENS from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.token_handler import TokenHandler from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider from pr_agent.git_providers import get_git_provider
from pinecone_datasets import Dataset, DatasetMetadata from pr_agent.log import get_logger
MODEL = "text-embedding-ada-002" MODEL = "text-embedding-ada-002"
@ -62,11 +61,11 @@ class PRSimilarIssue:
upsert = False upsert = False
if run_from_scratch or upsert: # index the entire repo if run_from_scratch or upsert: # index the entire repo
logging.info('Indexing the entire repo...') get_logger().info('Indexing the entire repo...')
logging.info('Getting issues...') get_logger().info('Getting issues...')
issues = list(repo_obj.get_issues(state='all')) issues = list(repo_obj.get_issues(state='all'))
logging.info('Done') get_logger().info('Done')
self._update_index_with_issues(issues, repo_name_for_index, upsert=upsert) self._update_index_with_issues(issues, repo_name_for_index, upsert=upsert)
else: # update index if needed else: # update index if needed
pinecone_index = pinecone.Index(index_name=index_name) pinecone_index = pinecone.Index(index_name=index_name)
@ -92,20 +91,20 @@ class PRSimilarIssue:
break break
if issues_to_update: if issues_to_update:
logging.info(f'Updating index with {counter} new issues...') get_logger().info(f'Updating index with {counter} new issues...')
self._update_index_with_issues(issues_to_update, repo_name_for_index, upsert=True) self._update_index_with_issues(issues_to_update, repo_name_for_index, upsert=True)
else: else:
logging.info('No new issues to update') get_logger().info('No new issues to update')
async def run(self): async def run(self):
logging.info('Getting issue...') get_logger().info('Getting issue...')
repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1]) repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
issue_main = self.git_provider.repo_obj.get_issue(original_issue_number) issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
issue_str, comments, number = self._process_issue(issue_main) issue_str, comments, number = self._process_issue(issue_main)
openai.api_key = get_settings().openai.key openai.api_key = get_settings().openai.key
logging.info('Done') get_logger().info('Done')
logging.info('Querying...') get_logger().info('Querying...')
res = openai.Embedding.create(input=[issue_str], engine=MODEL) res = openai.Embedding.create(input=[issue_str], engine=MODEL)
embeds = [record['embedding'] for record in res['data']] embeds = [record['embedding'] for record in res['data']]
        pinecone_index = pinecone.Index(index_name=self.index_name)
@@ -127,9 +126,9 @@ class PRSimilarIssue:
                else:
                    relevant_comment_number_list.append(-1)
                score_list.append(str("{:.2f}".format(r['score'])))
-        logging.info('Done')
-        logging.info('Publishing response...')
+        get_logger().info('Done')
+        get_logger().info('Publishing response...')
        similar_issues_str = "### Similar Issues\n___\n\n"
        for i, issue_number_similar in enumerate(relevant_issues_number_list):
            issue = self.git_provider.repo_obj.get_issue(issue_number_similar)
@@ -140,8 +139,8 @@ class PRSimilarIssue:
            similar_issues_str += f"{i + 1}. **[{title}]({url})** (score={score_list[i]})\n\n"
        if get_settings().config.publish_output:
            response = issue_main.create_comment(similar_issues_str)
-        logging.info(similar_issues_str)
-        logging.info('Done')
+        get_logger().info(similar_issues_str)
+        get_logger().info('Done')

    def _process_issue(self, issue):
        header = issue.title
@@ -155,7 +154,7 @@ class PRSimilarIssue:
        return issue_str, comments, number

    def _update_index_with_issues(self, issues_list, repo_name_for_index, upsert=False):
-        logging.info('Processing issues...')
+        get_logger().info('Processing issues...')
        corpus = Corpus()
        example_issue_record = Record(
            id=f"example_issue_{repo_name_for_index}",
@@ -171,9 +170,9 @@ class PRSimilarIssue:
            counter += 1
            if counter % 100 == 0:
-                logging.info(f"Scanned {counter} issues")
+                get_logger().info(f"Scanned {counter} issues")
            if counter >= self.max_issues_to_scan:
-                logging.info(f"Scanned {self.max_issues_to_scan} issues, stopping")
+                get_logger().info(f"Scanned {self.max_issues_to_scan} issues, stopping")
                break

            issue_str, comments, number = self._process_issue(issue)
@@ -210,9 +209,9 @@ class PRSimilarIssue:
            )
            corpus.append(comment_record)
        df = pd.DataFrame(corpus.dict()["documents"])
-        logging.info('Done')
-        logging.info('Embedding...')
+        get_logger().info('Done')
+        get_logger().info('Embedding...')
        openai.api_key = get_settings().openai.key
        list_to_encode = list(df["text"].values)
        try:
@@ -220,7 +219,7 @@ class PRSimilarIssue:
            embeds = [record['embedding'] for record in res['data']]
        except:
            embeds = []
-            logging.error('Failed to embed entire list, embedding one by one...')
+            get_logger().error('Failed to embed entire list, embedding one by one...')
            for i, text in enumerate(list_to_encode):
                try:
                    res = openai.Embedding.create(input=[text], engine=MODEL)
@@ -231,21 +230,21 @@ class PRSimilarIssue:
        meta = DatasetMetadata.empty()
        meta.dense_model.dimension = len(embeds[0])
        ds = Dataset.from_pandas(df, meta)
-        logging.info('Done')
+        get_logger().info('Done')

        api_key = get_settings().pinecone.api_key
        environment = get_settings().pinecone.environment
        if not upsert:
-            logging.info('Creating index from scratch...')
+            get_logger().info('Creating index from scratch...')
            ds.to_pinecone_index(self.index_name, api_key=api_key, environment=environment)
        else:
-            logging.info('Upserting index...')
+            get_logger().info('Upserting index...')
            namespace = ""
            batch_size: int = 100
            concurrency: int = 10
            pinecone.init(api_key=api_key, environment=environment)
            ds._upsert_to_index(self.index_name, namespace, batch_size, concurrency)
-        logging.info('Done')
+        get_logger().info('Done')


class IssueLevel(str, Enum):
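
The hunks above also pass through the embedding fallback path: _update_index_with_issues first tries to embed the whole corpus in a single OpenAI call and only falls back to per-item requests when the batch call fails. Below is a minimal sketch of that pattern, assuming the legacy (pre-1.0) openai SDK and the text-embedding-ada-002 engine; the actual MODEL value and the handling of items that still fail are not shown in this diff, so both are assumptions.

    # Sketch of the batch-embed-with-per-item-fallback pattern; not the exact
    # pr_agent implementation. Assumes the legacy openai SDK (openai.Embedding).
    import openai
    from loguru import logger

    MODEL = "text-embedding-ada-002"  # assumed engine name, for illustration only

    def embed_texts(texts: list) -> list:
        try:
            # Fast path: one API call embeds the entire list.
            res = openai.Embedding.create(input=texts, engine=MODEL)
            return [record["embedding"] for record in res["data"]]
        except Exception:
            # Slow path: retry item by item, so one bad text (e.g. oversized)
            # does not sink the whole batch.
            logger.error("Failed to embed entire list, embedding one by one...")
            embeds = []
            for text in texts:
                try:
                    res = openai.Embedding.create(input=[text], engine=MODEL)
                    embeds.append(res["data"][0]["embedding"])
                except Exception:
                    embeds.append([])  # assumed placeholder for a failed item
            return embeds

The create-versus-upsert branch at the end of the method follows the same shape: a fresh index is built with ds.to_pinecone_index, while an existing one is updated in batches through ds._upsert_to_index.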

View File

@@ -1,5 +1,4 @@
import copy
-import logging
from datetime import date
from time import sleep
from typing import Tuple
@@ -10,8 +9,9 @@ from pr_agent.algo.ai_handler import AiHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
-from pr_agent.git_providers import GithubProvider, get_git_provider
+from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
+from pr_agent.log import get_logger

CHANGELOG_LINES = 50
@@ -48,26 +48,26 @@ class PRUpdateChangelog:
    async def run(self):
        # assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"
-        logging.info('Updating the changelog...')
+        get_logger().info('Updating the changelog...')
        if get_settings().config.publish_output:
            self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)
        await retry_with_fallback_models(self._prepare_prediction)
-        logging.info('Preparing PR changelog updates...')
+        get_logger().info('Preparing PR changelog updates...')
        new_file_content, answer = self._prepare_changelog_update()
        if get_settings().config.publish_output:
            self.git_provider.remove_initial_comment()
-            logging.info('Publishing changelog updates...')
+            get_logger().info('Publishing changelog updates...')
            if self.commit_changelog:
-                logging.info('Pushing PR changelog updates to repo...')
+                get_logger().info('Pushing PR changelog updates to repo...')
                self._push_changelog_update(new_file_content, answer)
            else:
-                logging.info('Publishing PR changelog as comment...')
+                get_logger().info('Publishing PR changelog as comment...')
                self.git_provider.publish_comment(f"**Changelog updates:**\n\n{answer}")

    async def _prepare_prediction(self, model: str):
-        logging.info('Getting PR diff...')
+        get_logger().info('Getting PR diff...')
        self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-        logging.info('Getting AI prediction...')
+        get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model)

    async def _get_prediction(self, model: str):
@@ -77,8 +77,8 @@ class PRUpdateChangelog:
        system_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
-            logging.info(f"\nSystem prompt:\n{system_prompt}")
-            logging.info(f"\nUser prompt:\n{user_prompt}")
+            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
+            get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)
@@ -100,7 +100,7 @@ class PRUpdateChangelog:
                  "\n>'/update_changelog --pr_update_changelog.push_changelog_changes=true'\n"
        if get_settings().config.verbosity_level >= 2:
-            logging.info(f"answer:\n{answer}")
+            get_logger().info(f"answer:\n{answer}")

        return new_file_content, answer
@@ -149,7 +149,7 @@ Example:
        except Exception:
            self.changelog_file_str = ""
            if self.commit_changelog:
-                logging.info("No CHANGELOG.md file found in the repository. Creating one...")
+                get_logger().info("No CHANGELOG.md file found in the repository. Creating one...")
                changelog_file = self.git_provider.repo_obj.create_file(path="CHANGELOG.md",
                                                                        message='add CHANGELOG.md',
                                                                        content="",

View File

@@ -21,3 +21,4 @@ azure-devops==7.1.0b3
msrest==0.7.1
pinecone-client
pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
+loguru==0.7.2
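
Pinning loguru==0.7.2 makes the new logging backend an explicit, reproducible dependency. At call sites the migration is mechanical, as the hunks above show; a hypothetical before/after for a single line:

    # Before: module-level stdlib logging.
    #     import logging
    #     logging.info('Updating the changelog...')
    # After: the shared loguru-backed logger.
    from pr_agent.log import get_logger

    get_logger().info('Updating the changelog...')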