Refactor logging system to use custom logger across the codebase

Ori Kotek
2023-10-16 14:56:00 +03:00
parent 41166dc271
commit c324d88be3
32 changed files with 340 additions and 311 deletions
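
This commit replaces stdlib logging calls with a project-level get_logger() whose call sites also pass structured keyword arguments (e.g. get_logger().info("AI response", response=resp, usage=usage)). The new log module itself is not among the hunks shown below; the sketch that follows is only a minimal illustration of what such a factory could look like, assuming a loguru-based implementation — the setup_logger helper and the serialize=True JSON sink are assumptions for illustration, not taken from this commit.

# pr_agent/log.py — hypothetical sketch, not part of the diff below
import sys
from loguru import logger

def setup_logger(level: str = "INFO"):
    # Replace loguru's default sink with one at the requested level.
    # serialize=True emits one JSON record per line, so structured keyword
    # arguments passed at call sites (response=, usage=, model=, ...) stay
    # machine-readable instead of being dropped.
    logger.remove()
    logger.add(sys.stdout, level=level.upper(), colorize=False, serialize=True)
    return logger

def get_logger(*args, **kwargs):
    # Every module shares the same loguru logger instance, so
    # get_logger().info(...) works without per-module configuration.
    return logger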

View File

@@ -1,4 +1,3 @@
-import logging
 import os
 import litellm
@@ -7,6 +6,8 @@ from litellm import acompletion
 from openai.error import APIError, RateLimitError, Timeout, TryAgain
 from retry import retry
 from pr_agent.config_loader import get_settings
+from pr_agent.log import get_logger
 OPENAI_RETRIES = 5
@@ -88,34 +89,34 @@ class AiHandler:
         try:
             deployment_id = self.deployment_id
             if get_settings().config.verbosity_level >= 2:
-                logging.debug(
+                get_logger().debug(
                     f"Generating completion with {model}"
                     f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
                 )
             if self.azure:
                 model = 'azure/' + model
+            messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
             response = await acompletion(
                 model=model,
                 deployment_id=deployment_id,
-                messages=[
-                    {"role": "system", "content": system},
-                    {"role": "user", "content": user}
-                ],
+                messages=messages,
                 temperature=temperature,
                 force_timeout=get_settings().config.ai_timeout
             )
         except (APIError, Timeout, TryAgain) as e:
-            logging.error("Error during OpenAI inference: ", e)
+            get_logger().error("Error during OpenAI inference: ", e)
             raise
         except (RateLimitError) as e:
-            logging.error("Rate limit error during OpenAI inference: ", e)
+            get_logger().error("Rate limit error during OpenAI inference: ", e)
             raise
         except (Exception) as e:
-            logging.error("Unknown error during OpenAI inference: ", e)
+            get_logger().error("Unknown error during OpenAI inference: ", e)
             raise TryAgain from e
         if response is None or len(response["choices"]) == 0:
             raise TryAgain
         resp = response["choices"][0]['message']['content']
         finish_reason = response["choices"][0]["finish_reason"]
-        print(resp, finish_reason)
+        usage = response.get("usage")
+        get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
+                          model=model, usage=usage)
         return resp, finish_reason

View File

@@ -1,8 +1,9 @@
 from __future__ import annotations
-import logging
 import re
 from pr_agent.config_loader import get_settings
+from pr_agent.log import get_logger
 def extend_patch(original_file_str, patch_str, num_lines) -> str:
@@ -63,7 +64,7 @@ def extend_patch(original_file_str, patch_str, num_lines) -> str:
                 extended_patch_lines.append(line)
     except Exception as e:
         if get_settings().config.verbosity_level >= 2:
-            logging.error(f"Failed to extend patch: {e}")
+            get_logger().error(f"Failed to extend patch: {e}")
         return patch_str
     # finish previous hunk
@@ -134,14 +135,14 @@ def handle_patch_deletions(patch: str, original_file_content_str: str,
     if not new_file_content_str:
         # logic for handling deleted files - don't show patch, just show that the file was deleted
         if get_settings().config.verbosity_level > 0:
-            logging.info(f"Processing file: {file_name}, minimizing deletion file")
+            get_logger().info(f"Processing file: {file_name}, minimizing deletion file")
         patch = None # file was deleted
     else:
         patch_lines = patch.splitlines()
         patch_new = omit_deletion_hunks(patch_lines)
         if patch != patch_new:
             if get_settings().config.verbosity_level > 0:
-                logging.info(f"Processing file: {file_name}, hunks were deleted")
+                get_logger().info(f"Processing file: {file_name}, hunks were deleted")
             patch = patch_new
     return patch

View File

@@ -1,7 +1,6 @@
 from __future__ import annotations
 import difflib
-import logging
 import re
 import traceback
 from typing import Any, Callable, List, Tuple
@@ -15,6 +14,7 @@ from pr_agent.algo.file_filter import filter_ignored
 from pr_agent.algo.token_handler import TokenHandler, get_token_encoder
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers.git_provider import FilePatchInfo, GitProvider
+from pr_agent.log import get_logger
 DELETED_FILES_ = "Deleted files:\n"
@@ -51,7 +51,7 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
     try:
         diff_files = git_provider.get_diff_files()
     except RateLimitExceededException as e:
-        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
+        get_logger().error(f"Rate limit exceeded for git provider API. original message {e}")
         raise
     diff_files = filter_ignored(diff_files)
@@ -180,7 +180,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
         # Hard Stop, no more tokens
         if total_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD:
-            logging.warning(f"File was fully skipped, no more tokens: {file.filename}.")
+            get_logger().warning(f"File was fully skipped, no more tokens: {file.filename}.")
             continue
         # If the patch is too large, just show the file name
@@ -189,7 +189,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
             # TODO: Option for alternative logic to remove hunks from the patch to reduce the number of tokens
             # until we meet the requirements
             if get_settings().config.verbosity_level >= 2:
-                logging.warning(f"Patch too large, minimizing it, {file.filename}")
+                get_logger().warning(f"Patch too large, minimizing it, {file.filename}")
             if not modified_files_list:
                 total_tokens += token_handler.count_tokens(MORE_MODIFIED_FILES_)
             modified_files_list.append(file.filename)
@@ -204,7 +204,7 @@ def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, mo
         patches.append(patch_final)
         total_tokens += token_handler.count_tokens(patch_final)
         if get_settings().config.verbosity_level >= 2:
-            logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")
+            get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")
     return patches, modified_files_list, deleted_files_list
@@ -218,7 +218,7 @@ async def retry_with_fallback_models(f: Callable):
             get_settings().set("openai.deployment_id", deployment_id)
             return await f(model)
         except Exception as e:
-            logging.warning(
+            get_logger().warning(
                 f"Failed to generate prediction with {model}"
                 f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "
                 f"{traceback.format_exc()}"
@@ -340,7 +340,7 @@ def clip_tokens(text: str, max_tokens: int) -> str:
         clipped_text = text[:num_output_chars]
         return clipped_text
     except Exception as e:
-        logging.warning(f"Failed to clip tokens: {e}")
+        get_logger().warning(f"Failed to clip tokens: {e}")
         return text
@@ -367,7 +367,7 @@ def get_pr_multi_diffs(git_provider: GitProvider,
     try:
         diff_files = git_provider.get_diff_files()
     except RateLimitExceededException as e:
-        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
+        get_logger().error(f"Rate limit exceeded for git provider API. original message {e}")
         raise
     diff_files = filter_ignored(diff_files)
@@ -387,7 +387,7 @@ def get_pr_multi_diffs(git_provider: GitProvider,
     for file in sorted_files:
         if call_number > max_calls:
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Reached max calls ({max_calls})")
+                get_logger().info(f"Reached max calls ({max_calls})")
             break
         original_file_content_str = file.base_file
@@ -410,13 +410,13 @@ def get_pr_multi_diffs(git_provider: GitProvider,
             total_tokens = token_handler.prompt_tokens
             call_number += 1
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Call number: {call_number}")
+                get_logger().info(f"Call number: {call_number}")
         if patch:
             patches.append(patch)
             total_tokens += new_patch_tokens
             if get_settings().config.verbosity_level >= 2:
-                logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")
+                get_logger().info(f"Tokens: {total_tokens}, last filename: {file.filename}")
     # Add the last chunk
     if patches:

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
 import difflib
 import json
-import logging
 import re
 import textwrap
 from datetime import datetime
@@ -11,6 +10,7 @@ from typing import Any, List
 import yaml
 from starlette_context import context
 from pr_agent.config_loader import get_settings, global_settings
+from pr_agent.log import get_logger
 def get_setting(key: str) -> Any:
@@ -159,7 +159,7 @@ def try_fix_json(review, max_iter=10, code_suggestions=False):
         iter_count += 1
     if not valid_json:
-        logging.error("Unable to decode JSON response from AI")
+        get_logger().error("Unable to decode JSON response from AI")
         data = {}
     return data
@@ -230,7 +230,7 @@ def load_large_diff(filename, new_file_content_str: str, original_file_content_s
         diff = difflib.unified_diff(original_file_content_str.splitlines(keepends=True),
                                     new_file_content_str.splitlines(keepends=True))
         if get_settings().config.verbosity_level >= 2:
-            logging.warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
+            get_logger().warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
         patch = ''.join(diff)
     except Exception:
         pass
@@ -262,12 +262,12 @@ def update_settings_from_args(args: List[str]) -> List[str]:
                 vals = arg.split('=', 1)
                 if len(vals) != 2:
                     if len(vals) > 2: # --extended is a valid argument
-                        logging.error(f'Invalid argument format: {arg}')
+                        get_logger().error(f'Invalid argument format: {arg}')
                     other_args.append(arg)
                     continue
                 key, value = _fix_key_value(*vals)
                 get_settings().set(key, value)
-                logging.info(f'Updated setting {key} to: "{value}"')
+                get_logger().info(f'Updated setting {key} to: "{value}"')
             else:
                 other_args.append(arg)
     return other_args
@@ -279,7 +279,7 @@ def _fix_key_value(key: str, value: str):
     try:
         value = yaml.safe_load(value)
     except Exception as e:
-        logging.error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
+        get_logger().error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
     return key, value
@@ -288,7 +288,7 @@ def load_yaml(review_text: str) -> dict:
     try:
         data = yaml.safe_load(review_text)
     except Exception as e:
-        logging.error(f"Failed to parse AI prediction: {e}")
+        get_logger().error(f"Failed to parse AI prediction: {e}")
         data = try_fix_yaml(review_text)
     return data
@@ -299,7 +299,7 @@ def try_fix_yaml(review_text: str) -> dict:
         review_text_lines_tmp = '\n'.join(review_text_lines[:-i])
         try:
             data = yaml.load(review_text_lines_tmp, Loader=yaml.SafeLoader)
-            logging.info(f"Successfully parsed AI prediction after removing {i} lines")
+            get_logger().info(f"Successfully parsed AI prediction after removing {i} lines")
             break
         except:
             pass