Refactor logging statements for better readability and debugging

Author: mrT23
Date:   2024-02-24 16:47:23 +02:00
parent df3a463668
commit 877796b539
15 changed files with 156 additions and 158 deletions
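The refactor applies one pattern across all six tool files: verbosity-gated get_logger().info(...) f-string dumps are replaced with structured debug calls that attach data as keyword fields, and every raw diff fetch gains an explicit empty-diff guard. Below is a minimal sketch of the before/after shape, not the project's actual code: pr-agent's get_logger() accepts keyword fields directly (as in get_logger().debug("Relevant configs", configs=relevant_configs) in the diff), so the stdlib logging module with extra= serves as a stand-in here, and fetch_diff/predict are hypothetical placeholders.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("pr_agent_sketch")

def fetch_diff() -> str:
    # Hypothetical stand-in for get_pr_diff(...).
    return "@@ -1 +1 @@\n-old\n+new"

def predict(patches_diff: str) -> str:
    # Hypothetical stand-in for the AI call.
    return f"suggestions for {len(patches_diff)} chars of diff"

def prepare_prediction_old(verbosity_level: int) -> str:
    # Before: chatty info logs, with payloads gated on a global verbosity level.
    logger.info("Getting PR diff...")
    patches_diff = fetch_diff()
    if verbosity_level >= 2:
        logger.info(f"\nPR diff:\n{patches_diff}")
    logger.info("Getting AI prediction...")
    return predict(patches_diff)

def prepare_prediction_new() -> str | None:
    # After: one structured debug record per artifact, and an explicit guard
    # so an empty diff is reported as an error instead of failing downstream.
    patches_diff = fetch_diff()
    if patches_diff:
        logger.debug("PR diff", extra={"diff": patches_diff})
        return predict(patches_diff)
    logger.error("Error getting PR diff")
    return None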

pr_agent/tools/pr_code_suggestions.py

@@ -72,6 +72,9 @@ class PRCodeSuggestions:
     async def run(self):
         try:
             get_logger().info('Generating code suggestions for PR...')
+            relevant_configs = {'pr_code_suggestions': dict(get_settings().pr_code_suggestions),
+                                'config': dict(get_settings().config)}
+            get_logger().debug("Relevant configs", configs=relevant_configs)
 
             if get_settings().config.publish_output:
                 if self.git_provider.is_supported("gfm_markdown"):
@@ -79,7 +82,6 @@ class PRCodeSuggestions:
             else:
                 self.git_provider.publish_comment("Preparing suggestions...", is_temporary=True)
 
-            get_logger().info('Preparing PR code suggestions...')
             if not self.is_extended:
                 await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
                 data = self._prepare_pr_code_suggestions()
@@ -97,13 +99,12 @@ class PRCodeSuggestions:
                     data['code_suggestions'] = await self.rank_suggestions(data['code_suggestions'])
 
             if get_settings().config.publish_output:
-                get_logger().info('Pushing PR code suggestions...')
                 self.git_provider.remove_initial_comment()
 
                 if get_settings().pr_code_suggestions.summarize and self.git_provider.is_supported("gfm_markdown"):
-                    get_logger().info('Pushing summarize code suggestions...')
                     # generate summarized suggestions
                     pr_body = self.generate_summarized_suggestions(data)
+                    get_logger().debug(f"PR output", suggestions=pr_body)
 
                     # add usage guide
                     if get_settings().pr_code_suggestions.enable_help_text:
@@ -117,7 +118,6 @@ class PRCodeSuggestions:
                     self.git_provider.publish_comment(pr_body)
                 else:
-                    get_logger().info('Pushing inline code suggestions...')
                     self.push_inline_code_suggestions(data)
 
                 if self.progress_response:
                     self.progress_response.delete()
@@ -127,15 +127,17 @@ class PRCodeSuggestions:
                 self.progress_response.delete()
 
     async def _prepare_prediction(self, model: str):
-        get_logger().info('Getting PR diff...')
-        patches_diff = get_pr_diff(self.git_provider,
-                                   self.token_handler,
-                                   model,
-                                   add_line_numbers_to_hunks=True,
-                                   disable_extra_lines=True)
-        get_logger().info('Getting AI prediction...')
-        self.prediction = await self._get_prediction(model, patches_diff)
+        self.patches_diff = get_pr_diff(self.git_provider,
+                                        self.token_handler,
+                                        model,
+                                        add_line_numbers_to_hunks=True,
+                                        disable_extra_lines=True)
+        if self.patches_diff:
+            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            self.prediction = await self._get_prediction(model, self.patches_diff)
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.prediction = None
 
     async def _get_prediction(self, model: str, patches_diff: str):
         variables = copy.deepcopy(self.vars)
@@ -143,15 +145,10 @@ class PRCodeSuggestions:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
 
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nAI response:\n{response}")
 
         return response
 
     def _prepare_pr_code_suggestions(self) -> Dict:
@@ -185,8 +182,6 @@ class PRCodeSuggestions:
         for d in data['code_suggestions']:
             try:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"suggestion: {d}")
                 relevant_file = d['relevant_file'].strip()
                 relevant_lines_start = int(d['relevant_lines_start'])  # absolute position
                 relevant_lines_end = int(d['relevant_lines_end'])
@@ -202,8 +197,7 @@ class PRCodeSuggestions:
                                          'relevant_lines_start': relevant_lines_start,
                                          'relevant_lines_end': relevant_lines_end})
             except Exception:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"Could not parse suggestion: {d}")
+                get_logger().info(f"Could not parse suggestion: {d}")
 
         is_successful = self.git_provider.publish_code_suggestions(code_suggestions)
         if not is_successful:
@@ -229,8 +223,7 @@ class PRCodeSuggestions:
                 if delta_spaces > 0:
                     new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
+            get_logger().error(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
 
         return new_code_snippet
@@ -245,32 +238,33 @@ class PRCodeSuggestions:
             return False
 
     async def _prepare_prediction_extended(self, model: str) -> dict:
-        get_logger().info('Getting PR diff...')
-        patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
-                                               max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
-
-        # parallelize calls to AI:
-        if get_settings().pr_code_suggestions.parallel_calls:
-            get_logger().info('Getting multi AI predictions in parallel...')
-            prediction_list = await asyncio.gather(*[self._get_prediction(model, patches_diff) for patches_diff in patches_diff_list])
-            self.prediction_list = prediction_list
-        else:
-            get_logger().info('Getting multi AI predictions...')
-            prediction_list = []
-            for i, patches_diff in enumerate(patches_diff_list):
-                get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
-                prediction = await self._get_prediction(model, patches_diff)
-                prediction_list.append(prediction)
-
-        data = {}
-        for prediction in prediction_list:
-            self.prediction = prediction
-            data_per_chunk = self._prepare_pr_code_suggestions()
-            if "code_suggestions" in data:
-                data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
-            else:
-                data.update(data_per_chunk)
-        self.data = data
+        self.patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
+                                                    max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
+        if self.patches_diff_list:
+            get_logger().debug(f"PR diff", diff=self.patches_diff_list)
+
+            # parallelize calls to AI:
+            if get_settings().pr_code_suggestions.parallel_calls:
+                prediction_list = await asyncio.gather(*[self._get_prediction(model, patches_diff) for patches_diff in self.patches_diff_list])
+                self.prediction_list = prediction_list
+            else:
+                prediction_list = []
+                for i, patches_diff in enumerate(self.patches_diff_list):
+                    prediction = await self._get_prediction(model, patches_diff)
+                    prediction_list.append(prediction)
+
+            data = {}
+            for prediction in prediction_list:
+                self.prediction = prediction
+                data_per_chunk = self._prepare_pr_code_suggestions()
+                if "code_suggestions" in data:
+                    data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
+                else:
+                    data.update(data_per_chunk)
+            self.data = data
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.data = data = None
 
         return data
 
     async def rank_suggestions(self, data: List) -> List:
@@ -305,9 +299,7 @@ class PRCodeSuggestions:
         system_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.system).render(
             variables)
         user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
 
         response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
                                                                         user=user_prompt)

pr_agent/tools/pr_description.py

@@ -36,7 +36,7 @@ class PRDescription:
         if get_settings().pr_description.enable_semantic_files_types and not self.git_provider.is_supported(
                 "gfm_markdown"):
-            get_logger().debug(f"Disabling semantic files types for {self.pr_id}")
+            get_logger().debug(f"Disabling semantic files types for {self.pr_id}, gfm_markdown not supported.")
             get_settings().pr_description.enable_semantic_files_types = False
 
         # Initialize the AI handler
@@ -56,10 +56,8 @@ class PRDescription:
             "custom_labels_class": "",  # will be filled if necessary in 'set_custom_labels' function
             "enable_semantic_files_types": get_settings().pr_description.enable_semantic_files_types,
         }
 
-        self.user_description = self.git_provider.get_user_description()
-
         # Initialize the token handler
         self.token_handler = TokenHandler(
             self.git_provider.pr,
@@ -68,33 +66,32 @@ class PRDescription:
             get_settings().pr_description_prompt.user,
         )
 
         # Initialize patches_diff and prediction attributes
         self.patches_diff = None
         self.prediction = None
+        self.file_label_dict = None
+        self.COLLAPSIBLE_FILE_LIST_THRESHOLD = 8
 
     async def run(self):
         """
         Generates a PR description using an AI model and publishes it to the PR.
         """
         try:
-            get_logger().info(f"Generating a PR description {self.pr_id}")
+            get_logger().info(f"Generating a PR description for pr_id: {self.pr_id}")
+            relevant_configs = {'pr_description': dict(get_settings().pr_description),
+                                'config': dict(get_settings().config)}
+            get_logger().debug("Relevant configs", configs=relevant_configs)
 
             if get_settings().config.publish_output:
                 self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)
 
             await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)  # turbo model because larger context
-            get_logger().info(f"Preparing answer {self.pr_id}")
+
             if self.prediction:
                 self._prepare_data()
             else:
+                get_logger().error(f"Error getting AI prediction {self.pr_id}")
+                self.git_provider.remove_initial_comment()
                 return None
 
             if get_settings().pr_description.enable_semantic_files_types:
-                self._prepare_file_labels()
+                self.file_label_dict = self._prepare_file_labels()
 
             pr_labels = []
             if get_settings().pr_description.publish_labels:
@@ -104,6 +101,7 @@ class PRDescription:
                 pr_title, pr_body = self._prepare_pr_answer_with_markers()
             else:
                 pr_title, pr_body, = self._prepare_pr_answer()
+            get_logger().debug(f"PR output", title=pr_title, body=pr_body)
 
             # Add help text if gfm_markdown is supported
             if self.git_provider.is_supported("gfm_markdown") and get_settings().pr_description.enable_help_text:
@@ -111,26 +109,21 @@ class PRDescription:
                 pr_body += HelpMessage.get_describe_usage_guide()
                 pr_body += "\n</details>\n"
             elif get_settings().pr_description.enable_help_comment:
-                pr_body +="\n\n___\n\n> ✨ **PR-Agent usage**:"
-                pr_body +="\n>Comment `/help` on the PR to get a list of all available PR-Agent tools and their descriptions\n\n"
-
-            # final markdown description
-            full_markdown_description = f"## Title\n\n{pr_title}\n\n___\n{pr_body}"
-            # get_logger().debug(f"full_markdown_description:\n{full_markdown_description}")
+                pr_body += "\n\n___\n\n> ✨ **PR-Agent usage**:"
+                pr_body += "\n>Comment `/help` on the PR to get a list of all available PR-Agent tools and their descriptions\n\n"
 
             if get_settings().config.publish_output:
-                get_logger().info(f"Pushing answer {self.pr_id}")
-
                 # publish labels
                 if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
-                    current_labels = self.git_provider.get_pr_labels()
-                    user_labels = get_user_labels(current_labels)
+                    original_labels = self.git_provider.get_pr_labels()
+                    get_logger().debug(f"original labels", labels=original_labels)
+                    user_labels = get_user_labels(original_labels)
+                    get_logger().debug(f"published labels:\n{pr_labels + user_labels}")
                     self.git_provider.publish_labels(pr_labels + user_labels)
 
                 # publish description
                 if get_settings().pr_description.publish_description_as_comment:
-                    get_logger().info(f"Publishing answer as comment")
+                    full_markdown_description = f"## Title\n\n{pr_title}\n\n___\n{pr_body}"
                     self.git_provider.publish_comment(full_markdown_description)
                 else:
                     self.git_provider.publish_description(pr_title, pr_body)
@@ -152,10 +145,9 @@ class PRDescription:
         if get_settings().pr_description.use_description_markers and 'pr_agent:' not in self.user_description:
             return None
 
-        get_logger().info(f"Getting PR diff {self.pr_id}")
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
         if self.patches_diff:
-            get_logger().info(f"Getting AI prediction {self.pr_id}")
+            get_logger().debug(f"PR diff", diff=self.patches_diff)
            self.prediction = await self._get_prediction(model)
         else:
             get_logger().error(f"Error getting PR diff {self.pr_id}")
@@ -180,10 +172,6 @@ class PRDescription:
         system_prompt = environment.from_string(get_settings().pr_description_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_description_prompt.user).render(variables)
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
-
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
             temperature=0.2,
@@ -191,9 +179,6 @@ class PRDescription:
             user=user_prompt
         )
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nAI response:\n{response}")
-
         return response
 
     def _prepare_data(self):
@@ -335,25 +320,23 @@ class PRDescription:
             if idx < len(self.data) - 1:
                 pr_body += "\n\n___\n\n"
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"title:\n{title}\n{pr_body}")
-
         return title, pr_body
 
     def _prepare_file_labels(self):
-        self.file_label_dict = {}
+        file_label_dict = {}
         for file in self.data['pr_files']:
             try:
                 filename = file['filename'].replace("'", "`").replace('"', '`')
                 changes_summary = file['changes_summary']
                 changes_title = file['changes_title'].strip()
                 label = file.get('label')
-                if label not in self.file_label_dict:
-                    self.file_label_dict[label] = []
-                self.file_label_dict[label].append((filename, changes_title, changes_summary))
+                if label not in file_label_dict:
+                    file_label_dict[label] = []
+                file_label_dict[label].append((filename, changes_title, changes_summary))
             except Exception as e:
                 get_logger().error(f"Error preparing file label dict {self.pr_id}: {e}")
                 pass
+        return file_label_dict
 
     def process_pr_files_prediction(self, pr_body, value):
         # logic for using collapsible file list
@@ -366,7 +349,6 @@ class PRDescription:
         use_collapsible_file_list = num_files > self.COLLAPSIBLE_FILE_LIST_THRESHOLD
 
         if not self.git_provider.is_supported("gfm_markdown"):
-            get_logger().info(f"Disabling semantic files types for {self.pr_id} since gfm_markdown is not supported")
             return pr_body
 
         try:
             pr_body += "<table>"

pr_agent/tools/pr_help_message.py

@@ -10,6 +10,10 @@ class PRHelpMessage:
     async def run(self):
         try:
             get_logger().info('Getting PR Help Message...')
+            relevant_configs = {'pr_help': dict(get_settings().pr_help),
+                                'config': dict(get_settings().config)}
+            get_logger().debug("Relevant configs", configs=relevant_configs)
+
             pr_comment = "## PR Agent Walkthrough\n\n"
             pr_comment += "🤖 Welcome to the PR Agent, an AI-powered tool for automated pull request analysis, feedback, suggestions and more."""
             pr_comment += "\n\nHere is a list of tools you can use to interact with the PR Agent:\n"

pr_agent/tools/pr_questions.py

@@ -48,27 +48,35 @@ class PRQuestions:
     async def run(self):
         get_logger().info('Answering a PR question...')
+        relevant_configs = {'pr_questions': dict(get_settings().pr_questions),
+                            'config': dict(get_settings().config)}
+        get_logger().debug("Relevant configs", configs=relevant_configs)
 
         if get_settings().config.publish_output:
             self.git_provider.publish_comment("Preparing answer...", is_temporary=True)
 
         await retry_with_fallback_models(self._prepare_prediction)
-        get_logger().info('Preparing answer...')
+
         pr_comment = self._prepare_pr_answer()
+        get_logger().debug(f"PR output", answer=pr_comment)
 
         if self.git_provider.is_supported("gfm_markdown") and get_settings().pr_questions.enable_help_text:
             pr_comment += "<hr>\n\n<details> <summary><strong>✨ Ask tool usage guide:</strong></summary><hr> \n\n"
             pr_comment += HelpMessage.get_ask_usage_guide()
             pr_comment += "\n</details>\n"
 
         if get_settings().config.publish_output:
-            get_logger().info('Pushing answer...')
             self.git_provider.publish_comment(pr_comment)
             self.git_provider.remove_initial_comment()
+
         return ""
 
     async def _prepare_prediction(self, model: str):
-        get_logger().info('Getting PR diff...')
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-        get_logger().info('Getting AI prediction...')
-        self.prediction = await self._get_prediction(model)
+        if self.patches_diff:
+            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            self.prediction = await self._get_prediction(model)
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.prediction = ""
 
     async def _get_prediction(self, model: str):
         variables = copy.deepcopy(self.vars)
@@ -76,9 +84,6 @@ class PRQuestions:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_questions_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_questions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
         return response
@@ -86,6 +91,4 @@ class PRQuestions:
     def _prepare_pr_answer(self) -> str:
         answer_str = f"Question: {self.question_str}\n\n"
         answer_str += f"Answer:\n{self.prediction.strip()}\n\n"
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"answer_str:\n{answer_str}")
         return answer_str

pr_agent/tools/pr_reviewer.py

@@ -3,17 +3,12 @@ import datetime
 from collections import OrderedDict
 from functools import partial
 from typing import List, Tuple
 
-import yaml
 from jinja2 import Environment, StrictUndefined
-from yaml import SafeLoader
 
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
 from pr_agent.algo.token_handler import TokenHandler
-from pr_agent.algo.utils import convert_to_markdown, load_yaml, try_fix_yaml, set_custom_labels, get_user_labels, \
-    ModelType
+from pr_agent.algo.utils import convert_to_markdown, load_yaml, ModelType
 from pr_agent.config_loader import get_settings
 from pr_agent.git_providers import get_git_provider
 from pr_agent.git_providers.git_provider import IncrementalPR, get_main_pr_language
@@ -109,6 +104,9 @@ class PRReviewer:
             return None
 
         get_logger().info(f'Reviewing PR: {self.pr_url} ...')
+        relevant_configs = {'pr_reviewer': dict(get_settings().pr_reviewer),
+                            'config': dict(get_settings().config)}
+        get_logger().debug("Relevant configs", configs=relevant_configs)
 
         if get_settings().config.publish_output:
             self.git_provider.publish_comment("Preparing review...", is_temporary=True)
@@ -118,35 +116,32 @@ class PRReviewer:
                     self.git_provider.remove_initial_comment()
                     return None
 
-            get_logger().info('Preparing PR review...')
-            pr_comment = self._prepare_pr_review()
+            pr_review = self._prepare_pr_review()
+            get_logger().debug(f"PR output", review=pr_review)
 
             if get_settings().config.publish_output:
-                get_logger().info('Pushing PR review...')
                 previous_review_comment = self._get_previous_review_comment()
 
                 # publish the review
                 if get_settings().pr_reviewer.persistent_comment and not self.incremental.is_incremental:
-                    self.git_provider.publish_persistent_comment(pr_comment,
+                    self.git_provider.publish_persistent_comment(pr_review,
                                                                  initial_header="## PR Review",
                                                                  update_header=True)
                 else:
-                    self.git_provider.publish_comment(pr_comment)
+                    self.git_provider.publish_comment(pr_review)
 
                 self.git_provider.remove_initial_comment()
                 if previous_review_comment:
                     self._remove_previous_review_comment(previous_review_comment)
                 if get_settings().pr_reviewer.inline_code_comments:
-                    get_logger().info('Pushing inline code comments...')
                     self._publish_inline_code_comments()
         except Exception as e:
             get_logger().error(f"Failed to review PR: {e}")
 
     async def _prepare_prediction(self, model: str) -> None:
-        get_logger().info('Getting PR diff...')
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
         if self.patches_diff:
-            get_logger().info('Getting AI prediction...')
+            get_logger().debug(f"PR diff", diff=self.patches_diff)
             self.prediction = await self._get_prediction(model)
         else:
             get_logger().error(f"Error getting PR diff")
@@ -169,10 +164,6 @@ class PRReviewer:
         system_prompt = environment.from_string(get_settings().pr_review_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_review_prompt.user).render(variables)
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
-
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
             temperature=0.2,
@@ -180,9 +171,7 @@ class PRReviewer:
             user=user_prompt
         )
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nAI response:\n{response}")
+        get_logger().debug(f"\nAI response:\n{response}")
 
         return response
 
     def _prepare_pr_review(self) -> str:
@@ -245,10 +234,6 @@ class PRReviewer:
         # Add custom labels from the review prediction (effort, security)
         self.set_review_labels(data)
 
-        # Log markdown response if verbosity level is high
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"Markdown response:\n{markdown_text}")
-
         if markdown_text == None or len(markdown_text) == 0:
             markdown_text = ""
@@ -385,7 +370,8 @@ class PRReviewer:
             else:
                 current_labels_filtered = []
             if current_labels or review_labels:
-                get_logger().info(f"Setting review labels: {review_labels + current_labels_filtered}")
+                get_logger().debug(f"Current labels:\n{current_labels}")
+                get_logger().info(f"Setting review labels:\n{review_labels + current_labels_filtered}")
                 self.git_provider.publish_labels(review_labels + current_labels_filtered)
         except Exception as e:
             get_logger().error(f"Failed to set review labels, error: {e}")

pr_agent/tools/pr_update_changelog.py

@@ -3,9 +3,7 @@ from datetime import date
 from functools import partial
 from time import sleep
 from typing import Tuple
-
 from jinja2 import Environment, StrictUndefined
-
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
 from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models
@@ -51,26 +49,33 @@ class PRUpdateChangelog:
         # assert type(self.git_provider) == GithubProvider, "Currently only Github is supported"
 
         get_logger().info('Updating the changelog...')
+        relevant_configs = {'pr_update_changelog': dict(get_settings().pr_update_changelog),
+                            'config': dict(get_settings().config)}
+        get_logger().debug("Relevant configs", configs=relevant_configs)
 
         if get_settings().config.publish_output:
             self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)
 
         await retry_with_fallback_models(self._prepare_prediction)
-        get_logger().info('Preparing PR changelog updates...')
+
         new_file_content, answer = self._prepare_changelog_update()
+        get_logger().debug(f"PR output", changlog=answer)
 
         if get_settings().config.publish_output:
             self.git_provider.remove_initial_comment()
-            get_logger().info('Publishing changelog updates...')
+
             if self.commit_changelog:
-                get_logger().info('Pushing PR changelog updates to repo...')
                 self._push_changelog_update(new_file_content, answer)
             else:
-                get_logger().info('Publishing PR changelog as comment...')
                 self.git_provider.publish_comment(f"**Changelog updates:**\n\n{answer}")
 
     async def _prepare_prediction(self, model: str):
-        get_logger().info('Getting PR diff...')
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
-        get_logger().info('Getting AI prediction...')
-        self.prediction = await self._get_prediction(model)
+        if self.patches_diff:
+            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            self.prediction = await self._get_prediction(model)
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.prediction = ""
 
     async def _get_prediction(self, model: str):
         variables = copy.deepcopy(self.vars)
@@ -78,9 +83,6 @@ class PRUpdateChangelog:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
 
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
@@ -101,9 +103,6 @@ class PRUpdateChangelog:
             answer += "\n\n\n>to commit the new content to the CHANGELOG.md file, please type:" \
                       "\n>'/update_changelog --pr_update_changelog.push_changelog_changes=true'\n"
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"answer:\n{answer}")
-
         return new_file_content, answer
 
     def _push_changelog_update(self, new_file_content, answer):