import asyncio
import copy
import difflib
import textwrap
from functools import partial
from typing import Dict, List

from jinja2 import Environment, StrictUndefined

from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import get_pr_diff, get_pr_multi_diffs, retry_with_fallback_models
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import load_yaml, replace_code_tags, ModelType
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.git_providers.git_provider import get_main_pr_language
from pr_agent.log import get_logger
from pr_agent.servers.help import HelpMessage
from pr_agent.tools.pr_description import insert_br_after_x_chars


class PRCodeSuggestions:
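    """Generate, rank, and publish AI code suggestions for a pull request.

    A minimal usage sketch, assuming a configured git provider and settings:
        await PRCodeSuggestions(pr_url).run()
    """
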
    def __init__(self, pr_url: str, cli_mode=False, args: list = None,
                 ai_handler: partial[BaseAiHandler,] = LiteLLMAIHandler):
        self.git_provider = get_git_provider()(pr_url)
        self.main_language = get_main_pr_language(
            self.git_provider.get_languages(), self.git_provider.get_files()
        )

        # limit context specifically for the improve command, whose input is hard to parse:
        if get_settings().pr_code_suggestions.max_context_tokens:
            MAX_CONTEXT_TOKENS_IMPROVE = get_settings().pr_code_suggestions.max_context_tokens
            if get_settings().config.max_model_tokens > MAX_CONTEXT_TOKENS_IMPROVE:
                get_logger().info(f"Setting max_model_tokens to {MAX_CONTEXT_TOKENS_IMPROVE} for PR improve")
                get_settings().config.max_model_tokens = MAX_CONTEXT_TOKENS_IMPROVE

        # extended mode
        try:
            self.is_extended = self._get_is_extended(args or [])
        except Exception:
            self.is_extended = False
        if self.is_extended:
            num_code_suggestions = get_settings().pr_code_suggestions.num_code_suggestions_per_chunk
        else:
            num_code_suggestions = get_settings().pr_code_suggestions.num_code_suggestions

        self.ai_handler = ai_handler()
        self.patches_diff = None
        self.prediction = None
        self.cli_mode = cli_mode
        self.vars = {
            "title": self.git_provider.pr.title,
            "branch": self.git_provider.get_pr_branch(),
            "description": self.git_provider.get_pr_description(),
            "language": self.main_language,
            "diff": "",  # empty diff for initial calculation
            "num_code_suggestions": num_code_suggestions,
            "summarize_mode": get_settings().pr_code_suggestions.summarize,
            "extra_instructions": get_settings().pr_code_suggestions.extra_instructions,
            "commit_messages_str": self.git_provider.get_commit_messages(),
        }
        self.token_handler = TokenHandler(self.git_provider.pr,
                                          self.vars,
                                          get_settings().pr_code_suggestions_prompt.system,
                                          get_settings().pr_code_suggestions_prompt.user)

        self.progress = "## Generating PR code suggestions\n\n"
        self.progress += """\nWork in progress ...<br>\n<img src="https://codium.ai/images/pr_agent/dual_ball_loading-crop.gif" width=48>"""
        self.progress_response = None

    async def run(self):
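        """Generate code suggestions for the PR and publish them, either as a summarized comment or as inline comments."""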
        try:
            get_logger().info('Generating code suggestions for PR...')

            if get_settings().config.publish_output:
                if self.git_provider.is_supported("gfm_markdown"):
                    self.progress_response = self.git_provider.publish_comment(self.progress)
                else:
                    self.git_provider.publish_comment("Preparing suggestions...", is_temporary=True)

            get_logger().info('Preparing PR code suggestions...')
            if not self.is_extended:
                await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
                data = self._prepare_pr_code_suggestions()
            else:
                data = await retry_with_fallback_models(self._prepare_prediction_extended, ModelType.TURBO)

            if not data or 'code_suggestions' not in data:
                get_logger().info('No code suggestions found for PR.')
                return

            if (not self.is_extended and get_settings().pr_code_suggestions.rank_suggestions) or \
                    (self.is_extended and get_settings().pr_code_suggestions.rank_extended_suggestions):
                get_logger().info('Ranking suggestions...')
                data['code_suggestions'] = await self.rank_suggestions(data['code_suggestions'])

            if get_settings().config.publish_output:
                get_logger().info('Pushing PR code suggestions...')
                self.git_provider.remove_initial_comment()
                if get_settings().pr_code_suggestions.summarize and self.git_provider.is_supported("gfm_markdown"):
                    get_logger().info('Pushing summarized code suggestions...')

                    # generate summarized suggestions
                    pr_body = self.generate_summarized_suggestions(data)

                    # add usage guide
                    if get_settings().pr_code_suggestions.enable_help_text:
                        pr_body += "<hr>\n\n<details> <summary><strong>✨ Usage guide:</strong></summary><hr> \n\n"
                        pr_body += HelpMessage.get_improve_usage_guide()
                        pr_body += "\n</details>\n"

                    if self.progress_response:
                        self.git_provider.edit_comment(self.progress_response, body=pr_body)
                    else:
                        self.git_provider.publish_comment(pr_body)
                else:
                    get_logger().info('Pushing inline code suggestions...')
                    self.push_inline_code_suggestions(data)
                    if self.progress_response:
                        self.progress_response.delete()
        except Exception as e:
            get_logger().error(f"Failed to generate code suggestions for PR, error: {e}")
            if self.progress_response:
                self.progress_response.delete()

    async def _prepare_prediction(self, model: str):
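        """Get a single diff of the PR that fits the model context and store the model prediction in `self.prediction`."""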
        get_logger().info('Getting PR diff...')
        patches_diff = get_pr_diff(self.git_provider,
                                   self.token_handler,
                                   model,
                                   add_line_numbers_to_hunks=True,
                                   disable_extra_lines=True)

        get_logger().info('Getting AI prediction...')
        self.prediction = await self._get_prediction(model, patches_diff)

    async def _get_prediction(self, model: str, patches_diff: str):
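        """Render the code-suggestions prompts for the given diff and return the raw model response."""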
        variables = copy.deepcopy(self.vars)
        variables["diff"] = patches_diff  # update diff
        environment = Environment(undefined=StrictUndefined)
        system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables)
        user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
        if get_settings().config.verbosity_level >= 2:
            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
            get_logger().info(f"\nUser prompt:\n{user_prompt}")
        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                        system=system_prompt, user=user_prompt)

        if get_settings().config.verbosity_level >= 2:
            get_logger().info(f"\nAI response:\n{response}")

        return response

    def _prepare_pr_code_suggestions(self) -> Dict:
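        """Parse the YAML model response into a dict, dropping suggestions whose improved code equals the existing code."""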
        review = self.prediction.strip()
        data = load_yaml(review,
                         keys_fix_yaml=["relevant_file", "suggestion_content", "existing_code", "improved_code"])
        if isinstance(data, list):
            data = {'code_suggestions': data}

        # remove invalid suggestions
        suggestion_list = []
        for i, suggestion in enumerate(data['code_suggestions']):
            if suggestion['existing_code'] != suggestion['improved_code']:
                suggestion_list.append(suggestion)
            else:
                get_logger().debug(
                    f"Skipping suggestion {i + 1}, because existing code is equal to improved code: {suggestion['existing_code']}")
        data['code_suggestions'] = suggestion_list

        return data

    def push_inline_code_suggestions(self, data):
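        """Publish each suggestion as an inline code comment; if batch publishing fails, retry each suggestion separately."""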
        code_suggestions = []

        if not data['code_suggestions']:
            get_logger().info('No suggestions found to improve this PR.')
            if self.progress_response:
                return self.git_provider.edit_comment(self.progress_response,
                                                      body='No suggestions found to improve this PR.')
            else:
                return self.git_provider.publish_comment('No suggestions found to improve this PR.')

        for d in data['code_suggestions']:
            try:
                if get_settings().config.verbosity_level >= 2:
                    get_logger().info(f"suggestion: {d}")
                relevant_file = d['relevant_file'].strip()
                relevant_lines_start = int(d['relevant_lines_start'])  # absolute position
                relevant_lines_end = int(d['relevant_lines_end'])
                content = d['suggestion_content'].rstrip()
                new_code_snippet = d['improved_code'].rstrip()
                label = d['label'].strip()

                if new_code_snippet:
                    new_code_snippet = self.dedent_code(relevant_file, relevant_lines_start, new_code_snippet)

                body = f"**Suggestion:** {content} [{label}]\n```suggestion\n" + new_code_snippet + "\n```"
                code_suggestions.append({'body': body, 'relevant_file': relevant_file,
                                         'relevant_lines_start': relevant_lines_start,
                                         'relevant_lines_end': relevant_lines_end})
            except Exception:
                if get_settings().config.verbosity_level >= 2:
                    get_logger().info(f"Could not parse suggestion: {d}")

        is_successful = self.git_provider.publish_code_suggestions(code_suggestions)
        if not is_successful:
            get_logger().info("Failed to publish code suggestions, trying to publish each suggestion separately")
            for code_suggestion in code_suggestions:
                self.git_provider.publish_code_suggestions([code_suggestion])

    def dedent_code(self, relevant_file, relevant_lines_start, new_code_snippet):
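        """Adjust the indentation of the suggested snippet to match the original line it replaces in the head file."""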
        try:  # dedent code snippet
            self.diff_files = self.git_provider.diff_files if self.git_provider.diff_files \
                else self.git_provider.get_diff_files()
            original_initial_line = None
            for file in self.diff_files:
                if file.filename.strip() == relevant_file:
                    if file.head_file:  # in Bitbucket, head_file is empty. TODO: fix this
                        original_initial_line = file.head_file.splitlines()[relevant_lines_start - 1]
                    break
            if original_initial_line:
                suggested_initial_line = new_code_snippet.splitlines()[0]
                original_initial_spaces = len(original_initial_line) - len(original_initial_line.lstrip())
                suggested_initial_spaces = len(suggested_initial_line) - len(suggested_initial_line.lstrip())
                delta_spaces = original_initial_spaces - suggested_initial_spaces
                if delta_spaces > 0:
                    new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
        except Exception as e:
            if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")

        return new_code_snippet

    def _get_is_extended(self, args: list[str]) -> bool:
        """Check whether extended mode should be enabled, either via the `--extended` flag or automatically from the configuration."""
        if any("extended" in arg for arg in args):
            get_logger().info("Extended mode is enabled by the `--extended` flag")
            return True
        if get_settings().pr_code_suggestions.auto_extended_mode:
            get_logger().info("Extended mode is enabled automatically based on the configuration toggle")
            return True
        return False

    async def _prepare_prediction_extended(self, model: str) -> dict:
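        """Split the PR diff into chunks, get a prediction per chunk (optionally in parallel), and merge all suggestions."""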
        get_logger().info('Getting PR diff...')
        patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
                                               max_calls=get_settings().pr_code_suggestions.max_number_of_calls)

        # parallelize calls to AI:
        if get_settings().pr_code_suggestions.parallel_calls:
            get_logger().info('Getting multi AI predictions in parallel...')
            prediction_list = await asyncio.gather(
                *[self._get_prediction(model, patches_diff) for patches_diff in patches_diff_list])
            self.prediction_list = prediction_list
        else:
            get_logger().info('Getting multi AI predictions...')
            prediction_list = []
            for i, patches_diff in enumerate(patches_diff_list):
                get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
                prediction = await self._get_prediction(model, patches_diff)
                prediction_list.append(prediction)

        data = {}
        for prediction in prediction_list:
            self.prediction = prediction
            data_per_chunk = self._prepare_pr_code_suggestions()
            if "code_suggestions" in data:
                data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
            else:
                data.update(data_per_chunk)
        self.data = data
        return data

    async def rank_suggestions(self, data: List) -> List:
        """
        Call a model to rank (sort) the code suggestions by their order of importance.

        Args:
            data (List): A list of code suggestions to be ranked.

        Returns:
            List: The ranked list of code suggestions.
        """
        suggestion_list = []
        if not data:
            return suggestion_list
        for suggestion in data:
            suggestion_list.append(suggestion)
        data_sorted = [[]] * len(suggestion_list)

        if len(suggestion_list) == 1:
            return suggestion_list

        try:
            suggestion_str = ""
            for i, suggestion in enumerate(suggestion_list):
                suggestion_str += f"suggestion {i + 1}: " + str(suggestion) + '\n\n'

            variables = {'suggestion_list': suggestion_list, 'suggestion_str': suggestion_str}
            model = get_settings().config.model
            environment = Environment(undefined=StrictUndefined)
            system_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.system).render(
                variables)
            user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
            if get_settings().config.verbosity_level >= 2:
                get_logger().info(f"\nSystem prompt:\n{system_prompt}")
                get_logger().info(f"\nUser prompt:\n{user_prompt}")
            response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
                                                                            user=user_prompt)

            sort_order = load_yaml(response)
            for s in sort_order['Sort Order']:
                suggestion_number = s['suggestion number']
                importance_order = s['importance order']
                data_sorted[importance_order - 1] = suggestion_list[suggestion_number - 1]

            if get_settings().pr_code_suggestions.final_clip_factor != 1:
                max_len = max(
                    len(data_sorted),
                    get_settings().pr_code_suggestions.num_code_suggestions,
                    get_settings().pr_code_suggestions.num_code_suggestions_per_chunk,
                )
                new_len = int(0.5 + max_len * get_settings().pr_code_suggestions.final_clip_factor)
                if new_len < len(data_sorted):
                    data_sorted = data_sorted[:new_len]
        except Exception as e:
            if get_settings().config.verbosity_level >= 1:
                get_logger().info(f"Could not sort suggestions, error: {e}")
            data_sorted = suggestion_list

        return data_sorted

    def generate_summarized_suggestions(self, data: Dict) -> str:
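        """Build an HTML comment body that groups the suggestions by label, each rendered with a diff of existing vs. improved code."""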
        try:
            pr_body = "## PR Code Suggestions\n\n"

            if len(data.get('code_suggestions', [])) == 0:
                pr_body += "No suggestions found to improve this PR."
                return pr_body

            language_extension_map_org = get_settings().language_extension_map_org
            extension_to_language = {}
            for language, extensions in language_extension_map_org.items():
                for ext in extensions:
                    extension_to_language[ext] = language

            pr_body += "<table>"
            header = "Suggestions"
            delta = 75
            header += "&nbsp; " * delta
            pr_body += f"""<thead><tr><th></th><th>{header}</th></tr></thead>"""
            pr_body += """<tbody>"""
            suggestions_labels = dict()
            # add all suggestions related to each label
            for suggestion in data['code_suggestions']:
                label = suggestion['label'].strip().strip("'").strip('"')
                if label not in suggestions_labels:
                    suggestions_labels[label] = []
                suggestions_labels[label].append(suggestion)

            for label, suggestions in suggestions_labels.items():
                pr_body += f"""<tr><td><strong>{label}</strong></td>"""
                pr_body += """<td>"""
                # pr_body += f"""<details><summary>{len(suggestions)} suggestions</summary>"""
                pr_body += """<table>"""
                for suggestion in suggestions:
                    relevant_file = suggestion['relevant_file'].strip()
                    relevant_lines_start = int(suggestion['relevant_lines_start'])
                    relevant_lines_end = int(suggestion['relevant_lines_end'])
                    if relevant_lines_start == relevant_lines_end:
                        range_str = f"[{relevant_lines_start}]"
                    else:
                        range_str = f"[{relevant_lines_start}-{relevant_lines_end}]"
                    code_snippet_link = self.git_provider.get_line_link(relevant_file, relevant_lines_start,
                                                                        relevant_lines_end)
                    # add html table for each suggestion
                    suggestion_content = suggestion['suggestion_content'].rstrip()
                    suggestion_content = insert_br_after_x_chars(suggestion_content, 90)
                    # pr_body += f"<tr><td><details><summary>{suggestion_content}</summary>"
                    existing_code = suggestion['existing_code'].rstrip() + "\n"
                    improved_code = suggestion['improved_code'].rstrip() + "\n"

                    diff = difflib.unified_diff(existing_code.split('\n'),
                                                improved_code.split('\n'), n=999)
                    patch_orig = "\n".join(diff)
                    patch = "\n".join(patch_orig.splitlines()[5:]).strip('\n')

                    example_code = ""
                    example_code += f"```diff\n{patch}\n```\n"

                    pr_body += """<tr><td>"""
                    suggestion_summary = suggestion['one_sentence_summary'].strip()
                    if '`' in suggestion_summary:
                        suggestion_summary = replace_code_tags(suggestion_summary)
                    suggestion_summary = suggestion_summary + max((77 - len(suggestion_summary)), 0) * "&nbsp;"
                    pr_body += f"""\n\n<details><summary>{suggestion_summary}</summary>\n\n___\n\n"""

                    pr_body += f"""


**{suggestion_content}**

[{relevant_file} {range_str}]({code_snippet_link})

{example_code}
"""
                    pr_body += "</details>"
                    pr_body += "</td></tr>"

                pr_body += """</table>"""
                # pr_body += "</details>"
                pr_body += """</td></tr>"""
            pr_body += """</tbody></table>"""
            return pr_body
        except Exception as e:
            get_logger().info(f"Failed to publish summarized code suggestions, error: {e}")
            return ""
|