diff --git a/pr_agent/algo/pr_processing.py b/pr_agent/algo/pr_processing.py
index f4ffae89..1e482dbf 100644
--- a/pr_agent/algo/pr_processing.py
+++ b/pr_agent/algo/pr_processing.py
@@ -375,6 +375,13 @@ def get_pr_multi_diffs(git_provider: GitProvider,
     for lang in pr_languages:
         sorted_files.extend(sorted(lang['files'], key=lambda x: x.tokens, reverse=True))
 
+
+    # try first a single run with standard diff string, with patch extension, and no deletions
+    patches_extended, total_tokens, patches_extended_tokens = pr_generate_extended_diff(
+        pr_languages, token_handler, add_line_numbers_to_hunks=True)
+    if total_tokens + OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD < get_max_tokens(model):
+        return ["\n".join(patches_extended)]
+
     patches = []
     final_diff_list = []
     total_tokens = token_handler.prompt_tokens
diff --git a/pr_agent/tools/pr_code_suggestions.py b/pr_agent/tools/pr_code_suggestions.py
index 08074899..381c02a6 100644
--- a/pr_agent/tools/pr_code_suggestions.py
+++ b/pr_agent/tools/pr_code_suggestions.py
@@ -226,7 +226,7 @@ class PRCodeSuggestions:
             for i, patches_diff in enumerate(patches_diff_list):
                 get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
                 self.patches_diff = patches_diff
-                prediction = await self._get_prediction(model)
+                prediction = await self._get_prediction(model)  # TODO: parallelize
                 prediction_list.append(prediction)
             self.prediction_list = prediction_list
 
@@ -259,6 +259,9 @@ class PRCodeSuggestions:
                 suggestion_list.append(suggestion)
         data_sorted = [[]] * len(suggestion_list)
 
+        if len(suggestion_list) == 1:
+            return suggestion_list
+
         try:
             suggestion_str = ""
             for i, suggestion in enumerate(suggestion_list):