Mirror of https://github.com/qodo-ai/pr-agent.git
Merge remote-tracking branch 'origin/main' into tr/issue_tool

# Conflicts:
#   pr_agent/algo/utils.py
@@ -26,6 +26,7 @@ class AiHandler:
        try:
            openai.api_key = get_settings().openai.key
            litellm.openai_key = get_settings().openai.key
            litellm.debugger = get_settings().config.litellm_debugger
            self.azure = False
            if get_settings().get("OPENAI.ORG", None):
                litellm.organization = get_settings().openai.org
@@ -43,6 +44,10 @@ class AiHandler:
                litellm.cohere_key = get_settings().cohere.key
            if get_settings().get("REPLICATE.KEY", None):
                litellm.replicate_key = get_settings().replicate.key
            if get_settings().get("REPLICATE.KEY", None):
                litellm.replicate_key = get_settings().replicate.key
            if get_settings().get("HUGGINGFACE.KEY", None):
                litellm.huggingface_key = get_settings().huggingface.key
        except AttributeError as e:
            raise ValueError("OpenAI key is required") from e
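The constructor hunks above all follow one pattern: each optional provider key is looked up defensively with get_settings().get(...) and only copied onto the litellm module when present. A minimal sketch of that same guard pattern, assuming the pr_agent.config_loader import used in this file; the provider subset and the mapping dictionary below are illustrative, not part of the diff:

    import litellm

    from pr_agent.config_loader import get_settings  # assumed import, matching usage in this file

    # Illustrative subset of the optional providers wired up above; each settings path is
    # looked up defensively and only copied into litellm when a value is configured.
    OPTIONAL_KEYS = {
        "REPLICATE.KEY": "replicate_key",
        "HUGGINGFACE.KEY": "huggingface_key",
    }

    for settings_path, litellm_attr in OPTIONAL_KEYS.items():
        value = get_settings().get(settings_path, None)
        if value:
            setattr(litellm, litellm_attr, value)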
@@ -55,7 +60,7 @@ class AiHandler:

    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
-   async def chat_completion(self, model: str, temperature: float, system: str, user: str):
+   async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
        """
        Performs a chat completion using the OpenAI ChatCompletion API.
        Retries in case of API errors or timeouts.
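A brief sketch of how a caller might use the reordered signature, where temperature becomes an optional keyword defaulting to 0.2; the handler instance, model name, and prompt strings below are placeholders, not part of this diff:

    # Hypothetical call site; ai_handler is assumed to be an already-constructed AiHandler.
    response = await ai_handler.chat_completion(
        model="gpt-4",
        system="You are a code reviewer.",
        user="Summarize this pull request.",
    )  # temperature is omitted, so the new 0.2 default applies

    response = await ai_handler.chat_completion(
        model="gpt-4",
        system="You are a code reviewer.",
        user="Summarize this pull request.",
        temperature=0.0,  # still overridable per call, as before
    )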
@@ -1,5 +1,4 @@
from __future__ import annotations

import logging
import re
@@ -157,7 +156,7 @@ def convert_to_hunks_with_lines_numbers(patch: str, file) -> str:

    example output:
    ## src/file.ts
-   --new hunk--
+   __new hunk__
    881 line1
    882 line2
    883 line3
@@ -166,7 +165,7 @@ def convert_to_hunks_with_lines_numbers(patch: str, file) -> str:
    889 line6
    890 line7
    ...
-   --old hunk--
+   __old hunk__
    line1
    line2
    - line3
@@ -176,8 +175,7 @@ def convert_to_hunks_with_lines_numbers(patch: str, file) -> str:
    ...
    """

-   patch_with_lines_str = f"## {file.filename}\n"
-   import re
+   patch_with_lines_str = f"\n\n## {file.filename}\n"
    patch_lines = patch.splitlines()
    RE_HUNK_HEADER = re.compile(
        r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
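For reference, RE_HUNK_HEADER captures the old/new start lines and sizes plus any trailing section text from a hunk header. A tiny standalone check of the regex shown above (the sample header string is made up):

    import re

    RE_HUNK_HEADER = re.compile(
        r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

    # Sample header for illustration only.
    m = RE_HUNK_HEADER.match("@@ -157,7 +156,7 @@ def some_function():")
    print(m.groups())  # ('157', '7', '156', '7', 'def some_function():')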
@@ -185,23 +183,30 @@ def convert_to_hunks_with_lines_numbers(patch: str, file) -> str:
    old_content_lines = []
    match = None
    start1, size1, start2, size2 = -1, -1, -1, -1
+   prev_header_line = []
+   header_line =[]
    for line in patch_lines:
        if 'no newline at end of file' in line.lower():
            continue

        if line.startswith('@@'):
+           header_line = line
            match = RE_HUNK_HEADER.match(line)
            if match and new_content_lines:  # found a new hunk, split the previous lines
                if new_content_lines:
-                   patch_with_lines_str += '\n--new hunk--\n'
+                   if prev_header_line:
+                       patch_with_lines_str += f'\n{prev_header_line}\n'
+                   patch_with_lines_str += '__new hunk__\n'
                    for i, line_new in enumerate(new_content_lines):
                        patch_with_lines_str += f"{start2 + i} {line_new}\n"
                if old_content_lines:
-                   patch_with_lines_str += '--old hunk--\n'
+                   patch_with_lines_str += '__old hunk__\n'
                    for line_old in old_content_lines:
                        patch_with_lines_str += f"{line_old}\n"
                new_content_lines = []
                old_content_lines = []
+           if match:
+               prev_header_line = header_line
            try:
                start1, size1, start2, size2 = map(int, match.groups()[:4])
            except: # '@@ -0,0 +1 @@' case
@@ -219,12 +224,13 @@ def convert_to_hunks_with_lines_numbers(patch: str, file) -> str:
    # finishing last hunk
    if match and new_content_lines:
        if new_content_lines:
-           patch_with_lines_str += '\n--new hunk--\n'
+           patch_with_lines_str += f'\n{header_line}\n'
+           patch_with_lines_str += '\n__new hunk__\n'
            for i, line_new in enumerate(new_content_lines):
                patch_with_lines_str += f"{start2 + i} {line_new}\n"
        if old_content_lines:
-           patch_with_lines_str += '\n--old hunk--\n'
+           patch_with_lines_str += '\n__old hunk__\n'
            for line_old in old_content_lines:
                patch_with_lines_str += f"{line_old}\n"

-   return patch_with_lines_str.strip()
+   return patch_with_lines_str.rstrip()
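A hedged usage sketch of the numbered-hunk output this function produces, tying together the renamed __new hunk__/__old hunk__ markers and the hunk header now echoed into the output; the module path and the minimal file stand-in are assumptions, since the surrounding module is not part of this diff:

    from types import SimpleNamespace

    # Assumed module path for the function shown in this diff.
    from pr_agent.algo.git_patch_processing import convert_to_hunks_with_lines_numbers

    patch = (
        "@@ -1,2 +1,2 @@\n"
        " line1\n"
        "-line2\n"
        "+line2 changed\n"
    )
    file = SimpleNamespace(filename="src/file.ts")  # only .filename is needed here

    print(convert_to_hunks_with_lines_numbers(patch, file))
    # Expected shape (per the docstring above): a "## src/file.ts" heading, a __new hunk__
    # block with new-file line numbers, and an __old hunk__ block listing the removed lines.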
@@ -57,7 +57,7 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
    pr_languages = sort_files_by_main_languages(git_provider.get_languages(), diff_files)

    # generate a standard diff string, with patch extension
-   patches_extended, total_tokens = pr_generate_extended_diff(pr_languages, token_handler,
+   patches_extended, total_tokens, patches_extended_tokens = pr_generate_extended_diff(pr_languages, token_handler,
                                                                add_line_numbers_to_hunks)

    # if we are under the limit, return the full diff
@@ -78,9 +78,9 @@ def get_pr_diff(git_provider: GitProvider, token_handler: TokenHandler, model: s
    return final_diff


-def pr_generate_extended_diff(pr_languages: list, token_handler: TokenHandler,
-                              add_line_numbers_to_hunks: bool) -> \
-       Tuple[list, int]:
+def pr_generate_extended_diff(pr_languages: list,
+                              token_handler: TokenHandler,
+                              add_line_numbers_to_hunks: bool) -> Tuple[list, int, list]:
    """
    Generate a standard diff string with patch extension, while counting the number of tokens used and applying diff
    minimization techniques if needed.
@@ -90,13 +90,10 @@ def pr_generate_extended_diff(pr_languages: list, token_handler: TokenHandler,
      files.
    - token_handler: An object of the TokenHandler class used for handling tokens in the context of the pull request.
    - add_line_numbers_to_hunks: A boolean indicating whether to add line numbers to the hunks in the diff.

    Returns:
    - patches_extended: A list of extended patches for each file in the pull request.
    - total_tokens: The total number of tokens used in the extended patches.
    """
    total_tokens = token_handler.prompt_tokens  # initial tokens
    patches_extended = []
    patches_extended_tokens = []
    for lang in pr_languages:
        for file in lang['files']:
            original_file_content_str = file.base_file
@@ -106,7 +103,7 @@ def pr_generate_extended_diff(pr_languages: list, token_handler: TokenHandler,

            # extend each patch with extra lines of context
            extended_patch = extend_patch(original_file_content_str, patch, num_lines=PATCH_EXTRA_LINES)
-           full_extended_patch = f"## {file.filename}\n\n{extended_patch}\n"
+           full_extended_patch = f"\n\n## {file.filename}\n\n{extended_patch}\n"

            if add_line_numbers_to_hunks:
                full_extended_patch = convert_to_hunks_with_lines_numbers(extended_patch, file)
@@ -114,9 +111,10 @@ def pr_generate_extended_diff(pr_languages: list, token_handler: TokenHandler,
            patch_tokens = token_handler.count_tokens(full_extended_patch)
            file.tokens = patch_tokens
            total_tokens += patch_tokens
+           patches_extended_tokens.append(patch_tokens)
            patches_extended.append(full_extended_patch)

-   return patches_extended, total_tokens
+   return patches_extended, total_tokens, patches_extended_tokens


def pr_generate_compressed_diff(top_langs: list, token_handler: TokenHandler, model: str,
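A small sketch of consuming the widened return value, mirroring the updated call site in get_pr_diff above; pr_languages and token_handler are assumed to be prepared as in that function, and the threshold is arbitrary:

    patches_extended, total_tokens, patches_extended_tokens = pr_generate_extended_diff(
        pr_languages, token_handler, add_line_numbers_to_hunks=True)

    # patches_extended_tokens lines up with patches_extended, one token count per patch,
    # so an oversized file can be spotted without re-counting its tokens:
    for patch, tokens in zip(patches_extended, patches_extended_tokens):
        if tokens > 1000:  # arbitrary threshold, for illustration only
            print(f"large patch ({tokens} tokens): {patch.strip().splitlines()[0]}")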
@@ -324,7 +322,9 @@ def clip_tokens(text: str, max_tokens: int) -> str:
    Returns:
        str: The clipped string.
    """
    # We'll estimate the number of tokens by heuristically assuming 2.5 tokens per word
    if not text:
        return text

    try:
        encoder = get_token_encoder()
        num_input_tokens = len(encoder.encode(text))
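A few illustrative calls against the early-return guard added here; clip_tokens itself is defined in the module this hunk modifies, and the inputs below are made up:

    assert clip_tokens("", 10) == ""         # empty text now comes back unchanged
    assert clip_tokens(None, 10) is None     # any falsy input short-circuits before encoding
    clipped = clip_tokens("word " * 5000, 10)  # long text is still cut down toward the token budget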
@@ -337,4 +337,84 @@ def clip_tokens(text: str, max_tokens: int) -> str:
        return clipped_text
    except Exception as e:
        logging.warning(f"Failed to clip tokens: {e}")
        return text
        return text


def get_pr_multi_diffs(git_provider: GitProvider,
                       token_handler: TokenHandler,
                       model: str,
                       max_calls: int = 5) -> List[str]:
    """
    Retrieves the diff files from a Git provider, sorts them by main language, and generates patches for each file.
    The patches are split into multiple groups based on the maximum number of tokens allowed for the given model.

    Args:
        git_provider (GitProvider): An object that provides access to Git provider APIs.
        token_handler (TokenHandler): An object that handles tokens in the context of a pull request.
        model (str): The name of the model.
        max_calls (int, optional): The maximum number of calls to retrieve diff files. Defaults to 5.

    Returns:
        List[str]: A list of final diff strings, split into multiple groups based on the maximum number of tokens allowed for the given model.

    Raises:
        RateLimitExceededException: If the rate limit for the Git provider API is exceeded.
    """
    try:
        diff_files = git_provider.get_diff_files()
    except RateLimitExceededException as e:
        logging.error(f"Rate limit exceeded for git provider API. original message {e}")
        raise

    # Sort files by main language
    pr_languages = sort_files_by_main_languages(git_provider.get_languages(), diff_files)

    # Sort files within each language group by tokens in descending order
    sorted_files = []
    for lang in pr_languages:
        sorted_files.extend(sorted(lang['files'], key=lambda x: x.tokens, reverse=True))

    patches = []
    final_diff_list = []
    total_tokens = token_handler.prompt_tokens
    call_number = 1
    for file in sorted_files:
        if call_number > max_calls:
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Reached max calls ({max_calls})")
            break

        original_file_content_str = file.base_file
        new_file_content_str = file.head_file
        patch = file.patch
        if not patch:
            continue

        # Remove delete-only hunks
        patch = handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file.filename)
        if patch is None:
            continue

        patch = convert_to_hunks_with_lines_numbers(patch, file)
        new_patch_tokens = token_handler.count_tokens(patch)
        if patch and (total_tokens + new_patch_tokens > MAX_TOKENS[model] - OUTPUT_BUFFER_TOKENS_SOFT_THRESHOLD):
            final_diff = "\n".join(patches)
            final_diff_list.append(final_diff)
            patches = []
            total_tokens = token_handler.prompt_tokens
            call_number += 1
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Call number: {call_number}")

        if patch:
            patches.append(patch)
            total_tokens += new_patch_tokens
            if get_settings().config.verbosity_level >= 2:
                logging.info(f"Tokens: {total_tokens}, last filename: {file.filename}")

    # Add the last chunk
    if patches:
        final_diff = "\n".join(patches)
        final_diff_list.append(final_diff)

    return final_diff_list
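A hedged sketch of how the new get_pr_multi_diffs splitting might be consumed; git_provider, token_handler, and the model name are assumed to be set up elsewhere, and the prompt handling is only indicated in comments:

    diff_chunks = get_pr_multi_diffs(git_provider, token_handler, model="gpt-4", max_calls=5)

    # Each chunk stays within the model's token budget, so each one can back a separate request.
    for i, diff_chunk in enumerate(diff_chunks, start=1):
        print(f"--- chunk {i}: {token_handler.count_tokens(diff_chunk)} tokens ---")
        # e.g. build one prompt per chunk and send one model call per chunk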
@@ -250,9 +250,7 @@ def update_settings_from_args(args: List[str]) -> List[str]:
                    logging.error(f'Invalid argument format: {arg}')
                other_args.append(arg)
                continue
-           key, value = vals
-           key = key.strip().upper()
-           value = value.strip()
+           key, value = _fix_key_value(*vals)
            if key in get_settings():
                get_settings().set(key, value)
                logging.info(f'Updated setting {key} to: "{value}"')
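The _fix_key_value helper itself is defined outside this hunk. Based only on the inline code it replaces, a plausible stand-in would look like this; the real helper may do more:

    def _fix_key_value(key: str, value: str):
        # Stand-in mirroring the removed inline normalization: uppercase the setting key
        # and strip surrounding whitespace. The actual helper in utils.py is not shown here.
        return key.strip().upper(), value.strip()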