Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-15 02:00:39 +08:00
Merge branch 'main' into tr/pydantic
@@ -13,5 +13,9 @@ MAX_TOKENS = {
     'claude-2': 100000,
     'command-nightly': 4096,
     'replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1': 4096,
-    'meta-llama/Llama-2-7b-chat-hf': 4096
+    'meta-llama/Llama-2-7b-chat-hf': 4096,
+    'vertex_ai/codechat-bison': 6144,
+    'vertex_ai/codechat-bison-32k': 32000,
+    'codechat-bison': 6144,
+    'codechat-bison-32k': 32000,
 }
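
The added entries register context-window sizes for the new Vertex AI code-chat models. A minimal sketch of how such a lookup table is typically used for token budgeting (max_output_budget is a hypothetical helper, not part of this commit):

    # Context-window sizes, as in the table above (subset).
    MAX_TOKENS = {
        'vertex_ai/codechat-bison': 6144,
        'vertex_ai/codechat-bison-32k': 32000,
    }

    def max_output_budget(model: str, prompt_tokens: int, reserved: int = 512) -> int:
        # Hypothetical helper: tokens left for the completion after the
        # prompt and a safety margin, given the model's context window.
        window = MAX_TOKENS.get(model, 4096)  # conservative default for unknown models
        return max(window - prompt_tokens - reserved, 0)

    print(max_output_budget('vertex_ai/codechat-bison', prompt_tokens=4000))  # 1632
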
@@ -23,39 +23,43 @@ class AiHandler:
         Initializes the OpenAI API key and other settings from a configuration file.
         Raises a ValueError if the OpenAI key is missing.
         """
-        try:
-            openai.api_key = get_settings().openai.key
-            litellm.openai_key = get_settings().openai.key
-            if get_settings().get("litellm.use_client"):
-                litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
-                assert litellm_token, "LITELLM_TOKEN is required"
-                os.environ["LITELLM_TOKEN"] = litellm_token
-                litellm.use_client = True
-            self.azure = False
-            if get_settings().get("OPENAI.ORG", None):
-                litellm.organization = get_settings().openai.org
-            if get_settings().get("OPENAI.API_TYPE", None):
-                if get_settings().openai.api_type == "azure":
-                    self.azure = True
-                    litellm.azure_key = get_settings().openai.key
-            if get_settings().get("OPENAI.API_VERSION", None):
-                litellm.api_version = get_settings().openai.api_version
-            if get_settings().get("OPENAI.API_BASE", None):
-                litellm.api_base = get_settings().openai.api_base
-            if get_settings().get("ANTHROPIC.KEY", None):
-                litellm.anthropic_key = get_settings().anthropic.key
-            if get_settings().get("COHERE.KEY", None):
-                litellm.cohere_key = get_settings().cohere.key
-            if get_settings().get("REPLICATE.KEY", None):
-                litellm.replicate_key = get_settings().replicate.key
-            if get_settings().get("REPLICATE.KEY", None):
-                litellm.replicate_key = get_settings().replicate.key
-            if get_settings().get("HUGGINGFACE.KEY", None):
-                litellm.huggingface_key = get_settings().huggingface.key
-            if get_settings().get("HUGGINGFACE.API_BASE", None):
-                litellm.api_base = get_settings().huggingface.api_base
-        except AttributeError as e:
-            raise ValueError("OpenAI key is required") from e
+        self.azure = False
+
+        if get_settings().get("OPENAI.KEY", None):
+            openai.api_key = get_settings().openai.key
+            litellm.openai_key = get_settings().openai.key
+        if get_settings().get("litellm.use_client"):
+            litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
+            assert litellm_token, "LITELLM_TOKEN is required"
+            os.environ["LITELLM_TOKEN"] = litellm_token
+            litellm.use_client = True
+        if get_settings().get("OPENAI.ORG", None):
+            litellm.organization = get_settings().openai.org
+        if get_settings().get("OPENAI.API_TYPE", None):
+            if get_settings().openai.api_type == "azure":
+                self.azure = True
+                litellm.azure_key = get_settings().openai.key
+        if get_settings().get("OPENAI.API_VERSION", None):
+            litellm.api_version = get_settings().openai.api_version
+        if get_settings().get("OPENAI.API_BASE", None):
+            litellm.api_base = get_settings().openai.api_base
+        if get_settings().get("ANTHROPIC.KEY", None):
+            litellm.anthropic_key = get_settings().anthropic.key
+        if get_settings().get("COHERE.KEY", None):
+            litellm.cohere_key = get_settings().cohere.key
+        if get_settings().get("REPLICATE.KEY", None):
+            litellm.replicate_key = get_settings().replicate.key
+        if get_settings().get("REPLICATE.KEY", None):
+            litellm.replicate_key = get_settings().replicate.key
+        if get_settings().get("HUGGINGFACE.KEY", None):
+            litellm.huggingface_key = get_settings().huggingface.key
+        if get_settings().get("HUGGINGFACE.API_BASE", None):
+            litellm.api_base = get_settings().huggingface.api_base
+        if get_settings().get("VERTEXAI.VERTEX_PROJECT", None):
+            litellm.vertex_project = get_settings().vertexai.vertex_project
+            litellm.vertex_location = get_settings().get(
+                "VERTEXAI.VERTEX_LOCATION", None
+            )
 
     @property
     def deployment_id(self):
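
The rewritten __init__ makes every provider key optional: the try/except that turned a missing OpenAI key into a ValueError is gone, and each key is wired into litellm only when the corresponding setting exists. A minimal sketch of that optional-key pattern in isolation (configure_providers and the SimpleNamespace stand-in are illustrative, not the project's API):

    import types

    def configure_providers(settings: dict, target) -> None:
        # Wire each provider key into the target only when it is configured,
        # mirroring the "if get_settings().get(...)" chain above.
        mapping = {
            "OPENAI.KEY": "openai_key",
            "ANTHROPIC.KEY": "anthropic_key",
            "COHERE.KEY": "cohere_key",
            "REPLICATE.KEY": "replicate_key",
            "HUGGINGFACE.KEY": "huggingface_key",
        }
        for path, attr in mapping.items():
            value = settings.get(path)
            if value:
                setattr(target, attr, value)

    llm = types.SimpleNamespace()  # stand-in for the litellm module
    configure_providers({"ANTHROPIC.KEY": "sk-ant-example"}, llm)
    print(getattr(llm, "anthropic_key", None))  # sk-ant-example
    print(getattr(llm, "openai_key", "unset"))  # unset: no error for a missing key
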
@@ -11,7 +11,12 @@ def filter_ignored(files):
     try:
         # load regex patterns, and translate glob patterns to regex
         patterns = get_settings().ignore.regex
-        patterns += [fnmatch.translate(glob) for glob in get_settings().ignore.glob]
+        if isinstance(patterns, str):
+            patterns = [patterns]
+        glob_setting = get_settings().ignore.glob
+        if isinstance(glob_setting, str):  # --ignore.glob=[.*utils.py], --ignore.glob=.*utils.py
+            glob_setting = glob_setting.strip('[]').split(",")
+        patterns += [fnmatch.translate(glob) for glob in glob_setting]
 
         # compile all valid patterns
         compiled_patterns = []
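
ignore.glob may now arrive either as a list or as a single, possibly bracket-wrapped string; each glob is converted to a regular expression with the stdlib's fnmatch.translate, which behaves like this:

    import fnmatch
    import re

    # fnmatch.translate turns a shell-style glob into an anchored regex string.
    pattern = fnmatch.translate("*utils.py")
    print(pattern)  # e.g. '(?s:.*utils\\.py)\\Z' on recent Pythons
    print(bool(re.match(pattern, "pr_agent/algo/utils.py")))       # True
    print(bool(re.match(pattern, "pr_agent/algo/utils_test.txt")))  # False
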
@@ -282,7 +282,7 @@ def find_line_number_of_relevant_line_in_file(diff_files: List[FilePatchInfo],
                               r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
 
     for file in diff_files:
-        if file.filename.strip() == relevant_file:
+        if file.filename and (file.filename.strip() == relevant_file):
             patch = file.patch
             patch_lines = patch.splitlines()
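
The extra file.filename check guards against diff entries whose filename is None before calling .strip(). For reference, the hunk-header regex in the context line above parses standard unified-diff headers:

    import re

    RE_HUNK_HEADER = re.compile(
        r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")

    m = RE_HUNK_HEADER.match("@@ -282,7 +285,8 @@ def find_line_number_of_relevant_line_in_file(...)")
    # groups: old start, old count, new start, new count, trailing section heading
    print(m.groups())  # ('282', '7', '285', '8', 'def find_line_number_of_relevant_line_in_file(...)')
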
@@ -282,41 +282,43 @@ def _fix_key_value(key: str, value: str):
     try:
         value = yaml.safe_load(value)
     except Exception as e:
-        get_logger().error(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
+        get_logger().debug(f"Failed to parse YAML for config override {key}={value}", exc_info=e)
     return key, value
 
 
-def load_yaml(review_text: str) -> dict:
-    review_text = review_text.removeprefix('```yaml').rstrip('`')
+def load_yaml(response_text: str) -> dict:
+    response_text = response_text.removeprefix('```yaml').rstrip('`')
     try:
-        data = yaml.safe_load(review_text)
+        data = yaml.safe_load(response_text)
     except Exception as e:
         get_logger().error(f"Failed to parse AI prediction: {e}")
-        data = try_fix_yaml(review_text)
+        data = try_fix_yaml(response_text)
     return data
 
-def try_fix_yaml(review_text: str) -> dict:
-    review_text_lines = review_text.split('\n')
+
+def try_fix_yaml(response_text: str) -> dict:
+    response_text_lines = response_text.split('\n')
 
+    keys = ['relevant line:', 'suggestion content:', 'relevant file:']
     # first fallback - try to convert 'relevant line: ...' to relevant line: |-\n        ...'
-    review_text_lines_copy = review_text_lines.copy()
-    for i in range(0, len(review_text_lines_copy)):
-        if 'relevant line:' in review_text_lines_copy[i] and not '|-' in review_text_lines_copy[i]:
-            review_text_lines_copy[i] = review_text_lines_copy[i].replace('relevant line: ',
-                                                                          'relevant line: |-\n        ')
+    response_text_lines_copy = response_text_lines.copy()
+    for i in range(0, len(response_text_lines_copy)):
+        for key in keys:
+            if key in response_text_lines_copy[i] and not '|-' in response_text_lines_copy[i]:
+                response_text_lines_copy[i] = response_text_lines_copy[i].replace(f'{key}',
+                                                                                  f'{key} |-\n        ')
     try:
-        data = yaml.load('\n'.join(review_text_lines_copy), Loader=yaml.SafeLoader)
-        get_logger().info(f"Successfully parsed AI prediction after adding |-\n to relevant line")
+        data = yaml.safe_load('\n'.join(response_text_lines_copy))
+        get_logger().info(f"Successfully parsed AI prediction after adding |-\n")
         return data
     except:
-        get_logger().debug(f"Failed to parse AI prediction after adding |-\n to relevant line")
+        get_logger().info(f"Failed to parse AI prediction after adding |-\n")
 
     # second fallback - try to remove last lines
     data = {}
-    for i in range(1, len(review_text_lines)):
-        review_text_lines_tmp = '\n'.join(review_text_lines[:-i])
+    for i in range(1, len(response_text_lines)):
+        response_text_lines_tmp = '\n'.join(response_text_lines[:-i])
         try:
-            data = yaml.load(review_text_lines_tmp, Loader=yaml.SafeLoader)
+            data = yaml.safe_load(response_text_lines_tmp,)
             get_logger().info(f"Successfully parsed AI prediction after removing {i} lines")
             break
         except:
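
try_fix_yaml now applies the block-scalar rewrite to three keys rather than only 'relevant line:', and uses yaml.safe_load throughout. The first fallback matters because a value that itself contains a colon breaks plain YAML parsing; forcing it into a |- block scalar makes it parse:

    import yaml

    broken = "relevant line: def foo(x: int) -> int:"
    try:
        yaml.safe_load(broken)
    except yaml.YAMLError as e:
        # plain scalar with embedded ': ' sequences is rejected by the scanner
        print("plain parse fails:", type(e).__name__)

    # The same value as a |- block scalar parses cleanly.
    fixed = "relevant line: |-\n        def foo(x: int) -> int:"
    print(yaml.safe_load(fixed))  # {'relevant line': 'def foo(x: int) -> int:'}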