Merge remote-tracking branch 'origin/main'

Ori Kotek
2023-07-20 22:00:24 +03:00
5 changed files with 20 additions and 4 deletions

View File

@@ -9,6 +9,9 @@ Making pull requests less painful with an AI agent
 [![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
 [![Discord](https://badgen.net/badge/icon/discord?icon=discord&label&color=purple)](https://discord.com/channels/1057273017547378788/1126104260430528613)
+<a href="https://github.com/Codium-ai/pr-agent/commits/main">
+    <img alt="GitHub" src="https://img.shields.io/github/last-commit/Codium-ai/pr-agent/main?style=for-the-badge" height="20">
+</a>
 </div>
 <div style="text-align:left;">

View File

@@ -1,12 +1,12 @@
 import logging
 import openai
-from openai.error import APIError, Timeout, TryAgain
+from openai.error import APIError, Timeout, TryAgain, RateLimitError
 from retry import retry
 from pr_agent.config_loader import settings
-OPENAI_RETRIES=2
+OPENAI_RETRIES=5
 class AiHandler:
     """
@@ -34,7 +34,7 @@ class AiHandler:
         except AttributeError as e:
             raise ValueError("OpenAI key is required") from e
-    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError),
+    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
            tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
     async def chat_completion(self, model: str, temperature: float, system: str, user: str):
         """
@@ -69,6 +69,12 @@ class AiHandler:
         except (APIError, Timeout, TryAgain) as e:
             logging.error("Error during OpenAI inference: ", e)
             raise
+        except (RateLimitError) as e:
+            logging.error("Rate limit error during OpenAI inference: ", e)
+            raise
+        except (Exception) as e:
+            logging.error("Unknown error during OpenAI inference: ", e)
+            raise TryAgain from e
 if response is None or len(response.choices) == 0:
             raise TryAgain
         resp = response.choices[0]['message']['content']
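For context, the retry settings above come from the `retry` package's decorator: `tries` caps the number of attempts, `delay` is the initial wait in seconds, `backoff` multiplies the wait after each failure, and `jitter=(1, 3)` adds a random one-to-three-second offset. Adding `RateLimitError` to the exception tuple makes rate-limit responses retryable, and the new catch-all clause converts unknown errors into `TryAgain` so they fall under the same policy. A minimal sketch of the pattern in isolation (the `ask_model` function and `client.complete` call are hypothetical, not the repository's code):

import logging

from openai.error import APIError, Timeout, TryAgain, RateLimitError
from retry import retry

OPENAI_RETRIES = 5

@retry(exceptions=(APIError, Timeout, TryAgain, RateLimitError),
       tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
def ask_model(client, prompt: str) -> str:
    # `client.complete` stands in for the real OpenAI chat-completion call.
    try:
        return client.complete(prompt)
    except (APIError, Timeout, TryAgain, RateLimitError):
        logging.error("Transient error during inference")
        raise                      # known transient errors: retried with growing delays
    except Exception as e:
        logging.error("Unknown error during inference")
        raise TryAgain from e      # unknown errors become retryable as well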

View File

@@ -105,6 +105,9 @@ class GithubProvider(GitProvider):
         # self.pr.create_issue_comment(pr_comment)
     def publish_comment(self, pr_comment: str, is_temporary: bool = False):
+        if is_temporary and not settings.config.publish_output_progress:
+            logging.debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+            return
         response = self.pr.create_issue_comment(pr_comment)
         if hasattr(response, "user") and hasattr(response.user, "login"):
             self.github_user_id = response.user.login
@@ -205,7 +208,7 @@ class GithubProvider(GitProvider):
     def remove_initial_comment(self):
         try:
-            for comment in self.pr.comments_list:
+            for comment in getattr(self.pr, 'comments_list', []):
                 if comment.is_temporary:
                     comment.delete()
         except Exception as e:
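Together with the new `publish_output_progress` flag (added in the configuration change below), this guard lets users silence the transient progress comments while keeping the final output, and the `getattr` fallback keeps `remove_initial_comment` from raising when no `comments_list` was ever attached to the PR object. A hypothetical calling sequence, with an assumed constructor signature and illustrative comment text:

from pr_agent.git_providers.github_provider import GithubProvider  # module path assumed

provider = GithubProvider("https://github.com/org/repo/pull/1")
provider.publish_comment("Preparing review...", is_temporary=True)   # skipped when publish_output_progress=false
provider.publish_comment("## PR Review\n\nLooks good overall.")      # final output is always published
provider.remove_initial_comment()                                    # deletes any temporary comments that were posted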

View File

@@ -260,3 +260,6 @@ class GitLabProvider(GitProvider):
     def publish_labels(self, labels):
         pass
+
+    def publish_inline_comments(self, comments: list[dict]):
+        pass

View File

@@ -2,6 +2,7 @@
 model="gpt-4-0613"
 git_provider="github"
 publish_output=true
+publish_output_progress=true
 verbosity_level=0 # 0,1,2
 [pr_reviewer]
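The new key sits next to `publish_output` in configuration.toml; the `settings.config.publish_output_progress` lookup in the GitHub provider change above implies these keys live in a `[config]` section. A minimal sketch of reading the flag through the shared settings loader:

from pr_agent.config_loader import settings

# Attribute access mirrors the TOML layout: [config] section -> settings.config.<key>
if not settings.config.publish_output_progress:
    print("Progress comments are disabled; only final results will be posted.")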