diff --git a/README.md b/README.md
index 9e445912..4da04a4b 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,9 @@ Making pull requests less painful with an AI agent
[](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
[](https://discord.com/channels/1057273017547378788/1126104260430528613)
+
+
+
diff --git a/pr_agent/algo/ai_handler.py b/pr_agent/algo/ai_handler.py
index a97b97ac..8ab22e05 100644
--- a/pr_agent/algo/ai_handler.py
+++ b/pr_agent/algo/ai_handler.py
@@ -1,12 +1,12 @@
import logging
import openai
-from openai.error import APIError, Timeout, TryAgain
+from openai.error import APIError, Timeout, TryAgain, RateLimitError
from retry import retry
from pr_agent.config_loader import settings
-OPENAI_RETRIES=2
+OPENAI_RETRIES=5
class AiHandler:
"""
@@ -34,7 +34,7 @@ class AiHandler:
except AttributeError as e:
raise ValueError("OpenAI key is required") from e
- @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError),
+ @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
async def chat_completion(self, model: str, temperature: float, system: str, user: str):
"""
@@ -69,6 +69,12 @@ class AiHandler:
except (APIError, Timeout, TryAgain) as e:
logging.error("Error during OpenAI inference: ", e)
raise
+ except RateLimitError as e:
+ logging.error("Rate limit error during OpenAI inference: %s", e)
+ raise
+ except Exception as e:
+ logging.error("Unknown error during OpenAI inference: %s", e)
+ raise TryAgain from e
if response is None or len(response.choices) == 0:
raise TryAgain
resp = response.choices[0]['message']['content']
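
Editor's note: the ai_handler change above widens the retry surface in two ways: RateLimitError is now listed in the retry decorator, and any unknown exception is converted into TryAgain, which the decorator also retries. A minimal, self-contained sketch of how the retry decorator from the retry package behaves with these parameters (RuntimeError stands in for the OpenAI errors; nothing below is project code):

from retry import retry

attempts = []  # illustrative counter, not part of pr-agent

@retry(exceptions=(RuntimeError,), tries=5, delay=2, backoff=2, jitter=(1, 3))
def flaky_call():
    attempts.append(1)
    raise RuntimeError("simulated rate limit")

# flaky_call() sleeps between attempts (roughly 2s, then ~5-7s, ~11-17s, ~23-37s,
# the delay doubling plus jitter) and re-raises RuntimeError after the 5th try,
# so len(attempts) == 5 once the exception finally propagates.
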
diff --git a/pr_agent/git_providers/github_provider.py b/pr_agent/git_providers/github_provider.py
index 9b6d6d8f..feb168f9 100644
--- a/pr_agent/git_providers/github_provider.py
+++ b/pr_agent/git_providers/github_provider.py
@@ -105,6 +105,9 @@ class GithubProvider(GitProvider):
# self.pr.create_issue_comment(pr_comment)
def publish_comment(self, pr_comment: str, is_temporary: bool = False):
+ if is_temporary and not settings.config.publish_output_progress:
+ logging.debug(f"Skipping publish_comment for temporary comment: {pr_comment}")
+ return
response = self.pr.create_issue_comment(pr_comment)
if hasattr(response, "user") and hasattr(response.user, "login"):
self.github_user_id = response.user.login
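
Editor's note: the gate added to publish_comment only affects temporary progress comments; final output is still published. A self-contained sketch of the added condition (the helper name should_publish is illustrative, not from the codebase):

def should_publish(is_temporary: bool, publish_output_progress: bool) -> bool:
    # Temporary "Preparing review..." style comments are skipped when the
    # progress flag is off; everything else is still published.
    return not (is_temporary and not publish_output_progress)

assert should_publish(is_temporary=True, publish_output_progress=False) is False
assert should_publish(is_temporary=True, publish_output_progress=True) is True
assert should_publish(is_temporary=False, publish_output_progress=False) is True
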
@@ -205,7 +208,7 @@ class GithubProvider(GitProvider):
def remove_initial_comment(self):
try:
- for comment in self.pr.comments_list:
+ for comment in getattr(self.pr, 'comments_list', []):
if comment.is_temporary:
comment.delete()
except Exception as e:
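
Editor's note: the remove_initial_comment change swaps direct attribute access for getattr with an empty-list default, so a PR object that lacks comments_list no longer raises AttributeError. Illustrative only (DummyPR is not a real class):

class DummyPR:
    pass  # deliberately has no comments_list attribute

# With getattr and a default, the loop body is simply never entered:
for comment in getattr(DummyPR(), 'comments_list', []):
    raise AssertionError("unreachable: the default [] is empty")
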
diff --git a/pr_agent/git_providers/gitlab_provider.py b/pr_agent/git_providers/gitlab_provider.py
index d9efe1d8..1ab4db89 100644
--- a/pr_agent/git_providers/gitlab_provider.py
+++ b/pr_agent/git_providers/gitlab_provider.py
@@ -259,4 +259,7 @@ class GitLabProvider(GitProvider):
return None
def publish_labels(self, labels):
+ pass
+
+ def publish_inline_comments(self, comments: list[dict]):
pass
\ No newline at end of file
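
Editor's note: publish_inline_comments is added to GitLabProvider as a no-op. Assuming GitProvider declares it as an abstract method (the base class is not shown in this diff), a stub like this is what keeps the concrete provider instantiable; a minimal sketch of that pattern, not the project's actual classes:

from abc import ABC, abstractmethod

class GitProvider(ABC):
    @abstractmethod
    def publish_inline_comments(self, comments: list[dict]):
        ...

class GitLabProvider(GitProvider):
    def publish_inline_comments(self, comments: list[dict]):
        pass  # not supported for GitLab here, but must be defined

GitLabProvider()  # would raise TypeError if the abstract method were missing
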
diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index 2062a57c..e5f583e4 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -2,6 +2,7 @@
model="gpt-4-0613"
git_provider="github"
publish_output=true
+publish_output_progress=true
verbosity_level=0 # 0,1,2
[pr_reviewer]
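
Editor's note: the new publish_output_progress key defaults to true, so behavior is unchanged unless a user turns it off. A hedged sketch of reading it through dynaconf, which pr_agent's config_loader is built on; the file path and loader arguments below are illustrative, not the project's actual loader code:

from dynaconf import Dynaconf

settings = Dynaconf(settings_files=["pr_agent/settings/configuration.toml"])

# With the default above, temporary progress comments stay enabled:
assert settings.config.publish_output_progress
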