Merge pull request #105 from Codium-ai/ok/retry_on_rate_limit_error

Retry on Rate Limit Error
This commit is contained in:
Ori Kotek
2023-07-20 18:31:04 +03:00
committed by GitHub

View File

@ -1,12 +1,12 @@
import logging
import openai
from openai.error import APIError, Timeout, TryAgain, RateLimitError
from retry import retry
from pr_agent.config_loader import settings

OPENAI_RETRIES = 5
class AiHandler: class AiHandler:
""" """
@ -34,7 +34,7 @@ class AiHandler:
        except AttributeError as e:
            raise ValueError("OpenAI key is required") from e

    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
    async def chat_completion(self, model: str, temperature: float, system: str, user: str):
""" """
@ -69,6 +69,12 @@ class AiHandler:
        except (APIError, Timeout, TryAgain) as e:
            logging.error("Error during OpenAI inference: ", e)
            raise
        except (RateLimitError) as e:
            logging.error("Rate limit error during OpenAI inference: ", e)
            raise
        except (Exception) as e:
            logging.error("Unknown error during OpenAI inference: ", e)
            raise TryAgain from e
        if response is None or len(response.choices) == 0:
            raise TryAgain
        resp = response.choices[0]['message']['content']