diff --git a/pr_agent/algo/ai_handlers/langchain_ai_handler.py b/pr_agent/algo/ai_handlers/langchain_ai_handler.py
index cbd3b2c9..5a9dbc3b 100644
--- a/pr_agent/algo/ai_handlers/langchain_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/langchain_ai_handler.py
@@ -1,6 +1,6 @@
 try:
-    from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
-    from langchain.schema import SystemMessage, HumanMessage
+    from langchain_openai import ChatOpenAI, AzureChatOpenAI
+    from langchain_core.messages import SystemMessage, HumanMessage
 except: # we don't enforce langchain as a dependency, so if it's not installed, just move on
     pass
 
@@ -8,7 +8,7 @@
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
-from openai.error import APIError, RateLimitError, Timeout, TryAgain
+from openai import APIError, APITimeoutError, RateLimitError
 from retry import retry
 import functools
 
@@ -31,20 +31,24 @@ class LangChainOpenAIHandler(BaseAiHandler):
                 openai_api_version=get_settings().openai.api_version,
             )
         else:
-            self._chat = ChatOpenAI(openai_api_key=get_settings().openai.key)
+            # For LLMs exposing an OpenAI-compatible API, honor a custom API base if configured.
+            openai_api_base = get_settings().get("OPENAI.API_BASE", None)
+            if openai_api_base is None or len(openai_api_base) == 0:
+                self._chat = ChatOpenAI(openai_api_key=get_settings().openai.key)
+            else:
+                self._chat = ChatOpenAI(openai_api_key=get_settings().openai.key, openai_api_base=openai_api_base)
         except AttributeError as e:
             if getattr(e, "name"):
                 raise ValueError(f"OpenAI {e.name} is required") from e
             else:
                 raise e
 
-    @property
-    def chat(self):
+    def chat(self, messages: list, model: str, temperature: float):
         if self.azure:
             # we must set the deployment_id only here (instead of the __init__ method) to support fallback_deployments
-            return self._chat(deployment_name=self.deployment_id)
+            return self._chat.invoke(input=messages, model=model, temperature=temperature, deployment_name=self.deployment_id)
         else:
-            return self._chat
+            return self._chat.invoke(input=messages, model=model, temperature=temperature)
 
     @property
     def deployment_id(self):
@@ -53,7 +57,7 @@ class LangChainOpenAIHandler(BaseAiHandler):
         """
         return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
 
-    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
+    @retry(exceptions=(APIError, APITimeoutError, AttributeError, RateLimitError),
            tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
     async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
         try: