Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-03 04:10:49 +08:00
change type check and remove useless sync
@@ -8,10 +8,10 @@ except: # we don't enforce langchain as a dependency, so if it's not installed,
     pass
 
 import functools
-from typing import Optional
 
 import openai
 from tenacity import retry, retry_if_exception_type, retry_if_not_exception_type, stop_after_attempt
+from langchain_core.runnables import Runnable
 
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
@@ -30,13 +30,6 @@ class LangChainOpenAIHandler(BaseAiHandler):
         super().__init__()
         self.azure = get_settings().get("OPENAI.API_TYPE", "").lower() == "azure"
 
-        # Create a default unused chat object to trigger early validation
-        self._create_chat(self.deployment_id)
-
-    def chat(self, messages: list, model: str, temperature: float):
-        chat = self._create_chat(self.deployment_id)
-        return chat.invoke(input=messages, model=model, temperature=temperature)
-
     @property
     def deployment_id(self):
         """
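Note: with the synchronous chat() helper removed, callers reach the model only through the async path. A minimal sketch of the equivalent async call, assuming langchain-openai is installed (the key and prompts below are placeholders, not values from this repository):

    import asyncio

    from langchain_core.messages import HumanMessage, SystemMessage
    from langchain_openai import ChatOpenAI

    async def main():
        llm = ChatOpenAI(openai_api_key="sk-placeholder")  # placeholder credential
        messages = [SystemMessage(content="system prompt"),
                    HumanMessage(content="user prompt")]
        # ainvoke is the async counterpart of the removed chat.invoke(...) call
        response = await llm.ainvoke(messages)
        return response.content

    asyncio.run(main())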
@@ -81,9 +74,9 @@ class LangChainOpenAIHandler(BaseAiHandler):
             messages = [SystemMessage(content=system), HumanMessage(content=user)]
             llm = await self._create_chat_async(deployment_id=self.deployment_id)
 
-            if not hasattr(llm, 'ainvoke'):
+            if not isinstance(llm, Runnable):
                 error_message = (
-                    f"The Langchain LLM object ({type(llm)}) does not have an 'ainvoke' async method. "
+                    f"The Langchain LLM object ({type(llm)}) does not implement the Runnable interface. "
                     f"Please update your Langchain library to the latest version or "
                     f"check your LLM configuration to support async calls. "
                     f"PR-Agent is designed to utilize Langchain's async capabilities."
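Note: the new check is interface-based rather than duck-typed. Chat models such as ChatOpenAI subclass BaseChatModel, which implements Runnable, so isinstance(llm, Runnable) verifies the whole invoke/ainvoke/stream contract instead of probing for a single attribute. A minimal sketch of the difference, assuming langchain-core is installed (RunnableLambda stands in for a real chat model):

    from langchain_core.runnables import Runnable, RunnableLambda

    llm = RunnableLambda(lambda x: x)  # stand-in for a chat model

    # Old, duck-typed check: true for any object that happens to define ainvoke
    print(hasattr(llm, "ainvoke"))    # True

    # New, interface-based check: true only for actual LangChain Runnables
    print(isinstance(llm, Runnable))  # True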
@@ -116,27 +109,3 @@ class LangChainOpenAIHandler(BaseAiHandler):
         except Exception as e:
             get_logger().warning(f"Unknown error during LLM inference: {e}")
             raise openai.APIError from e
-
-    def _create_chat(self, deployment_id=None):
-        try:
-            if self.azure:
-                # using a partial function so we can set the deployment_id later to support fallback_deployments
-                # but still need to access the other settings now so we can raise a proper exception if they're missing
-                return AzureChatOpenAI(
-                    openai_api_key=get_settings().openai.key,
-                    openai_api_version=get_settings().openai.api_version,
-                    azure_deployment=deployment_id,
-                    azure_endpoint=get_settings().openai.api_base,
-                )
-            else:
-                # for llms that compatible with openai, should use custom api base
-                openai_api_base = get_settings().get("OPENAI.API_BASE", None)
-                if openai_api_base is None or len(openai_api_base) == 0:
-                    return ChatOpenAI(openai_api_key=get_settings().openai.key)
-                else:
-                    return ChatOpenAI(openai_api_key=get_settings().openai.key, openai_api_base=openai_api_base)
-        except AttributeError as e:
-            if getattr(e, "name"):
-                raise ValueError(f"OpenAI {e.name} is required") from e
-            else:
-                raise e
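Note: the deleted _create_chat duplicated configuration logic that the handler still needs on the async side (see the _create_chat_async call above). The body of _create_chat_async is not part of this diff, so the following is only a rough sketch of such a factory, reusing the settings keys visible in the removed code:

    from langchain_openai import AzureChatOpenAI, ChatOpenAI

    from pr_agent.config_loader import get_settings

    async def create_chat_async(azure: bool, deployment_id=None):
        # Hypothetical free-function version of the handler's _create_chat_async
        try:
            if azure:
                return AzureChatOpenAI(
                    openai_api_key=get_settings().openai.key,
                    openai_api_version=get_settings().openai.api_version,
                    azure_deployment=deployment_id,
                    azure_endpoint=get_settings().openai.api_base,
                )
            # OpenAI-compatible backends can point OPENAI.API_BASE at a custom endpoint
            openai_api_base = get_settings().get("OPENAI.API_BASE", None)
            if not openai_api_base:
                return ChatOpenAI(openai_api_key=get_settings().openai.key)
            return ChatOpenAI(openai_api_key=get_settings().openai.key,
                              openai_api_base=openai_api_base)
        except AttributeError as e:
            # Surface which OPENAI.* setting is missing (e.name needs Python 3.10+)
            raise ValueError(f"OpenAI {e.name} is required") from e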