Refactor logging statements for better readability and debugging

This commit is contained in:
mrT23
2024-02-25 09:58:58 +02:00
parent 877796b539
commit 34e421f79b
2 changed files with 8 additions and 5 deletions

View File

@@ -100,6 +100,7 @@ class LiteLLMAIHandler(BaseAiHandler):
TryAgain: If there is an attribute error during OpenAI inference.
"""
try:
resp, finish_reason = None, None
deployment_id = self.deployment_id
if self.azure:
model = 'azure/' + model
@@ -127,9 +128,11 @@ class LiteLLMAIHandler(BaseAiHandler):
raise TryAgain from e
if response is None or len(response["choices"]) == 0:
raise TryAgain
else:
resp = response["choices"][0]['message']['content']
finish_reason = response["choices"][0]["finish_reason"]
# usage = response.get("usage")
get_logger().debug(f"\nAI response:\n{resp}")
get_logger().debug("full_response", response=response)
get_logger().debug(f"\nAI response:\n{resp}", full_response=response)
return resp, finish_reason

View File

@@ -254,7 +254,7 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT
)
get_settings().set("openai.deployment_id", deployment_id)
return await f(model)
except Exception as e: except:
get_logger().warning(
f"Failed to generate prediction with {model}"
f"{(' from deployment ' + deployment_id) if deployment_id else ''}: "