feat: add debug logging support for streaming models

Author: Makonike
Date: 2025-07-09 15:29:03 +08:00
parent 2d8bee0d6d
commit 85e1e2d4ee


@@ -15,6 +15,23 @@ import json
 OPENAI_RETRIES = 5


+class MockResponse:
+    """Mock response object for streaming models to enable consistent logging."""
+
+    def __init__(self, resp, finish_reason):
+        self._data = {
+            "choices": [
+                {
+                    "message": {"content": resp},
+                    "finish_reason": finish_reason
+                }
+            ]
+        }
+
+    def dict(self):
+        return self._data
+
+
 class LiteLLMAIHandler(BaseAiHandler):
     """
     This class handles interactions with the OpenAI API for chat completions.
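
The mock works because the downstream logging only needs the dict shape of a non-streaming completion, not a real response object. A minimal standalone sketch of that duck-typed contract (the assertions reflect an assumption about which fields prepare_logs reads):

    mock = MockResponse("Hello!", "stop")
    data = mock.dict()
    # same nesting as a non-streaming OpenAI-style completion's .dict()
    assert data["choices"][0]["message"]["content"] == "Hello!"
    assert data["choices"][0]["finish_reason"] == "stop"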
@@ -401,7 +418,11 @@ class LiteLLMAIHandler(BaseAiHandler):
         get_logger().debug(f"\nAI response:\n{resp}")

         # log the full response for debugging
-        if not (model in self.streaming_required_models):
+        if model in self.streaming_required_models:
+            # for streaming, we don't have the full response object, so we create a mock one
+            mock_response = MockResponse(resp, finish_reason)
+            response_log = self.prepare_logs(mock_response, system, user, resp, finish_reason)
+        else:
             response_log = self.prepare_logs(response, system, user, resp, finish_reason)
         get_logger().debug("Full_response", artifact=response_log)
