diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index 666db1b1..527a20a8 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -73,6 +73,18 @@ class LiteLLMAIHandler(BaseAiHandler):
                 region_name=get_settings().aws.bedrock_region,
             )
 
+    def prepare_logs(self, response, system, user, resp, finish_reason):
+        response_log = response.dict().copy()
+        response_log['system'] = system
+        response_log['user'] = user
+        response_log['output'] = resp
+        response_log['finish_reason'] = finish_reason
+        if hasattr(self, 'main_pr_language'):
+            response_log['main_pr_language'] = self.main_pr_language
+        else:
+            response_log['main_pr_language'] = 'unknown'
+        return response_log
+
     @property
     def deployment_id(self):
         """
@@ -127,16 +139,8 @@ class LiteLLMAIHandler(BaseAiHandler):
         finish_reason = response["choices"][0]["finish_reason"]
         get_logger().debug(f"\nAI response:\n{resp}")
 
-        # log the full response for debugging, including the system and user prompts
-        response_log = response.dict()
-        response_log['system'] = system
-        response_log['user'] = user
-        response_log['output'] = resp
-        response_log['finish_reason'] = finish_reason
-        if hasattr(self, 'main_pr_language'):
-            response_log['main_pr_language'] = self.main_pr_language
-        else:
-            response_log['main_pr_language'] = 'unknown'
+        # log the full response for debugging
+        response_log = self.prepare_logs(response, system, user, resp, finish_reason)
         get_logger().debug("Full_response", artifact=response_log)
 
         # for CLI debugging