From 222155e4f2cda90e7781bb52e96d9e663b8aca49 Mon Sep 17 00:00:00 2001
From: Kenny Dizi
Date: Sat, 8 Mar 2025 08:53:29 +0700
Subject: [PATCH] Optimize logging

---
 pr_agent/algo/ai_handlers/litellm_ai_handler.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index cb064096..cae65500 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -146,7 +146,8 @@ class LiteLLMAIHandler(BaseAiHandler):
                     "type": "enabled",
                     "budget_tokens": extended_thinking_budget_tokens
                 }
-                get_logger().info(f"Adding max output tokens {extended_thinking_max_output_tokens} to model {model}, extended thinking budget tokens: {extended_thinking_budget_tokens}")
+                if get_settings().config.verbosity_level >= 2:
+                    get_logger().info(f"Adding max output tokens {extended_thinking_max_output_tokens} to model {model}, extended thinking budget tokens: {extended_thinking_budget_tokens}")
                 kwargs["max_tokens"] = extended_thinking_max_output_tokens
 
                 # temperature may only be set to 1 when thinking is enabled
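
The patch gates an info-level log line behind a verbosity check so routine runs stay quiet. Below is a minimal standalone sketch of that pattern; the `VERBOSITY_LEVEL` constant, `configure_extended_thinking` helper, and the stdlib `logging` logger are hypothetical stand-ins for pr_agent's `get_settings()` / `get_logger()`, used here only to illustrate the gating idea, not the project's actual API.

```python
# Sketch: log a configuration detail only when verbosity is high enough.
# VERBOSITY_LEVEL and configure_extended_thinking are illustrative stand-ins.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

VERBOSITY_LEVEL = 1  # e.g. loaded from configuration; 2 or more enables verbose logs


def configure_extended_thinking(kwargs: dict, model: str,
                                max_output_tokens: int, budget_tokens: int) -> dict:
    """Attach extended-thinking settings and log them only at high verbosity."""
    kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget_tokens}
    if VERBOSITY_LEVEL >= 2:  # mirrors the patch's verbosity_level >= 2 check
        logger.info(
            f"Adding max output tokens {max_output_tokens} to model {model}, "
            f"extended thinking budget tokens: {budget_tokens}"
        )
    kwargs["max_tokens"] = max_output_tokens
    return kwargs


# Usage: with VERBOSITY_LEVEL < 2 the info line above is suppressed.
configure_extended_thinking({}, "some-model", 8192, 4096)
```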