refactor(ai_handler): remove model parameter from _get_completion and handle it within the method

mrT23
2025-07-13 21:29:53 +03:00
parent 0e9cf274ef
commit 8e0c5c8784
2 changed files with 3 additions and 3 deletions

pr_agent/algo/ai_handlers/litellm_ai_handler.py

@@ -359,7 +359,7 @@ class LiteLLMAIHandler(BaseAiHandler):
             get_logger().info(f"\nUser prompt:\n{user}")
             # Get completion with automatic streaming detection
-            resp, finish_reason, response_obj = await self._get_completion(model, **kwargs)
+            resp, finish_reason, response_obj = await self._get_completion(**kwargs)
         except openai.RateLimitError as e:
             get_logger().error(f"Rate limit error during LLM inference: {e}")
@@ -383,10 +383,11 @@ class LiteLLMAIHandler(BaseAiHandler):
         return resp, finish_reason

-    async def _get_completion(self, model, **kwargs):
+    async def _get_completion(self, **kwargs):
         """
         Wrapper that automatically handles streaming for required models.
         """
+        model = kwargs["model"]
         if model in self.streaming_required_models:
             kwargs["stream"] = True
             get_logger().info(f"Using streaming mode for model {model}")


@@ -1,7 +1,6 @@
 import json

 import openai
-from azure.identity import ClientSecretCredential

 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger