Merge branch 'main' of github.com:qodo-ai/pr-agent into feature/gitea-implement

Pinyoo Thotaboot
2025-05-26 10:59:19 +07:00
15 changed files with 693 additions and 72 deletions

View File

@@ -53,9 +53,11 @@ MAX_TOKENS = {
     'vertex_ai/claude-3-5-haiku@20241022': 100000,
     'vertex_ai/claude-3-sonnet@20240229': 100000,
     'vertex_ai/claude-3-opus@20240229': 100000,
+    'vertex_ai/claude-opus-4@20250514': 200000,
     'vertex_ai/claude-3-5-sonnet@20240620': 100000,
     'vertex_ai/claude-3-5-sonnet-v2@20241022': 100000,
     'vertex_ai/claude-3-7-sonnet@20250219': 200000,
+    'vertex_ai/claude-sonnet-4@20250514': 200000,
     'vertex_ai/gemini-1.5-pro': 1048576,
     'vertex_ai/gemini-2.5-pro-preview-03-25': 1048576,
     'vertex_ai/gemini-2.5-pro-preview-05-06': 1048576,
@@ -74,22 +76,28 @@ MAX_TOKENS = {
     'anthropic.claude-v1': 100000,
     'anthropic.claude-v2': 100000,
     'anthropic/claude-3-opus-20240229': 100000,
+    'anthropic/claude-opus-4-20250514': 200000,
     'anthropic/claude-3-5-sonnet-20240620': 100000,
     'anthropic/claude-3-5-sonnet-20241022': 100000,
     'anthropic/claude-3-7-sonnet-20250219': 200000,
+    'anthropic/claude-sonnet-4-20250514': 200000,
     'claude-3-7-sonnet-20250219': 200000,
     'anthropic/claude-3-5-haiku-20241022': 100000,
     'bedrock/anthropic.claude-instant-v1': 100000,
     'bedrock/anthropic.claude-v2': 100000,
     'bedrock/anthropic.claude-v2:1': 100000,
     'bedrock/anthropic.claude-3-sonnet-20240229-v1:0': 100000,
+    'bedrock/anthropic.claude-opus-4-20250514-v1:0': 200000,
     'bedrock/anthropic.claude-3-haiku-20240307-v1:0': 100000,
     'bedrock/anthropic.claude-3-5-haiku-20241022-v1:0': 100000,
     'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0': 100000,
     'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0': 100000,
     'bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0': 200000,
+    'bedrock/anthropic.claude-sonnet-4-20250514-v1:0': 200000,
+    "bedrock/us.anthropic.claude-opus-4-20250514-v1:0": 200000,
     "bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0": 100000,
     "bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0": 200000,
+    "bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0": 200000,
     'claude-3-5-sonnet': 100000,
     'groq/meta-llama/llama-4-scout-17b-16e-instruct': 131072,
     'groq/meta-llama/llama-4-maverick-17b-128e-instruct': 131072,
@@ -102,9 +110,13 @@ MAX_TOKENS = {
     'xai/grok-2': 131072,
     'xai/grok-2-1212': 131072,
     'xai/grok-2-latest': 131072,
+    'xai/grok-3': 131072,
     'xai/grok-3-beta': 131072,
+    'xai/grok-3-fast': 131072,
     'xai/grok-3-fast-beta': 131072,
+    'xai/grok-3-mini': 131072,
     'xai/grok-3-mini-beta': 131072,
+    'xai/grok-3-mini-fast': 131072,
     'xai/grok-3-mini-fast-beta': 131072,
     'ollama/llama3': 4096,
     'watsonx/meta-llama/llama-3-8b-instruct': 4096,
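These registry entries only declare context-window sizes; no lookup logic changes in this file. As a rough, hypothetical sketch of how such a table is typically consumed (the helper `get_model_context_window` below is illustrative, not part of pr-agent):

```python
# Hypothetical helper, not part of pr-agent: resolve a model's context window
# from the MAX_TOKENS registry, tolerating an optional provider prefix.
from pr_agent.algo import MAX_TOKENS

def get_model_context_window(model: str, default: int = 8192) -> int:
    if model in MAX_TOKENS:
        return MAX_TOKENS[model]
    # Fall back to the bare model name, e.g. 'vertex_ai/claude-sonnet-4@20250514'
    # -> 'claude-sonnet-4@20250514', before giving up and using the default.
    _, _, bare_name = model.partition('/')
    return MAX_TOKENS.get(bare_name, default)

print(get_model_context_window('vertex_ai/claude-sonnet-4@20250514'))  # 200000
```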

View File

@@ -6,8 +6,8 @@ except: # we don't enforce langchain as a dependency, so if it's not installed,
 import functools
 
-from openai import APIError, RateLimitError, Timeout
-from retry import retry
+import openai
+from tenacity import retry, retry_if_exception_type, retry_if_not_exception_type, stop_after_attempt
 
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
@@ -36,8 +36,10 @@ class LangChainOpenAIHandler(BaseAiHandler):
         """
         return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
 
-    @retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
-           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
+    @retry(
+        retry=retry_if_exception_type(openai.APIError) & retry_if_not_exception_type(openai.RateLimitError),
+        stop=stop_after_attempt(OPENAI_RETRIES),
+    )
     async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
         try:
             messages = [SystemMessage(content=system), HumanMessage(content=user)]
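One subtlety in the new decorator: `openai.RateLimitError` is a subclass of `openai.APIError`, so retrying on `APIError` alone would also retry rate-limit errors; the `&` combinator excludes them. A self-contained sketch of the same tenacity predicate, with local stand-in classes in place of the openai exception types:

```python
# Stand-in exception classes; NOT the openai types, just the same hierarchy.
from tenacity import retry, retry_if_exception_type, retry_if_not_exception_type, stop_after_attempt

class APIError(Exception): ...
class RateLimitError(APIError): ...   # subclass, like openai.RateLimitError

attempts = 0

@retry(
    retry=retry_if_exception_type(APIError) & retry_if_not_exception_type(RateLimitError),
    stop=stop_after_attempt(3),
)
def flaky_call():
    global attempts
    attempts += 1
    raise RateLimitError("429")  # excluded by the predicate, so never retried

try:
    flaky_call()
except RateLimitError:
    print(attempts)  # 1 -- the exception propagated on the first attempt
# A plain APIError, by contrast, would be retried; after the third attempt
# tenacity raises RetryError (unless reraise=True is passed).
```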
@@ -47,9 +49,15 @@ class LangChainOpenAIHandler(BaseAiHandler):
             finish_reason = "completed"
             return resp.content, finish_reason
-        except (Exception) as e:
-            get_logger().error("Unknown error during OpenAI inference: ", e)
-            raise e
+        except openai.RateLimitError as e:
+            get_logger().error(f"Rate limit error during LLM inference: {e}")
+            raise
+        except openai.APIError as e:
+            get_logger().warning(f"Error during LLM inference: {e}")
+            raise
+        except Exception as e:
+            get_logger().warning(f"Unknown error during LLM inference: {e}")
+            raise openai.APIError from e
 
     def _create_chat(self, deployment_id=None):
         try:
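The rewritten handler also orders its `except` clauses most-specific first and converts anything unexpected into the provider's error type while keeping the original failure chained as the cause. A minimal sketch of that wrap-and-chain pattern, again with a local stand-in class:

```python
# Sketch of `raise <ErrorType> from e`: the original failure survives as __cause__.
class APIError(Exception): ...   # local stand-in, not openai.APIError

def infer():
    try:
        raise ValueError("malformed provider response")
    except Exception as e:
        raise APIError("LLM inference failed") from e

try:
    infer()
except APIError as e:
    print(type(e.__cause__).__name__)  # ValueError -- preserved for debugging
```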

View File

@@ -3,7 +3,7 @@ import litellm
 import openai
 import requests
 from litellm import acompletion
-from tenacity import retry, retry_if_exception_type, stop_after_attempt
+from tenacity import retry, retry_if_exception_type, retry_if_not_exception_type, stop_after_attempt
 
 from pr_agent.algo import CLAUDE_EXTENDED_THINKING_MODELS, NO_SUPPORT_TEMPERATURE_MODELS, SUPPORT_REASONING_EFFORT_MODELS, USER_MESSAGE_ONLY_MODELS
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
@@ -274,8 +274,8 @@ class LiteLLMAIHandler(BaseAiHandler):
         return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
 
     @retry(
-        retry=retry_if_exception_type((openai.APIError, openai.APIConnectionError, openai.APITimeoutError)), # No retry on RateLimitError
-        stop=stop_after_attempt(OPENAI_RETRIES)
+        retry=retry_if_exception_type(openai.APIError) & retry_if_not_exception_type(openai.RateLimitError),
+        stop=stop_after_attempt(OPENAI_RETRIES),
     )
     async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2, img_path: str = None):
         try:
@@ -371,13 +371,13 @@ class LiteLLMAIHandler(BaseAiHandler):
                 get_logger().info(f"\nUser prompt:\n{user}")
 
             response = await acompletion(**kwargs)
-        except (openai.RateLimitError) as e:
+        except openai.RateLimitError as e:
             get_logger().error(f"Rate limit error during LLM inference: {e}")
             raise
-        except (openai.APIError, openai.APITimeoutError) as e:
+        except openai.APIError as e:
             get_logger().warning(f"Error during LLM inference: {e}")
             raise
-        except (Exception) as e:
+        except Exception as e:
             get_logger().warning(f"Unknown error during LLM inference: {e}")
             raise openai.APIError from e
         if response is None or len(response["choices"]) == 0:
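Collapsing `(openai.APIError, openai.APITimeoutError)` into plain `openai.APIError` loses no coverage, assuming the openai v1 exception hierarchy in which `APITimeoutError` derives from `APIConnectionError`, which in turn derives from `APIError`. A quick check:

```python
# Assumes the openai v1 SDK exception hierarchy; all three should print True.
import openai

for exc in (openai.APIConnectionError, openai.APITimeoutError, openai.RateLimitError):
    print(exc.__name__, issubclass(exc, openai.APIError))
```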

View File

@@ -1,8 +1,8 @@
 from os import environ
-from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
+import openai
-from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
-from retry import retry
+from openai import AsyncOpenAI
+from tenacity import retry, retry_if_exception_type, retry_if_not_exception_type, stop_after_attempt
+from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
@@ -38,8 +38,10 @@ class OpenAIHandler(BaseAiHandler):
         """
         return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
 
-    @retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
-           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
+    @retry(
+        retry=retry_if_exception_type(openai.APIError) & retry_if_not_exception_type(openai.RateLimitError),
+        stop=stop_after_attempt(OPENAI_RETRIES),
+    )
     async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
         try:
             get_logger().info("System: ", system)
@@ -57,12 +59,12 @@
             get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
                               model=model, usage=usage)
             return resp, finish_reason
-        except (APIError, Timeout) as e:
-            get_logger().error("Error during OpenAI inference: ", e)
+        except openai.RateLimitError as e:
+            get_logger().error(f"Rate limit error during LLM inference: {e}")
             raise
-        except (RateLimitError) as e:
-            get_logger().error("Rate limit error during OpenAI inference: ", e)
-            raise
-        except (Exception) as e:
-            get_logger().error("Unknown error during OpenAI inference: ", e)
+        except openai.APIError as e:
+            get_logger().warning(f"Error during LLM inference: {e}")
             raise
+        except Exception as e:
+            get_logger().warning(f"Unknown error during LLM inference: {e}")
+            raise openai.APIError from e
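For context, a hypothetical driver for the rewritten handler; it assumes an `OPENAI.KEY` is configured in pr-agent's settings and that `chat_completion` keeps the signature shown above:

```python
import asyncio

from pr_agent.algo.ai_handlers.openai_ai_handler import OpenAIHandler

async def main():
    handler = OpenAIHandler()  # reads the API key from pr-agent settings
    resp, finish_reason = await handler.chat_completion(
        model="gpt-4o",
        system="You are a helpful code reviewer.",
        user="Summarize the risks in this diff.",
        temperature=0.2,
    )
    print(finish_reason, resp[:80])

asyncio.run(main())
```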

View File

@@ -1,4 +1,6 @@
 from threading import Lock
+from math import ceil
+import re
 
 from jinja2 import Environment, StrictUndefined
 from tiktoken import encoding_for_model, get_encoding
@@ -7,6 +9,16 @@ from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
+class ModelTypeValidator:
+    @staticmethod
+    def is_openai_model(model_name: str) -> bool:
+        return 'gpt' in model_name or re.match(r"^o[1-9](-mini|-preview)?$", model_name)
+
+    @staticmethod
+    def is_anthropic_model(model_name: str) -> bool:
+        return 'claude' in model_name
+
+
 class TokenEncoder:
     _encoder_instance = None
     _model = None
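The o-series regex is worth a sanity check: it accepts `o1` through `o9` with an optional `-mini`/`-preview` suffix and rejects everything else, so names like `o10` or `o1-pro` are not treated as o-series models. For example:

```python
import re

pattern = r"^o[1-9](-mini|-preview)?$"
for name in ("o1", "o3-mini", "o1-preview", "o10", "o1-pro", "gpt-4o"):
    print(name, bool(re.match(pattern, name)))
# o1, o3-mini, o1-preview -> True; o10, o1-pro, gpt-4o -> False
```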
@@ -40,6 +52,10 @@
     method.
     """
 
+    # Constants
+    CLAUDE_MODEL = "claude-3-7-sonnet-20250219"
+    CLAUDE_MAX_CONTENT_SIZE = 9_000_000  # Maximum allowed content size (9MB) for Claude API
+
     def __init__(self, pr=None, vars: dict = {}, system="", user=""):
         """
         Initializes the TokenHandler object.
@@ -51,6 +67,7 @@
         - user: The user string.
         """
         self.encoder = TokenEncoder.get_token_encoder()
+
         if pr is not None:
             self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)
@@ -79,22 +96,22 @@
             get_logger().error(f"Error in _get_system_user_tokens: {e}")
             return 0
 
-    def calc_claude_tokens(self, patch):
+    def _calc_claude_tokens(self, patch: str) -> int:
         try:
             import anthropic
             from pr_agent.algo import MAX_TOKENS
 
             client = anthropic.Anthropic(api_key=get_settings(use_context=False).get('anthropic.key'))
-            MaxTokens = MAX_TOKENS[get_settings().config.model]
+            max_tokens = MAX_TOKENS[get_settings().config.model]
 
             # Check if the content size is too large (9MB limit)
-            if len(patch.encode('utf-8')) > 9_000_000:
+            if len(patch.encode('utf-8')) > self.CLAUDE_MAX_CONTENT_SIZE:
                 get_logger().warning(
                     "Content too large for Anthropic token counting API, falling back to local tokenizer"
                 )
-                return MaxTokens
+                return max_tokens
 
             response = client.messages.count_tokens(
-                model="claude-3-7-sonnet-20250219",
+                model=self.CLAUDE_MODEL,
                 system="system",
                 messages=[{
                     "role": "user",
@@ -104,42 +121,51 @@
             return response.input_tokens
         except Exception as e:
-            get_logger().error( f"Error in Anthropic token counting: {e}")
-            return MaxTokens
+            get_logger().error(f"Error in Anthropic token counting: {e}")
+            return max_tokens
 
-    def estimate_token_count_for_non_anth_claude_models(self, model, default_encoder_estimate):
-        from math import ceil
-        import re
-
-        model_is_from_o_series = re.match(r"^o[1-9](-mini|-preview)?$", model)
-        if ('gpt' in get_settings().config.model.lower() or model_is_from_o_series) and get_settings(use_context=False).get('openai.key'):
-            return default_encoder_estimate
-        #else: Model is not an OpenAI one - therefore, cannot provide an accurate token count and instead, return a higher number as best effort.
-
-        elbow_factor = 1 + get_settings().get('config.model_token_count_estimate_factor', 0)
-        get_logger().warning(f"{model}'s expected token count cannot be accurately estimated. Using {elbow_factor} of encoder output as best effort estimate")
-        return ceil(elbow_factor * default_encoder_estimate)
+    def _apply_estimation_factor(self, model_name: str, default_estimate: int) -> int:
+        factor = 1 + get_settings().get('config.model_token_count_estimate_factor', 0)
+        get_logger().warning(f"{model_name}'s token count cannot be accurately estimated. Using factor of {factor}")
+        return ceil(factor * default_estimate)
 
-    def count_tokens(self, patch: str, force_accurate=False) -> int:
+    def _get_token_count_by_model_type(self, patch: str, default_estimate: int) -> int:
+        """
+        Get token count based on model type.
+
+        Args:
+            patch: The text to count tokens for.
+            default_estimate: The default token count estimate.
+
+        Returns:
+            int: The calculated token count.
+        """
+        model_name = get_settings().config.model.lower()
+
+        if ModelTypeValidator.is_openai_model(model_name) and get_settings(use_context=False).get('openai.key'):
+            return default_estimate
+        if ModelTypeValidator.is_anthropic_model(model_name) and get_settings(use_context=False).get('anthropic.key'):
+            return self._calc_claude_tokens(patch)
+
+        return self._apply_estimation_factor(model_name, default_estimate)
+
+    def count_tokens(self, patch: str, force_accurate: bool = False) -> int:
         """
         Counts the number of tokens in a given patch string.
 
         Args:
         - patch: The patch string.
         - force_accurate: If True, uses a more precise calculation method.
 
         Returns:
         The number of tokens in the patch string.
         """
         encoder_estimate = len(self.encoder.encode(patch, disallowed_special=()))
 
-        #If an estimate is enough (for example, in cases where the maximal allowed tokens is way below the known limits), return it.
+        # If an estimate is enough (for example, in cases where the maximal allowed tokens is way below the known limits), return it.
         if not force_accurate:
             return encoder_estimate
 
-        #else, force_accurate==True: User requested providing an accurate estimation:
-        model = get_settings().config.model.lower()
-        if 'claude' in model and get_settings(use_context=False).get('anthropic.key'):
-            return self.calc_claude_tokens(patch) # API call to Anthropic for accurate token counting for Claude models
-
-        #else: Non Anthropic provided model:
-        return self.estimate_token_count_for_non_anth_claude_models(model, encoder_estimate)
+        return self._get_token_count_by_model_type(patch, encoder_estimate)
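A hypothetical usage sketch of the refactored dispatch, assuming a configured `config.model` and the relevant provider keys: `count_tokens` stays cheap by default and only takes the model-aware path when `force_accurate=True`:

```python
from pr_agent.algo.token_handler import TokenHandler

handler = TokenHandler()  # pr=None: skip prompt accounting, just count patches
patch = "diff --git a/app.py b/app.py\n+print('hello')\n"

rough = handler.count_tokens(patch)                       # local tiktoken estimate
exact = handler.count_tokens(patch, force_accurate=True)  # dispatches by model type
print(rough, exact)
```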

View File

@@ -945,12 +945,66 @@ def clip_tokens(text: str, max_tokens: int, add_three_dots=True, num_input_token
     """
     Clip the number of tokens in a string to a maximum number of tokens.
 
+    This function limits text to a specified token count by calculating the approximate
+    character-to-token ratio and truncating the text accordingly. A safety factor of 0.9
+    (10% reduction) is applied to ensure the result stays within the token limit.
+
     Args:
-        text (str): The string to clip.
+        text (str): The string to clip. If empty or None, returns the input unchanged.
         max_tokens (int): The maximum number of tokens allowed in the string.
-        add_three_dots (bool, optional): A boolean indicating whether to add three dots at the end of the clipped
+            If negative, returns an empty string.
+        add_three_dots (bool, optional): Whether to add "\\n...(truncated)" at the end
+            of the clipped text to indicate truncation.
+            Defaults to True.
+        num_input_tokens (int, optional): Pre-computed number of tokens in the input text.
+            If provided, skips token encoding step for efficiency.
+            If None, tokens will be counted using TokenEncoder.
+            Defaults to None.
+        delete_last_line (bool, optional): Whether to remove the last line from the
+            clipped content before adding truncation indicator.
+            Useful for ensuring clean breaks at line boundaries.
+            Defaults to False.
 
     Returns:
-        str: The clipped string.
+        str: The clipped string. Returns original text if:
+            - Text is empty/None
+            - Token count is within limit
+            - An error occurs during processing
+            Returns empty string if max_tokens <= 0.
+
+    Examples:
+        Basic usage:
+            >>> text = "This is a sample text that might be too long"
+            >>> result = clip_tokens(text, max_tokens=10)
+            >>> print(result)
+            This is a sample...
+            (truncated)
+
+        Without truncation indicator:
+            >>> result = clip_tokens(text, max_tokens=10, add_three_dots=False)
+            >>> print(result)
+            This is a sample
+
+        With pre-computed token count:
+            >>> result = clip_tokens(text, max_tokens=5, num_input_tokens=15)
+            >>> print(result)
+            This...
+            (truncated)
+
+        With line deletion:
+            >>> multiline_text = "Line 1\\nLine 2\\nLine 3"
+            >>> result = clip_tokens(multiline_text, max_tokens=3, delete_last_line=True)
+            >>> print(result)
+            Line 1
+            Line 2
+            ...
+            (truncated)
+
+    Notes:
+        The function uses a safety factor of 0.9 (10% reduction) to ensure the
+        result stays within the token limit, as character-to-token ratios can vary.
+        If token encoding fails, the original text is returned with a warning logged.
     """
     if not text:
         return text
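The character-ratio arithmetic the new docstring describes reduces to a few lines. A simplified sketch of the sizing step (the hypothetical `estimated_clip_length` is not the real implementation, which also handles the truncation suffix and last-line deletion), showing where the 0.9 safety factor enters:

```python
# Simplified model of clip_tokens' sizing step, not the real implementation.
def estimated_clip_length(text: str, num_input_tokens: int, max_tokens: int) -> int:
    chars_per_token = len(text) / num_input_tokens
    return int(chars_per_token * max_tokens * 0.9)  # 10% safety margin

text = "x" * 400  # 400 chars, assumed to encode to 100 tokens
print(estimated_clip_length(text, num_input_tokens=100, max_tokens=10))  # 36
```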