Add support for documentation content exceeding token limits (#1670)

* Add support for documentation content exceeding token limits via a two-phase operation:
1. Ask the LLM to rank the headings most likely to contain an answer to the user's question.
2. Provide the files behind the top-ranked headings for the LLM to search for an answer (see the sketch after this list).

- Refactored help_docs to make the code more readable.
- When resolving the canonical path, git providers now use the default branch rather than the PR's source branch.
- Refactored token counting to make it clear when an estimate factor will be applied.
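
A hypothetical sketch of the two-phase flow described above; the helper names, prompt wording, and the ask_llm callable are illustrative assumptions, not the actual help_docs implementation:

    import re

    def extract_headings(markdown_text: str) -> list[str]:
        # Markdown headings ("#" through "######") serve as a compact table of contents.
        return re.findall(r"^#{1,6}\s+(.*)$", markdown_text, flags=re.MULTILINE)

    def rank_headings(ask_llm, question: str, docs: dict[str, str]) -> list[str]:
        # Phase 1: send only the headings (small enough to fit the context window)
        # and ask the model to rank them by relevance to the question.
        toc = "\n".join(f"{path}: {heading}"
                        for path, text in docs.items()
                        for heading in extract_headings(text))
        prompt = f"Question: {question}\nRank these documentation headings by relevance:\n{toc}"
        return ask_llm(prompt).splitlines()

    def answer_from_docs(ask_llm, question: str, docs: dict[str, str], top_n: int = 3) -> str:
        # Phase 2: load only the files behind the top-ranked headings and let the
        # model search those for an answer, keeping the prompt within token limits.
        ranked = rank_headings(ask_llm, question, docs)
        chosen = [line.split(":", 1)[0].strip() for line in ranked[:top_n] if ":" in line]
        context = "\n\n".join(docs[path] for path in chosen if path in docs)
        return ask_llm(f"Question: {question}\nAnswer using only these documents:\n{context}")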

* Code review changes:
1. Correctly handle exceptions during retry_with_fallback_models, so that a fallback model can still run when the primary model fails (see the sketch below).
2. Better naming for default_branch in the Bitbucket Cloud provider.
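
A minimal sketch of the corrected retry pattern from review item 1; the body below is an illustrative assumption, not the actual pr-agent implementation of retry_with_fallback_models:

    async def retry_with_fallback_models_sketch(run_prediction, models: list[str]):
        # Illustrative only: try each configured model in order. A failure in one
        # model is recorded and the next fallback model gets its chance, instead of
        # the exception propagating and skipping the remaining fallback models.
        last_exc: Exception | None = None
        for model in models:
            try:
                return await run_prediction(model)
            except Exception as exc:  # broad on purpose: any failure should trigger fallback
                last_exc = exc
                print(f"Model {model} failed ({exc}); trying next fallback model")
        raise last_exc if last_exc else RuntimeError("no models configured")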
Author: sharoneyal
Date: 2025-04-03 11:51:26 +03:00 (committed by GitHub)
Parent: ceaca3e621
Commit: 14971c4f5f
9 changed files with 505 additions and 187 deletions

pr_agent/algo/token_handler.py

@@ -1,7 +1,6 @@
 from threading import Lock

 from jinja2 import Environment, StrictUndefined
-from math import ceil
 from tiktoken import encoding_for_model, get_encoding

 from pr_agent.config_loader import get_settings
@@ -105,6 +104,19 @@ class TokenHandler:
             get_logger().error( f"Error in Anthropic token counting: {e}")
         return MaxTokens

+    def estimate_token_count_for_non_anth_claude_models(self, model, default_encoder_estimate):
+        from math import ceil
+        import re
+
+        model_is_from_o_series = re.match(r"^o[1-9](-mini|-preview)?$", model)
+        if ('gpt' in get_settings().config.model.lower() or model_is_from_o_series) and get_settings(use_context=False).get('openai.key'):
+            return default_encoder_estimate
+
+        #else: Model is not an OpenAI one - therefore, cannot provide an accurate token count and instead, return a higher number as best effort.
+        elbow_factor = 1 + get_settings().get('config.model_token_count_estimate_factor', 0)
+        get_logger().warning(f"{model}'s expected token count cannot be accurately estimated. Using {elbow_factor} of encoder output as best effort estimate")
+        return ceil(elbow_factor * default_encoder_estimate)
+
     def count_tokens(self, patch: str, force_accurate=False) -> int:
         """
         Counts the number of tokens in a given patch string.
@@ -116,21 +128,15 @@ class TokenHandler:
             The number of tokens in the patch string.
         """
         encoder_estimate = len(self.encoder.encode(patch, disallowed_special=()))
+
+        #If an estimate is enough (for example, in cases where the maximal allowed tokens is way below the known limits), return it.
         if not force_accurate:
             return encoder_estimate
-        #else, need to provide an accurate estimation:

+        #else, force_accurate==True: User requested providing an accurate estimation:
         model = get_settings().config.model.lower()
-        if force_accurate and 'claude' in model and get_settings(use_context=False).get('anthropic.key'):
+        if 'claude' in model and get_settings(use_context=False).get('anthropic.key'):
             return self.calc_claude_tokens(patch) # API call to Anthropic for accurate token counting for Claude models

-        #else: Non Anthropic provided model
-        import re
-        model_is_from_o_series = re.match(r"^o[1-9](-mini|-preview)?$", model)
-        if ('gpt' in get_settings().config.model.lower() or model_is_from_o_series) and get_settings(use_context=False).get('openai.key'):
-            return encoder_estimate
-
-        #else: Model is neither an OpenAI, nor an Anthropic model - therefore, cannot provide an accurate token count and instead, return a higher number as best effort.
-        elbow_factor = 1 + get_settings().get('config.model_token_count_estimate_factor', 0)
-        get_logger().warning(f"{model}'s expected token count cannot be accurately estimated. Using {elbow_factor} of encoder output as best effort estimate")
-        return ceil(elbow_factor * encoder_estimate)
+        #else: Non Anthropic provided model:
+        return self.estimate_token_count_for_non_anth_claude_models(model, encoder_estimate)
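
As a concrete illustration of the estimate-factor path above (the numbers are made up): with config.model_token_count_estimate_factor set to 0.3 and a tiktoken encoder estimate of 1000 tokens, count_tokens pads the estimate rather than trusting it:

    from math import ceil

    encoder_estimate = 1000   # hypothetical tiktoken output for the patch
    estimate_factor = 0.3     # hypothetical config.model_token_count_estimate_factor value
    elbow_factor = 1 + estimate_factor
    assert ceil(elbow_factor * encoder_estimate) == 1300  # padded best-effort token count

Note that the o-series check re.match(r"^o[1-9](-mini|-preview)?$", model) accepts names like o1, o3-mini, or o1-preview, for which the default encoder estimate is returned as-is.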