Merge branch 'main' into feature/gha-outputs-1

idubnori committed 2024-04-10 23:27:44 +09:00 (committed by GitHub)
48 changed files with 441 additions and 429 deletions

pr_agent/algo/__init__.py

@@ -11,6 +11,8 @@ MAX_TOKENS = {
     'gpt-4-1106-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
     'gpt-4-0125-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
     'gpt-4-turbo-preview': 128000,  # 128K, but may be limited by config.max_model_tokens
+    'gpt-4-turbo-2024-04-09': 128000,  # 128K, but may be limited by config.max_model_tokens
+    'gpt-4-turbo': 128000,  # 128K, but may be limited by config.max_model_tokens
     'claude-instant-1': 100000,
     'claude-2': 100000,
     'command-nightly': 4096,
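
The repeated comment "128K, but may be limited by config.max_model_tokens" refers to a cap applied elsewhere in the codebase. A minimal sketch of how that cap could be resolved for a given model; the helper name get_effective_max_tokens is illustrative and not part of this diff:

    from pr_agent.algo import MAX_TOKENS
    from pr_agent.config_loader import get_settings

    def get_effective_max_tokens(model: str) -> int:
        # Start from the model's nominal context window...
        max_tokens_model = MAX_TOKENS[model]
        # ...then shrink it if config.max_model_tokens sets a lower ceiling.
        if get_settings().config.max_model_tokens:
            max_tokens_model = min(max_tokens_model, get_settings().config.max_model_tokens)
        return max_tokens_model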

pr_agent/algo/ai_handlers/litellm_ai_handler.py

@@ -61,6 +61,9 @@ class LiteLLMAIHandler(BaseAiHandler):
         if get_settings().get("HUGGINGFACE.API_BASE", None) and 'huggingface' in get_settings().config.model:
             litellm.api_base = get_settings().huggingface.api_base
             self.api_base = get_settings().huggingface.api_base
+        if get_settings().get("OLLAMA.API_BASE", None):
+            litellm.api_base = get_settings().ollama.api_base
+            self.api_base = get_settings().ollama.api_base
         if get_settings().get("HUGGINGFACE.REPITITION_PENALTY", None):
             self.repetition_penalty = float(get_settings().huggingface.repetition_penalty)
         if get_settings().get("VERTEXAI.VERTEX_PROJECT", None):
@@ -150,4 +153,4 @@ class LiteLLMAIHandler(BaseAiHandler):
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"\nAI response:\n{resp}")
-        return resp, finish_reason
+        return resp, finish_reason

pr_agent/algo/token_handler.py

@@ -1,12 +1,25 @@
 from jinja2 import Environment, StrictUndefined
 from tiktoken import encoding_for_model, get_encoding
 from pr_agent.config_loader import get_settings
+from threading import Lock
 
 
-def get_token_encoder():
-    return encoding_for_model(get_settings().config.model) if "gpt" in get_settings().config.model else get_encoding(
-        "cl100k_base")
+class TokenEncoder:
+    _encoder_instance = None
+    _model = None
+    _lock = Lock()  # Create a lock object
+
+    @classmethod
+    def get_token_encoder(cls):
+        model = get_settings().config.model
+        if cls._encoder_instance is None or model != cls._model:  # Check without acquiring the lock for performance
+            with cls._lock:  # Lock acquisition to ensure thread safety
+                if cls._encoder_instance is None or model != cls._model:
+                    cls._model = model
+                    cls._encoder_instance = encoding_for_model(cls._model) if "gpt" in cls._model else get_encoding(
+                        "cl100k_base")
+        return cls._encoder_instance
 
 
 class TokenHandler:
     """
@@ -31,7 +44,7 @@ class TokenHandler:
         - system: The system string.
         - user: The user string.
         """
-        self.encoder = get_token_encoder()
+        self.encoder = TokenEncoder.get_token_encoder()
         if pr is not None:
             self.prompt_tokens = self._get_system_user_tokens(pr, self.encoder, vars, system, user)
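
TokenEncoder replaces the module-level get_token_encoder() with a cached, thread-safe singleton using double-checked locking: the first, unsynchronized check keeps the common path lock-free, and the repeated check under the lock ensures the encoder is built exactly once even if several threads race, or rebuilt once when the configured model changes. A short usage sketch:

    from pr_agent.algo.token_handler import TokenEncoder

    # All callers share one cached tiktoken encoder; it is only
    # rebuilt when get_settings().config.model changes.
    encoder = TokenEncoder.get_token_encoder()
    num_tokens = len(encoder.encode("Example prompt text"))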

pr_agent/algo/utils.py

@@ -13,7 +13,7 @@ import yaml
 from starlette_context import context
 
 from pr_agent.algo import MAX_TOKENS
-from pr_agent.algo.token_handler import get_token_encoder
+from pr_agent.algo.token_handler import TokenEncoder
 from pr_agent.config_loader import get_settings, global_settings
 from pr_agent.algo.types import FilePatchInfo
 from pr_agent.log import get_logger
@@ -567,7 +567,7 @@ def clip_tokens(text: str, max_tokens: int, add_three_dots=True) -> str:
         return text
 
     try:
-        encoder = get_token_encoder()
+        encoder = TokenEncoder.get_token_encoder()
         num_input_tokens = len(encoder.encode(text))
         if num_input_tokens <= max_tokens:
             return text
@@ -576,7 +576,7 @@ def clip_tokens(text: str, max_tokens: int, add_three_dots=True) -> str:
         num_output_chars = int(chars_per_token * max_tokens)
         clipped_text = text[:num_output_chars]
         if add_three_dots:
-            clipped_text += "...(truncated)"
+            clipped_text += "\n...(truncated)"
         return clipped_text
     except Exception as e:
         get_logger().warning(f"Failed to clip tokens: {e}")