Add end-to-end tests for GitHub, GitLab, and Bitbucket apps; update temperature setting usage across tools

mrT23
2024-07-27 17:19:32 +03:00
parent 3a77652660
commit ac247dbc2c
15 changed files with 352 additions and 18 deletions

View File

@@ -129,6 +129,11 @@ class LiteLLMAIHandler(BaseAiHandler):
                 "force_timeout": get_settings().config.ai_timeout,
                 "api_base": self.api_base,
             }
+            if get_settings().config.get("seed", -1) > 0:
+                if temperature > 0:
+                    raise ValueError("Seed is not supported with temperature > 0")
+                kwargs["seed"] = get_settings().config.seed
+
             if self.repetition_penalty:
                 kwargs["repetition_penalty"] = self.repetition_penalty

View File

@@ -1,16 +1,20 @@
 [config]
+# models
 model="gpt-4-turbo-2024-04-09"
 model_turbo="gpt-4o"
 fallback_models=["gpt-4-0125-preview"]
+# CLI
 git_provider="github"
 publish_output=true
 publish_output_progress=true
 verbosity_level=0 # 0,1,2
 use_extra_bad_extensions=false
+# Configurations
 use_wiki_settings_file=true
 use_repo_settings_file=true
 use_global_settings_file=true
 ai_timeout=120 # 2minutes
+# token limits
 max_description_tokens = 500
 max_commits_tokens = 500
 max_model_tokens = 32000 # Limits the maximum number of tokens that can be used by any model, regardless of the model's default capabilities.
@@ -22,6 +26,9 @@ ai_disclaimer="" # Pro feature, full text for the AI disclaimer
 output_relevant_configurations=false
 large_patch_policy = "clip" # "clip", "skip"
 is_auto_command=false
+# seed
+seed=-1 # set positive value to fix the seed
+temperature=0.2
 
 [pr_reviewer] # /review #
 # enable/disable features
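
With seed and temperature now first-class [config] entries, they can be overridden at runtime like any other pr-agent setting. A minimal sketch, assuming the Dynaconf-backed get_settings() loader the project already uses (the values are illustrative):

from pr_agent.config_loader import get_settings

# Pin generation for reproducible runs: a positive seed plus
# temperature 0 satisfies the guard in LiteLLMAIHandler above.
get_settings().set("CONFIG.TEMPERATURE", 0.0)
get_settings().set("CONFIG.SEED", 42)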

View File

@@ -89,8 +89,8 @@ class PRAddDocs:
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"\nSystem prompt:\n{system_prompt}")
             get_logger().info(f"\nUser prompt:\n{user_prompt}")
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response
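
This is the first of several identical call-site changes: each tool now draws its sampling temperature from the shared [config] entry instead of a hard-coded 0.2, so a single setting governs all of them. A sketch of the resulting pattern against a stub handler (the stub is hypothetical; only the keyword arguments mirror the diff):

import asyncio
from pr_agent.config_loader import get_settings

class StubAiHandler:
    # Hypothetical stand-in for the real BaseAiHandler, for illustration.
    async def chat_completion(self, model: str, system: str, user: str,
                              temperature: float = 0.2):
        return f"echo (temperature={temperature})", "stop"

async def demo():
    handler = StubAiHandler()
    # The shared setting replaces the per-tool hard-coded 0.2:
    response, finish_reason = await handler.chat_completion(
        model="gpt-4o", temperature=get_settings().config.temperature,
        system="You review pull requests.", user="Summarize this diff.")
    print(response, finish_reason)

asyncio.run(demo())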

View File

@@ -304,8 +304,8 @@ class PRCodeSuggestions:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(self.pr_code_suggestions_prompt_system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         # load suggestions from the AI response
         data = self._prepare_pr_code_suggestions(response)

View File

@@ -325,7 +325,7 @@ class PRDescription:
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
-            temperature=0.2,
+            temperature=get_settings().config.temperature,
             system=system_prompt,
             user=user_prompt
         )

View File

@@ -142,7 +142,7 @@ class PRGenerateLabels:
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
-            temperature=0.2,
+            temperature=get_settings().config.temperature,
             system=system_prompt,
             user=user_prompt
         )

View File

@@ -66,8 +66,8 @@ class PRInformationFromUser:
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"\nSystem prompt:\n{system_prompt}")
             get_logger().info(f"\nUser prompt:\n{user_prompt}")
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response
 
     def _prepare_pr_answer(self) -> str:

View File

@@ -102,6 +102,6 @@ class PR_LineQuestions:
             print(f"\nSystem prompt:\n{system_prompt}")
             print(f"\nUser prompt:\n{user_prompt}")
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response

View File

@@ -108,12 +108,12 @@ class PRQuestions:
         user_prompt = environment.from_string(get_settings().pr_questions_prompt.user).render(variables)
         if 'img_path' in variables:
             img_path = self.vars['img_path']
-            response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                            system=system_prompt, user=user_prompt,
-                                                                            img_path=img_path)
+            response, finish_reason = await (self.ai_handler.chat_completion
+                                             (model=model, temperature=get_settings().config.temperature,
+                                              system=system_prompt, user=user_prompt, img_path=img_path))
         else:
-            response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                            system=system_prompt, user=user_prompt)
+            response, finish_reason = await self.ai_handler.chat_completion(
+                model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response
 
     def _prepare_pr_answer(self) -> str:
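
PRQuestions is the one call site with an extra keyword: when the question references an image, img_path is forwarded so vision-capable models can see it. A small sketch of that branch with a hypothetical stub (only the parameter shape follows the diff; the file name is illustrative):

import asyncio
from pr_agent.config_loader import get_settings

class StubVisionHandler:
    # Hypothetical stand-in for an image-capable handler, illustration only.
    async def chat_completion(self, model: str, system: str, user: str,
                              temperature: float = 0.2, img_path: str = None):
        suffix = f" [image: {img_path}]" if img_path else ""
        return f"echo{suffix} (temperature={temperature})", "stop"

async def demo():
    response, _ = await StubVisionHandler().chat_completion(
        model="gpt-4o", system="You answer questions about a PR.",
        user="What does the attached screenshot show?",
        temperature=get_settings().config.temperature,
        img_path="screenshot.png")
    print(response)

asyncio.run(demo())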

View File

@@ -180,7 +180,7 @@ class PRReviewer:
         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
-            temperature=0.2,
+            temperature=get_settings().config.temperature,
             system=system_prompt,
             user=user_prompt
         )

View File

@@ -103,8 +103,8 @@ class PRUpdateChangelog:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.user).render(variables)
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, system=system_prompt, user=user_prompt, temperature=get_settings().config.temperature)
         return response