Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-02 03:40:38 +08:00
Add end-to-end tests for GitHub, GitLab, and Bitbucket apps; update temperature setting usage across tools
@@ -129,6 +129,11 @@ class LiteLLMAIHandler(BaseAiHandler):
             "force_timeout": get_settings().config.ai_timeout,
             "api_base": self.api_base,
         }
+        if get_settings().config.get("seed", -1) > 0:
+            if temperature > 0:
+                raise ValueError("Seed is not supported with temperature > 0")
+            kwargs["seed"] = get_settings().config.seed
+
         if self.repetition_penalty:
             kwargs["repetition_penalty"] = self.repetition_penalty
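The new guard refuses to combine a fixed seed with non-zero temperature. A minimal standalone sketch of that behavior (not the handler itself; the settings object is replaced by plain arguments for illustration):

def build_sampling_kwargs(seed: int, temperature: float) -> dict:
    # Mirrors the guard added to LiteLLMAIHandler: a positive seed is only
    # honored when temperature is 0; otherwise the call is rejected.
    kwargs = {"temperature": temperature}
    if seed > 0:
        if temperature > 0:
            raise ValueError("Seed is not supported with temperature > 0")
        kwargs["seed"] = seed
    return kwargs

print(build_sampling_kwargs(seed=42, temperature=0.0))  # {'temperature': 0.0, 'seed': 42}
# build_sampling_kwargs(seed=42, temperature=0.2) would raise ValueError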
@@ -1,16 +1,20 @@
 [config]
+# models
 model="gpt-4-turbo-2024-04-09"
 model_turbo="gpt-4o"
 fallback_models=["gpt-4-0125-preview"]
+# CLI
 git_provider="github"
 publish_output=true
 publish_output_progress=true
 verbosity_level=0 # 0,1,2
 use_extra_bad_extensions=false
+# Configurations
 use_wiki_settings_file=true
 use_repo_settings_file=true
 use_global_settings_file=true
 ai_timeout=120 # 2minutes
+# token limits
 max_description_tokens = 500
 max_commits_tokens = 500
 max_model_tokens = 32000 # Limits the maximum number of tokens that can be used by any model, regardless of the model's default capabilities.

@@ -22,6 +26,9 @@ ai_disclaimer="" # Pro feature, full text for the AI disclaimer
 output_relevant_configurations=false
 large_patch_policy = "clip" # "clip", "skip"
 is_auto_command=false
+# seed
+seed=-1 # set positive value to fix the seed
+temperature=0.2

 [pr_reviewer] # /review #
 # enable/disable features
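With seed and temperature now first-class [config] keys, they can be overridden like any other setting. A speculative example of pinning deterministic output from Python; the set() call pattern follows the one used in the test utilities below, and the exact key casing is an assumption:

from pr_agent.config_loader import get_settings

# Deterministic runs: a positive seed is only valid together with temperature 0,
# per the guard added in LiteLLMAIHandler above.
get_settings().set("config.temperature", 0)
get_settings().set("config.seed", 42)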
@@ -89,8 +89,8 @@ class PRAddDocs:
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"\nSystem prompt:\n{system_prompt}")
             get_logger().info(f"\nUser prompt:\n{user_prompt}")
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)

         return response
@@ -304,8 +304,8 @@ class PRCodeSuggestions:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(self.pr_code_suggestions_prompt_system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)

         # load suggestions from the AI response
         data = self._prepare_pr_code_suggestions(response)
@@ -325,7 +325,7 @@ class PRDescription:

         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
-            temperature=0.2,
+            temperature=get_settings().config.temperature,
             system=system_prompt,
             user=user_prompt
         )
@@ -142,7 +142,7 @@ class PRGenerateLabels:

         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
-            temperature=0.2,
+            temperature=get_settings().config.temperature,
             system=system_prompt,
             user=user_prompt
         )
@@ -66,8 +66,8 @@ class PRInformationFromUser:
         if get_settings().config.verbosity_level >= 2:
             get_logger().info(f"\nSystem prompt:\n{system_prompt}")
             get_logger().info(f"\nUser prompt:\n{user_prompt}")
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response

     def _prepare_pr_answer(self) -> str:
@@ -102,6 +102,6 @@ class PR_LineQuestions:
             print(f"\nSystem prompt:\n{system_prompt}")
             print(f"\nUser prompt:\n{user_prompt}")

-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response
@@ -108,12 +108,12 @@ class PRQuestions:
         user_prompt = environment.from_string(get_settings().pr_questions_prompt.user).render(variables)
         if 'img_path' in variables:
             img_path = self.vars['img_path']
-            response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                            system=system_prompt, user=user_prompt,
-                                                                            img_path=img_path)
+            response, finish_reason = await (self.ai_handler.chat_completion
+                                             (model=model, temperature=get_settings().config.temperature,
+                                              system=system_prompt, user=user_prompt, img_path=img_path))
         else:
-            response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                            system=system_prompt, user=user_prompt)
+            response, finish_reason = await self.ai_handler.chat_completion(
+                model=model, temperature=get_settings().config.temperature, system=system_prompt, user=user_prompt)
         return response

     def _prepare_pr_answer(self) -> str:
@@ -180,7 +180,7 @@ class PRReviewer:

         response, finish_reason = await self.ai_handler.chat_completion(
             model=model,
-            temperature=0.2,
+            temperature=get_settings().config.temperature,
             system=system_prompt,
             user=user_prompt
         )
@@ -103,8 +103,8 @@ class PRUpdateChangelog:
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_update_changelog_prompt.user).render(variables)
-        response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
-                                                                        system=system_prompt, user=user_prompt)
+        response, finish_reason = await self.ai_handler.chat_completion(
+            model=model, system=system_prompt, user=user_prompt, temperature=get_settings().config.temperature)

         return response
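Every tool above now reads the sampling temperature from configuration instead of the hard-coded 0.2. A minimal sketch of the shared call pattern; the helper name is a stand-in, not code from the commit:

from pr_agent.config_loader import get_settings

async def _get_prediction(ai_handler, model: str, system_prompt: str, user_prompt: str):
    # Shared pattern after this commit: the temperature comes from
    # get_settings().config.temperature (default 0.2 in configuration.toml)
    # rather than being hard-coded at each call site.
    response, finish_reason = await ai_handler.chat_completion(
        model=model,
        temperature=get_settings().config.temperature,
        system=system_prompt,
        user=user_prompt,
    )
    return response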
tests/e2e_tests/e2e_utils.py (new file, 35 lines)
@@ -0,0 +1,35 @@
FILE_PATH = "pr_agent/cli_pip.py"

PR_HEADER_START_WITH = '### **User description**\nupdate cli_pip.py\n\n\n___\n\n### **PR Type**'
REVIEW_START_WITH = '## PR Reviewer Guide 🔍\n\n<table>\n<tr><td>⏱️ <strong>Estimated effort to review</strong>:'
IMPROVE_START_WITH_REGEX_PATTERN = r'^## PR Code Suggestions ✨\n\n<!-- [a-z0-9]+ -->\n\n<table><thead><tr><td>Category</td>'

NUM_MINUTES = 5

NEW_FILE_CONTENT = """\
from pr_agent import cli
from pr_agent.config_loader import get_settings


def main():
    # Fill in the following values
    provider = "github"  # GitHub provider
    user_token = "..."  # GitHub user token
    openai_key = "ghs_afsdfasdfsdf"  # Example OpenAI key
    pr_url = "..."  # PR URL, for example 'https://github.com/Codium-ai/pr-agent/pull/809'
    command = "/improve"  # Command to run (e.g. '/review', '/describe', 'improve', '/ask="What is the purpose of this PR?"')

    # Setting the configurations
    get_settings().set("CONFIG.git_provider", provider)
    get_settings().set("openai.key", openai_key)
    get_settings().set("github.user_token", user_token)

    # Run the command. Feedback will appear in GitHub PR comments
    output = cli.run_command(pr_url, command)

    print(output)


if __name__ == '__main__':
    main()
"""
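The /improve feedback is validated by regex because each suggestions comment embeds a random hidden marker. A small illustrative check, run from the repo root; the sample string below is fabricated and only its shape matters:

import re

from tests.e2e_tests.e2e_utils import IMPROVE_START_WITH_REGEX_PATTERN

# Fabricated sample of an /improve comment header with a made-up hidden marker.
sample = '## PR Code Suggestions ✨\n\n<!-- 0a1b2c3d4e -->\n\n<table><thead><tr><td>Category</td><td>Suggestion</td>'
assert re.match(IMPROVE_START_WITH_REGEX_PATTERN, sample)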
tests/e2e_tests/test_bitbucket_app.py (new file, 100 lines)
@@ -0,0 +1,100 @@
import hashlib
import os
import re
import time
from datetime import datetime

import jwt
from atlassian.bitbucket import Cloud

import requests
from requests.auth import HTTPBasicAuth

from pr_agent.config_loader import get_settings
from pr_agent.log import setup_logger, get_logger
from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES


log_level = os.environ.get("LOG_LEVEL", "INFO")
setup_logger(log_level)
logger = get_logger()

def test_e2e_run_bitbucket_app():
    repo_slug = 'pr-agent-tests'
    project_key = 'codiumai'
    base_branch = "main"  # or any base branch you want
    new_branch = f"bitbucket_app_e2e_test-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
    get_settings().config.git_provider = "bitbucket"

    try:
        # Add username and password for authentication
        username = get_settings().get("BITBUCKET.USERNAME", None)
        password = get_settings().get("BITBUCKET.PASSWORD", None)
        s = requests.Session()
        s.auth = (username, password)  # Use HTTP Basic Auth
        bitbucket_client = Cloud(session=s)
        repo = bitbucket_client.workspaces.get(workspace=project_key).repositories.get(repo_slug)

        # Create a new branch from the base branch
        logger.info(f"Creating a new branch {new_branch} from {base_branch}")
        source_branch = repo.branches.get(base_branch)
        target_repo = repo.branches.create(new_branch, source_branch.hash)

        # Update the file content
        url = (f"https://api.bitbucket.org/2.0/repositories/{project_key}/{repo_slug}/src")
        files = {FILE_PATH: NEW_FILE_CONTENT}
        data = {
            "message": "update cli_pip.py",
            "branch": new_branch,
        }
        requests.request("POST", url, auth=HTTPBasicAuth(username, password), data=data, files=files)

        # Create a pull request
        logger.info(f"Creating a pull request from {new_branch} to {base_branch}")
        pr = repo.pullrequests.create(
            title=f'{new_branch}',
            description="update cli_pip.py",
            source_branch=new_branch,
            destination_branch=base_branch
        )

        # check every 1 minute, for 5 minutes if the PR has all the tool results
        for i in range(NUM_MINUTES):
            logger.info(f"Waiting for the PR to get all the tool results...")
            time.sleep(60)
            comments = list(pr.comments())
            comments_raw = [c.raw for c in comments]
            if len(comments) >= 5:  # header, 3 suggestions, 1 review
                valid_review = False
                for comment_raw in comments_raw:
                    if comment_raw.startswith('## PR Reviewer Guide 🔍'):
                        valid_review = True
                        break
                if valid_review:
                    break
                else:
                    logger.error(f"REVIEW feedback is invalid")
                    raise Exception("REVIEW feedback is invalid")
            else:
                logger.info(f"Waiting for the PR to get all the tool results. {i + 1} minute(s) passed")
        else:
            assert False, f"After {NUM_MINUTES} minutes, the PR did not get all the tool results"

        # cleanup - delete the branch
        pr.decline()
        repo.branches.delete(new_branch)

        # If we reach here, the test is successful
        logger.info(f"Succeeded in running e2e test for Bitbucket app on the PR")
    except Exception as e:
        logger.error(f"Failed to run e2e test for Bitbucket app: {e}")
        # delete the branch
        pr.decline()
        repo.branches.delete(new_branch)
        assert False


if __name__ == '__main__':
    test_e2e_run_bitbucket_app()
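All three e2e tests lean on Python's for/else: the else branch runs only when the loop finishes without break, i.e. when no valid feedback arrived within NUM_MINUTES. A stripped-down illustration of the idiom; poll() is a hypothetical stand-in, not a function from the tests:

import time

def wait_for_feedback(poll, num_minutes: int = 5) -> None:
    # poll() is a hypothetical callable returning True once the PR comments are ready.
    for i in range(num_minutes):
        time.sleep(60)
        if poll():
            break  # feedback arrived, so the else clause is skipped
    else:
        # executed only if the loop never hit `break`
        raise AssertionError(f"After {num_minutes} minutes, the PR did not get all the tool results")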
tests/e2e_tests/test_github_app.py (new file, 96 lines)
@@ -0,0 +1,96 @@
import os
import re
import time
from datetime import datetime

from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.log import setup_logger, get_logger
from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES

log_level = os.environ.get("LOG_LEVEL", "INFO")
setup_logger(log_level)
logger = get_logger()


def test_e2e_run_github_app():
    """
    What we want to do:
    (1) open a PR in a repo 'https://github.com/Codium-ai/pr-agent-tests'
    (2) wait for 5 minutes until the PR is processed by the GitHub app
    (3) check that the relevant tools have been executed
    """
    base_branch = "main"  # or any base branch you want
    new_branch = f"github_app_e2e_test-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
    repo_url = 'Codium-ai/pr-agent-tests'
    get_settings().config.git_provider = "github"
    git_provider = get_git_provider()()
    github_client = git_provider.github_client
    repo = github_client.get_repo(repo_url)

    try:
        # Create a new branch from the base branch
        source = repo.get_branch(base_branch)
        logger.info(f"Creating a new branch {new_branch} from {base_branch}")
        repo.create_git_ref(ref=f"refs/heads/{new_branch}", sha=source.commit.sha)

        # Get the file you want to edit
        file = repo.get_contents(FILE_PATH, ref=base_branch)
        # content = file.decoded_content.decode()

        # Update the file content
        logger.info(f"Updating the file {FILE_PATH}")
        commit_message = "update cli_pip.py"
        repo.update_file(
            file.path,
            commit_message,
            NEW_FILE_CONTENT,
            file.sha,
            branch=new_branch
        )

        # Create a pull request
        logger.info(f"Creating a pull request from {new_branch} to {base_branch}")
        pr = repo.create_pull(
            title=new_branch,
            body="update cli_pip.py",
            head=new_branch,
            base=base_branch
        )

        # check every 1 minute, for 5 minutes, if the PR has all the tool results
        for i in range(NUM_MINUTES):
            logger.info(f"Waiting for the PR to get all the tool results...")
            time.sleep(60)
            logger.info(f"Checking the PR {pr.html_url} after {i + 1} minute(s)")
            pr.update()
            pr_header_body = pr.body
            comments = list(pr.get_issue_comments())
            if len(comments) == 2:
                comments_body = [comment.body for comment in comments]
                assert pr_header_body.startswith(PR_HEADER_START_WITH), "DESCRIBE feedback is invalid"
                assert comments_body[0].startswith(REVIEW_START_WITH), "REVIEW feedback is invalid"
                assert re.match(IMPROVE_START_WITH_REGEX_PATTERN, comments_body[1]), "IMPROVE feedback is invalid"
                break
            else:
                logger.info(f"Waiting for the PR to get all the tool results. {i + 1} minute(s) passed")
        else:
            assert False, f"After {NUM_MINUTES} minutes, the PR did not get all the tool results"

        # cleanup - delete the branch
        logger.info(f"Deleting the branch {new_branch}")
        repo.get_git_ref(f"heads/{new_branch}").delete()

        # If we reach here, the test is successful
        logger.info(f"Succeeded in running e2e test for GitHub app on the PR {pr.html_url}")
    except Exception as e:
        logger.error(f"Failed to run e2e test for GitHub app: {e}")
        # delete the branch
        logger.info(f"Deleting the branch {new_branch}")
        repo.get_git_ref(f"heads/{new_branch}").delete()
        assert False


if __name__ == '__main__':
    test_e2e_run_github_app()
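The GitHub check expects the /describe output in the PR body and exactly two issue comments (review first, then improve). A small hedged helper restating that contract; the function name and shape are illustrative, not part of the commit:

import re

from tests.e2e_tests.e2e_utils import (PR_HEADER_START_WITH, REVIEW_START_WITH,
                                        IMPROVE_START_WITH_REGEX_PATTERN)

def feedback_is_complete(pr_body: str, comment_bodies: list) -> bool:
    # Restates the assertions in test_e2e_run_github_app: /describe rewrites
    # the PR body, /review and /improve arrive as the two issue comments.
    return (len(comment_bodies) == 2
            and pr_body.startswith(PR_HEADER_START_WITH)
            and comment_bodies[0].startswith(REVIEW_START_WITH)
            and re.match(IMPROVE_START_WITH_REGEX_PATTERN, comment_bodies[1]) is not None)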
tests/e2e_tests/test_gitlab_webhook.py (new file, 91 lines)
@@ -0,0 +1,91 @@
import os
import re
import time
from datetime import datetime

import gitlab

from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pr_agent.log import setup_logger, get_logger
from tests.e2e_tests.e2e_utils import NEW_FILE_CONTENT, FILE_PATH, PR_HEADER_START_WITH, REVIEW_START_WITH, \
    IMPROVE_START_WITH_REGEX_PATTERN, NUM_MINUTES

log_level = os.environ.get("LOG_LEVEL", "INFO")
setup_logger(log_level)
logger = get_logger()


def test_e2e_run_github_app():
    # GitLab setup
    GITLAB_URL = "https://gitlab.com"
    GITLAB_TOKEN = get_settings().gitlab.PERSONAL_ACCESS_TOKEN
    gl = gitlab.Gitlab(GITLAB_URL, private_token=GITLAB_TOKEN)
    repo_url = 'codiumai/pr-agent-tests'
    project = gl.projects.get(repo_url)

    base_branch = "main"  # or any base branch you want
    new_branch = f"github_app_e2e_test-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"

    try:
        # Create a new branch from the base branch
        logger.info(f"Creating a new branch {new_branch} from {base_branch}")
        project.branches.create({'branch': new_branch, 'ref': base_branch})

        # Get the file you want to edit
        file = project.files.get(file_path=FILE_PATH, ref=base_branch)
        # content = file.decode()

        # Update the file content
        logger.info(f"Updating the file {FILE_PATH}")
        commit_message = "update cli_pip.py"
        file.content = NEW_FILE_CONTENT
        file.save(branch=new_branch, commit_message=commit_message)

        # Create a merge request
        logger.info(f"Creating a merge request from {new_branch} to {base_branch}")
        mr = project.mergerequests.create({
            'source_branch': new_branch,
            'target_branch': base_branch,
            'title': new_branch,
            'description': "update cli_pip.py"
        })
        logger.info(f"Merge request created: {mr.web_url}")

        # check every 1 minute, for 5 minutes, if the MR has all the tool results
        for i in range(NUM_MINUTES):
            logger.info(f"Waiting for the MR to get all the tool results...")
            time.sleep(60)
            logger.info(f"Checking the MR {mr.web_url} after {i + 1} minute(s)")
            mr = project.mergerequests.get(mr.iid)
            mr_header_body = mr.description
            comments = mr.notes.list()[::-1]
            # clean all system comments
            comments = [comment for comment in comments if comment.system is False]
            if len(comments) == 2:  # "changed the description" is received as the first comment
                comments_body = [comment.body for comment in comments]
                if 'Work in progress' in comments_body[1]:
                    continue
                assert mr_header_body.startswith(PR_HEADER_START_WITH), "DESCRIBE feedback is invalid"
                assert comments_body[0].startswith(REVIEW_START_WITH), "REVIEW feedback is invalid"
                assert re.match(IMPROVE_START_WITH_REGEX_PATTERN, comments_body[1]), "IMPROVE feedback is invalid"
                break
            else:
                logger.info(f"Waiting for the MR to get all the tool results. {i + 1} minute(s) passed")
        else:
            assert False, f"After {NUM_MINUTES} minutes, the MR did not get all the tool results"

        # cleanup - delete the branch
        logger.info(f"Deleting the branch {new_branch}")
        project.branches.delete(new_branch)

        # If we reach here, the test is successful
        logger.info(f"Succeeded in running e2e test for GitLab app on the MR {mr.web_url}")
    except Exception as e:
        logger.error(f"Failed to run e2e test for GitHub app: {e}")
        logger.info(f"Deleting the branch {new_branch}")
        project.branches.delete(new_branch)
        assert False


if __name__ == '__main__':
    test_e2e_run_github_app()
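The tests read credentials through the standard pr-agent settings (BITBUCKET.USERNAME / BITBUCKET.PASSWORD, gitlab.PERSONAL_ACCESS_TOKEN, and the GitHub token). A hypothetical local driver, shown only as a sketch; the exact setting keys and the suggestion to run via pytest are assumptions about local setup, not part of this commit:

# Hypothetical local driver for the e2e tests; the keys mirror what the tests
# read via get_settings(). Fill in real credentials before running, or simply
# configure them in your settings files and run `pytest tests/e2e_tests`.
from pr_agent.config_loader import get_settings

from tests.e2e_tests.test_github_app import test_e2e_run_github_app

get_settings().set("github.user_token", "<github token>")
get_settings().set("bitbucket.username", "<bitbucket username>")
get_settings().set("bitbucket.password", "<bitbucket app password>")
get_settings().set("gitlab.personal_access_token", "<gitlab PAT>")

test_e2e_run_github_app()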