mirror of https://github.com/qodo-ai/pr-agent.git
synced 2025-07-10 15:50:37 +08:00

Compare commits: 1 commit, branch mrT23-patc

Commit b9e3e5603b
@@ -3,7 +3,7 @@

You can use the Bitbucket Pipeline system to run Qodo Merge whenever a pull request is opened or updated.

1. Add the following file to your repository as `bitbucket-pipelines.yml`:
1. Add the following file to your repository as `bitbucket_pipelines.yml`:

```yaml
pipelines:
```
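The hunk above cuts the YAML off right after `pipelines:`. For orientation, here is a sketch of a complete file in the shape the Qodo Merge docs describe; the step name, image tag, repository variables, and the `codiumai/pr-agent:latest` invocation are assumptions, not necessarily the exact documented pipeline:

```yaml
pipelines:
  pull-requests:
    '**':
      - step:
          name: PR Agent Review   # assumed step name
          image: python:3.12      # assumed; any image able to run docker works
          script:
            # OPENAI_API_KEY and BITBUCKET_BEARER_TOKEN are repository variables you define yourself
            - docker run -e CONFIG.GIT_PROVIDER=bitbucket -e OPENAI.KEY=$OPENAI_API_KEY -e BITBUCKET.BEARER_TOKEN=$BITBUCKET_BEARER_TOKEN codiumai/pr-agent:latest --pr_url=https://bitbucket.org/$BITBUCKET_WORKSPACE/$BITBUCKET_REPO_SLUG/pull-requests/$BITBUCKET_PR_ID review
```

`BITBUCKET_WORKSPACE`, `BITBUCKET_REPO_SLUG`, and `BITBUCKET_PR_ID` are standard Bitbucket Pipelines variables, available automatically in pull-request-triggered builds.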
@@ -42,36 +42,21 @@ Note that if your base branches are not protected, don't set the variables as `p

## Run a GitLab webhook server

1. From the GitLab workspace or group, create an access token with "Reporter" role ("Developer" if using Pro version of the agent) and "api" scope.
1. From the GitLab workspace or group, create an access token. Enable the "api" scope only.

2. Generate a random secret for your app, and save it for later. For example, you can use:

```
WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
```

3. Follow the instructions to build the Docker image, setup a secrets file and deploy on your own server from [here](https://qodo-merge-docs.qodo.ai/installation/github/#run-as-a-github-app), steps 4-7.

3. Clone this repository:
4. In the secrets file, fill in the following:
   - Your OpenAI key.
   - In the [gitlab] section, fill in personal_access_token and shared_secret. The access token can be a personal access token, or a group or project access token.
   - Set deployment_type to 'gitlab' in [configuration.toml](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml)

```
git clone https://github.com/Codium-ai/pr-agent.git
```

5. Create a webhook in GitLab. Set the URL to `http[s]://<PR_AGENT_HOSTNAME>/webhook`. Set the secret token to the generated secret from step 2.
   In the "Trigger" section, check the 'comments' and 'merge request events' boxes.

4. Prepare variables and secrets. Skip this step if you plan on setting these as environment variables when running the agent:
   1. In the configuration file/variables:
      - Set `deployment_type` to "gitlab"
   2. In the secrets file/variables:
      - Set your AI model key in the respective section
      - In the [gitlab] section, set `personal_access_token` (with the token from step 1) and `shared_secret` (with the secret from step 2); see the sketch below
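A minimal sketch of the corresponding secrets entries (section and key names follow the `.secrets_template.toml` hunk further down this page; all values are placeholders):

```toml
# .secrets.toml
[openai]
key = ""  # your OpenAI key

[gitlab]
personal_access_token = ""  # token from step 1
shared_secret = ""          # webhook secret from step 2
```

The configuration file additionally needs `deployment_type` set to "gitlab", as step 4.1 says.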
5. Build a Docker image for the app and optionally push it to a Docker repository. We'll use Dockerhub as an example:

```
docker build . -t gitlab_pr_agent --target gitlab_webhook -f docker/Dockerfile
docker push codiumai/pr-agent:gitlab_webhook # Push to your Docker repository
```

6. Create a webhook in GitLab. Set the URL to `http[s]://<PR_AGENT_HOSTNAME>/webhook`, the secret token to the generated secret from step 2, and enable the triggers `push`, `comments` and `merge request events`.

7. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
6. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
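On each delivery, GitLab sends the configured secret token in the `X-Gitlab-Token` header, which is what `shared_secret` is checked against. A minimal sketch of that check (FastAPI is in the project's requirements, but this route, handler, and constant are illustrative assumptions rather than pr-agent's actual server code):

```python
import hmac

from fastapi import FastAPI, HTTPException, Request

app = FastAPI()
WEBHOOK_SECRET = "..."  # placeholder; the shared_secret generated in step 2

@app.post("/webhook")
async def gitlab_webhook(request: Request):
    # GitLab echoes the webhook's secret token in this header on every delivery
    token = request.headers.get("X-Gitlab-Token", "")
    if not hmac.compare_digest(token, WEBHOOK_SECRET):
        raise HTTPException(status_code=401, detail="unauthorized")
    payload = await request.json()
    # 'note' events carry comments; 'merge_request' events carry MR open/update
    return {"ok": True, "event": payload.get("object_kind")}
```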
@@ -133,26 +133,9 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat

If you do want to set explicit credentials, you can set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the path of a JSON credentials file.
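For example (the path is a placeholder):

```
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
```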
### Google AI Studio

To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant models in the configuration section of the configuration file:

```toml
[config] # in configuration.toml
model="google_ai_studio/gemini-1.5-flash"
model_turbo="google_ai_studio/gemini-1.5-flash"
fallback_models=["google_ai_studio/gemini-1.5-flash"]

[google_ai_studio] # in .secrets.toml
gemini_api_key = "..."
```

If you don't want to set the API key in the .secrets.toml file, you can set the `GOOGLE_AI_STUDIO.GEMINI_API_KEY` environment variable.
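That environment variable works because litellm's Gemini (AI Studio) provider reads `GEMINI_API_KEY` from the environment, which is exactly the export the `LiteLLMAIHandler` hunk further down this page performs. A minimal direct-call sketch (the key and prompt are placeholders; `gemini/` is litellm's prefix for AI Studio models, as in the MAX_TOKENS hunk below):

```python
import os

from litellm import completion

# litellm's AI Studio provider looks up GEMINI_API_KEY in the environment
os.environ["GEMINI_API_KEY"] = "..."  # placeholder

resp = completion(
    model="gemini/gemini-1.5-flash",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(resp.choices[0].message.content)
```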
### Anthropic

To use Anthropic models, set the relevant models in the configuration section of the configuration file:

```
[config]
model="anthropic/claude-3-opus-20240229"
```
@@ -38,8 +38,6 @@ MAX_TOKENS = {
    'vertex_ai/gemini-1.5-pro': 1048576,
    'vertex_ai/gemini-1.5-flash': 1048576,
    'vertex_ai/gemma2': 8200,
    'gemini/gemini-1.5-pro': 1048576,
    'gemini/gemini-1.5-flash': 1048576,
    'codechat-bison': 6144,
    'codechat-bison-32k': 32000,
    'anthropic.claude-instant-v1': 100000,
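These per-model limits are what pr-agent consults when sizing prompts. A toy lookup showing the table's role (this helper and its default are illustrative; the real logic lives in `get_max_tokens`, imported from `pr_agent.algo.utils` in the pr_description hunk below):

```python
from pr_agent.algo import MAX_TOKENS

def max_tokens_for(model: str, default: int = 8192) -> int:
    # unknown models fall back to a conservative default in this sketch
    return MAX_TOKENS.get(model, default)

print(max_tokens_for('vertex_ai/gemini-1.5-pro'))  # 1048576
```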
@ -83,11 +83,6 @@ class LiteLLMAIHandler(BaseAiHandler):
|
||||
litellm.vertex_location = get_settings().get(
|
||||
"VERTEXAI.VERTEX_LOCATION", None
|
||||
)
|
||||
# Google AI Studio
|
||||
# SEE https://docs.litellm.ai/docs/providers/gemini
|
||||
if get_settings().get("GOOGLE_AI_STUDIO.GEMINI_API_KEY", None):
|
||||
os.environ["GEMINI_API_KEY"] = get_settings().google_ai_studio.gemini_api_key
|
||||
|
||||
def prepare_logs(self, response, system, user, resp, finish_reason):
|
||||
response_log = response.dict().copy()
|
||||
response_log['system'] = system
|
||||
|
@@ -1,7 +1,6 @@
from os import environ
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
import openai
from openai import APIError, AsyncOpenAI, RateLimitError, Timeout
from openai.error import APIError, RateLimitError, Timeout, TryAgain
from retry import retry

from pr_agent.config_loader import get_settings

@@ -15,7 +14,7 @@ class OpenAIHandler(BaseAiHandler):
        # Initialize OpenAIHandler specific attributes here
        try:
            super().__init__()
            environ["OPENAI_API_KEY"] = get_settings().openai.key
            openai.api_key = get_settings().openai.key
            if get_settings().get("OPENAI.ORG", None):
                openai.organization = get_settings().openai.org
            if get_settings().get("OPENAI.API_TYPE", None):

@@ -25,7 +24,7 @@ class OpenAIHandler(BaseAiHandler):
            if get_settings().get("OPENAI.API_VERSION", None):
                openai.api_version = get_settings().openai.api_version
            if get_settings().get("OPENAI.API_BASE", None):
                environ["OPENAI_BASE_URL"] = get_settings().openai.api_base
                openai.api_base = get_settings().openai.api_base

        except AttributeError as e:
            raise ValueError("OpenAI key is required") from e

@@ -37,7 +36,7 @@ class OpenAIHandler(BaseAiHandler):
        """
        return get_settings().get("OPENAI.DEPLOYMENT_ID", None)

    @retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
    @retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
           tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
    async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
        try:

@@ -45,19 +44,20 @@ class OpenAIHandler(BaseAiHandler):
            get_logger().info("System: ", system)
            get_logger().info("User: ", user)
            messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
            client = AsyncOpenAI()
            chat_completion = await client.chat.completions.create(

            chat_completion = await openai.ChatCompletion.acreate(
                model=model,
                deployment_id=deployment_id,
                messages=messages,
                temperature=temperature,
            )
            resp = chat_completion.choices[0].message.content
            finish_reason = chat_completion.choices[0].finish_reason
            usage = chat_completion.usage
            resp = chat_completion["choices"][0]['message']['content']
            finish_reason = chat_completion["choices"][0]["finish_reason"]
            usage = chat_completion.get("usage")
            get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
                              model=model, usage=usage)
            return resp, finish_reason
        except (APIError, Timeout) as e:
        except (APIError, Timeout, TryAgain) as e:
            get_logger().error("Error during OpenAI inference: ", e)
            raise
        except (RateLimitError) as e:

@@ -65,4 +65,4 @@ class OpenAIHandler(BaseAiHandler):
            raise
        except (Exception) as e:
            get_logger().error("Unknown error during OpenAI inference: ", e)
            raise
            raise TryAgain from e
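The interleaved pairs above are the two generations of the openai SDK: the >=1.0 style uses an `AsyncOpenAI` client and typed responses with attribute access, while the pre-1.0 style uses module-level `openai.ChatCompletion.acreate`, dict-style access, and `TryAgain` from `openai.error`. A minimal sketch of the newer style (model id and prompt are placeholders):

```python
import asyncio

from openai import AsyncOpenAI  # openai>=1.0

async def ask(prompt: str) -> str:
    client = AsyncOpenAI()  # picks up OPENAI_API_KEY from the environment
    chat = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model id
        messages=[{"role": "user", "content": prompt}],
    )
    # v1 returns typed objects, hence attribute access rather than dict lookups
    return chat.choices[0].message.content

print(asyncio.run(ask("Say hello")))
```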
@@ -43,10 +43,6 @@ class PRReviewHeader(str, Enum):
    INCREMENTAL = "## Incremental PR Reviewer Guide"


class PRDescriptionHeader(str, Enum):
    CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"


def get_setting(key: str) -> Any:
    try:
        key = key.upper()

@@ -1028,7 +1024,8 @@ def process_description(description_full: str) -> Tuple[str, List]:
    if not description_full:
        return "", []

    description_split = description_full.split(PRDescriptionHeader.CHANGES_WALKTHROUGH.value)
    split_str = "### **Changes walkthrough** 📝"
    description_split = description_full.split(split_str)
    base_description_str = description_split[0]
    changes_walkthrough_str = ""
    files = []

@@ -1063,9 +1060,6 @@ def process_description(description_full: str) -> Tuple[str, List]:
    if not res or res.lastindex != 4:
        pattern_back = r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>'
        res = re.search(pattern_back, file_data, re.DOTALL)
    if not res or res.lastindex != 4:
        pattern_back = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>'  # looking for hyphen ('- ')
        res = re.search(pattern_back, file_data, re.DOTALL)
    if res and res.lastindex == 4:
        short_filename = res.group(1).strip()
        short_summary = res.group(2).strip()
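`process_description` recovers per-file entries from the rendered `<details>` HTML with these regexes. A small demonstration of the first fallback pattern on a made-up entry (the sample string is invented for illustration):

```python
import re

# invented sample of one rendered file entry, shaped like the HTML the patterns expect
file_data = ("<details><summary><strong>utils.py</strong><dd><code>Fix header parsing"
             "</code>.</summary><hr>pr_agent/algo/utils.py\n\nRewrote the split logic</details>")

pattern_back = (r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>'
                r'.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>')
res = re.search(pattern_back, file_data, re.DOTALL)
if res and res.lastindex == 4:
    # groups: short filename, one-line summary, full path, change description
    print(res.group(1), '|', res.group(2), '|', res.group(3), '|', res.group(4))
```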
@@ -5,7 +5,7 @@ from urllib.parse import urlparse
from ..algo.file_filter import filter_ignored
from ..log import get_logger
from ..algo.language_handler import is_valid_file
from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff, PRDescriptionHeader
from ..algo.utils import clip_tokens, find_line_number_of_relevant_line_in_file, load_large_diff
from ..config_loader import get_settings
from .git_provider import GitProvider
from pr_agent.algo.types import EDIT_TYPE, FilePatchInfo

@@ -404,7 +404,7 @@ class AzureDevopsProvider(GitProvider):
            pr_body = pr_body[:ind]

        if len(pr_body) > MAX_PR_DESCRIPTION_AZURE_LENGTH:
            changes_walkthrough_text = PRDescriptionHeader.CHANGES_WALKTHROUGH.value
            changes_walkthrough_text = '## **Changes walkthrough**'
            ind = pr_body.find(changes_walkthrough_text)
            if ind != -1:
                pr_body = pr_body[:ind]
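Azure DevOps rejects over-long PR descriptions, so the provider cuts the body back at the walkthrough header when it exceeds the limit. The logic in miniature (the cap value here is a placeholder, not the real `MAX_PR_DESCRIPTION_AZURE_LENGTH`):

```python
MAX_LEN = 4000  # placeholder for MAX_PR_DESCRIPTION_AZURE_LENGTH

def truncate_at_walkthrough(pr_body: str, header: str = '## **Changes walkthrough**') -> str:
    # drop the (usually long) walkthrough table when the body exceeds the cap
    if len(pr_body) > MAX_LEN:
        ind = pr_body.find(header)
        if ind != -1:
            pr_body = pr_body[:ind]
    return pr_body
```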
@@ -43,9 +43,6 @@ api_base = "" # the base url for your local Llama 2, Code Llama, and other model
vertex_project = "" # the google cloud platform project name for your vertexai deployment
vertex_location = "" # the google cloud platform location for your vertexai deployment

[google_ai_studio]
gemini_api_key = "" # the google AI Studio API key

[github]
# ---- Set the following only for deployment type == "user"
user_token = "" # A GitHub personal access token with 'repo' scope.

@@ -63,7 +60,6 @@ webhook_secret = "<WEBHOOK SECRET>" # Optional, may be commented out.
[gitlab]
# Gitlab personal access token
personal_access_token = ""
shared_secret = "" # webhook secret

[bitbucket]
# For Bitbucket personal/repository bearer token
@@ -367,18 +367,6 @@ class PRCodeSuggestions:
                        "code_suggestions_feedback": code_suggestions_feedback[i]})
                    suggestion["score"] = 7
                    suggestion["score_why"] = ""

                    # if the before and after code is the same, clear one of them
                    try:
                        if suggestion['existing_code'] == suggestion['improved_code']:
                            get_logger().debug(
                                f"edited improved suggestion {i + 1}, because equal to existing code: {suggestion['existing_code']}")
                            if get_settings().pr_code_suggestions.commitable_code_suggestions:
                                suggestion['improved_code'] = ""  # we need 'existing_code' to locate the code in the PR
                            else:
                                suggestion['existing_code'] = ""
                    except Exception as e:
                        get_logger().error(f"Error processing suggestion {i + 1}, error: {e}")
            else:
                # get_logger().error(f"Could not self-reflect on suggestions. using default score 7")
                for i, suggestion in enumerate(data["code_suggestions"]):

@@ -434,6 +422,13 @@ class PRCodeSuggestions:
                    continue

                if ('existing_code' in suggestion) and ('improved_code' in suggestion):
                    if suggestion['existing_code'] == suggestion['improved_code']:
                        get_logger().debug(
                            f"edited improved suggestion {i + 1}, because equal to existing code: {suggestion['existing_code']}")
                        if get_settings().pr_code_suggestions.commitable_code_suggestions:
                            suggestion['improved_code'] = ""  # we need 'existing_code' to locate the code in the PR
                        else:
                            suggestion['existing_code'] = ""
                suggestion = self._truncate_if_needed(suggestion)
                one_sentence_summary_list.append(suggestion['one_sentence_summary'])
                suggestion_list.append(suggestion)
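The rule encoded twice above, in miniature (a made-up suggestion dict; the settings lookup is reduced to a boolean):

```python
def clear_identical_code(suggestion: dict, commitable: bool) -> dict:
    # when the model echoes the original code back, keep only one side:
    # committable suggestions need 'existing_code' to locate the snippet in the PR
    if suggestion.get('existing_code') == suggestion.get('improved_code'):
        if commitable:
            suggestion['improved_code'] = ""
        else:
            suggestion['existing_code'] = ""
    return suggestion

print(clear_identical_code({'existing_code': 'x = 1', 'improved_code': 'x = 1'}, True))
```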
@@ -12,7 +12,7 @@ from pr_agent.algo.ai_handlers.litellm_ai_handler import LiteLLMAIHandler
from pr_agent.algo.pr_processing import get_pr_diff, retry_with_fallback_models, get_pr_diff_multiple_patchs, \
    OUTPUT_BUFFER_TOKENS_HARD_THRESHOLD
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import set_custom_labels, PRDescriptionHeader
from pr_agent.algo.utils import set_custom_labels
from pr_agent.algo.utils import load_yaml, get_user_labels, ModelType, show_relevant_configurations, get_max_tokens, \
    clip_tokens
from pr_agent.config_loader import get_settings

@@ -501,7 +501,7 @@ extra_file_yaml =
            pr_body += "</details>\n"
        elif 'pr_files' in key.lower() and get_settings().pr_description.enable_semantic_files_types:
            changes_walkthrough, pr_file_changes = self.process_pr_files_prediction(changes_walkthrough, value)
            changes_walkthrough = f"{PRDescriptionHeader.CHANGES_WALKTHROUGH.value}\n{changes_walkthrough}"
            changes_walkthrough = f"### **Changes walkthrough** 📝\n{changes_walkthrough}"
        else:
            # if the value is a list, join its items by comma
            if isinstance(value, list):
@@ -4,12 +4,10 @@ atlassian-python-api==3.41.4
azure-devops==7.1.0b3
azure-identity==1.15.0
boto3==1.33.6
certifi==2024.8.30
dynaconf==3.2.4
fastapi==0.111.0
GitPython==3.1.41
google-cloud-aiplatform==1.38.0
google-generativeai==0.8.3
google-cloud-storage==2.10.0
Jinja2==3.1.2
litellm==1.50.2
@@ -5,7 +5,7 @@ REVIEW_START_WITH = '## PR Reviewer Guide 🔍\n\n<table>\n<tr><td>⏱️ <
IMPROVE_START_WITH_REGEX_PATTERN = r'^## PR Code Suggestions ✨\n\n<!-- [a-z0-9]+ -->\n\n<table><thead><tr><td>Category</td>'

NUM_MINUTES = 5
print("aaa")

NEW_FILE_CONTENT = """\
from pr_agent import cli
from pr_agent.config_loader import get_settings