Merge pull request #746 from Codium-ai/tr/claude3

Tr/claude3
Tal
2024-03-06 01:08:41 -08:00
committed by GitHub
6 changed files with 67 additions and 54 deletions


@@ -265,21 +265,16 @@ inline_code_comments = true
Each time you invoke the `/review` tool, it will use inline code comments.
#### BitBucket Self-Hosted App automatic tools
You can configure in your local `.pr_agent.toml` file which tools will **run automatically** when a new PR is opened.
Specifically, set the following values:
```yaml
To control which commands will run automatically when a new PR is opened, set the `pr_commands` parameter in the configuration file:
```
[bitbucket_app]
auto_review = true # set as config var in .pr_agent.toml
auto_describe = true # set as config var in .pr_agent.toml
auto_improve = true # set as config var in .pr_agent.toml
pr_commands = [
    "/review --pr_reviewer.num_code_suggestions=0",
    "/improve --pr_code_suggestions.summarize=false",
]
```
`bitbucket_app.auto_review`, `bitbucket_app.auto_describe` and `bitbucket_app.auto_improve` are used to enable/disable automatic tools.
If not set, the default option is that only the `review` tool will run automatically when a new PR is opened.
Note that due to limitations of the bitbucket platform, the `auto_describe` tool will be able to publish a PR description only as a comment.
In addition, some subsections like `PR changes walkthrough` will not appear, since they require the usage of collapsible sections, which are not supported by bitbucket.
Note that due to limitations of the bitbucket platform, not all tools or sub-options are supported. See [here](./README.md#Overview) for an overview of the supported tools for bitbucket.
### Azure DevOps provider
@@ -469,6 +464,20 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat
If you do want to set explicit credentials, you can set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the path of a JSON credentials file.
##### Anthropic
To use Anthropic models, set the relevant models in the `[config]` section of the configuration file:
```
[config]
model="anthropic/claude-3-opus-20240229"
model_turbo="anthropic/claude-3-opus-20240229"
fallback_models=["anthropic/claude-3-opus-20240229"]
```
Also set the API key in the `.secrets.toml` file:
```
[anthropic]
KEY = "..."
```
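Under the hood, litellm routes the `anthropic/` model prefix to Anthropic's API and picks up the key from its settings or the environment. A minimal standalone sketch of an equivalent direct call, assuming `litellm` is installed and `ANTHROPIC_API_KEY` is exported; the prompt content is illustrative:
```python
import litellm

# litellm infers the provider from the "anthropic/" model prefix and reads
# ANTHROPIC_API_KEY from the environment
response = litellm.completion(
    model="anthropic/claude-3-opus-20240229",
    messages=[{"role": "user", "content": "Summarize this pull request."}],
)
print(response["choices"][0]["message"]["content"])
```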
##### Amazon Bedrock
To use Amazon Bedrock and its foundational models, add the configuration below:


@@ -19,7 +19,8 @@ MAX_TOKENS = {
    'vertex_ai/codechat-bison-32k': 32000,
    'codechat-bison': 6144,
    'codechat-bison-32k': 32000,
    'anthropic.claude-v2': 100000,
    'anthropic.claude-instant-v1': 100000,
    'anthropic.claude-v1': 100000,
    'anthropic.claude-v2': 100000,
    'anthropic/claude-3-opus-20240229': 100000,
}
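For context, this table maps model identifiers to their context-window sizes. A minimal sketch of how such a table is typically consulted; the `get_max_tokens` helper and its fallback value are illustrative, not pr-agent's actual API:
```python
MAX_TOKENS = {
    'anthropic.claude-v2': 100000,
    'anthropic/claude-3-opus-20240229': 100000,
}

def get_max_tokens(model: str, default: int = 4096) -> int:
    # unknown models fall back to a conservative default (an assumption here)
    return MAX_TOKENS.get(model, default)

assert get_max_tokens('anthropic/claude-3-opus-20240229') == 100000
```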


@@ -4,7 +4,8 @@ import boto3
import litellm
import openai
from litellm import acompletion
from openai.error import APIError, RateLimitError, Timeout, TryAgain
# from openai.error import APIError, RateLimitError, Timeout, TryAgain
from openai import APIError, RateLimitError, Timeout
from retry import retry
from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.config_loader import get_settings
@@ -77,28 +78,9 @@ class LiteLLMAIHandler(BaseAiHandler):
"""
return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
@retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
@retry(exceptions=(APIError, Timeout, AttributeError, RateLimitError),
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
"""
Performs a chat completion using the OpenAI ChatCompletion API.
Retries in case of API errors or timeouts.
Args:
model (str): The model to use for chat completion.
temperature (float): The temperature parameter for chat completion.
system (str): The system message for chat completion.
user (str): The user message for chat completion.
Returns:
tuple: A tuple containing the response and finish reason from the API.
Raises:
TryAgain: If the API response is empty or there are no choices in the response.
APIError: If there is an error during OpenAI inference.
Timeout: If there is a timeout during OpenAI inference.
TryAgain: If there is an attribute error during OpenAI inference.
"""
try:
resp, finish_reason = None, None
deployment_id = self.deployment_id
@@ -117,7 +99,7 @@ class LiteLLMAIHandler(BaseAiHandler):
            get_logger().debug("Prompts", artifact={"system": system, "user": user})
            response = await acompletion(**kwargs)
        except (APIError, Timeout, TryAgain) as e:
        except (APIError, Timeout) as e:
            get_logger().error("Error during OpenAI inference: ", e)
            raise
        except (RateLimitError) as e:
@@ -125,9 +107,9 @@ class LiteLLMAIHandler(BaseAiHandler):
            raise
        except (Exception) as e:
            get_logger().error("Unknown error during OpenAI inference: ", e)
            raise TryAgain from e
            raise APIError from e
        if response is None or len(response["choices"]) == 0:
            raise TryAgain
            raise APIError
        else:
            resp = response["choices"][0]['message']['content']
            finish_reason = response["choices"][0]["finish_reason"]
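For context, the `retry` decorator above re-invokes the wrapped call with exponential backoff whenever one of the listed exceptions is raised; since openai 1.x removed `TryAgain`, unknown failures are now surfaced as `APIError` so they stay inside the retried set. A self-contained sketch of that backoff behavior, with a hypothetical `flaky_call` standing in for the completion request:
```python
from retry import retry

class TransientError(Exception):
    """Stand-in for APIError / RateLimitError in the handler above."""

attempts = 0

@retry(exceptions=TransientError, tries=4, delay=2, backoff=2, jitter=(1, 3))
def flaky_call() -> str:
    global attempts
    attempts += 1
    if attempts < 3:
        raise TransientError("transient failure")  # retried after ~2s, then ~4s
    return "ok"

print(flaky_call())  # succeeds on the third attempt
```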


@@ -16,6 +16,7 @@ from starlette_context import context
from starlette_context.middleware import RawContextMiddleware
from pr_agent.agent.pr_agent import PRAgent
from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import get_settings, global_settings
from pr_agent.git_providers.utils import apply_repo_settings
from pr_agent.identity_providers import get_identity_provider
@@ -72,6 +73,24 @@ async def handle_manifest(request: Request, response: Response):
    manifest_obj = json.loads(manifest)
    return JSONResponse(manifest_obj)

async def _perform_commands_bitbucket(commands_conf: str, agent: PRAgent, api_url: str, log_context: dict):
    apply_repo_settings(api_url)
    commands = get_settings().get(f"bitbucket_app.{commands_conf}", {})
    for command in commands:
        try:
            split_command = command.split(" ")
            command = split_command[0]
            args = split_command[1:]
            other_args = update_settings_from_args(args)
            new_command = ' '.join([command] + other_args)
            get_logger().info(f"Performing command: {new_command}")
            with get_logger().contextualize(**log_context):
                await agent.handle_request(api_url, new_command)
        except Exception as e:
            get_logger().error(f"Failed to perform command {command}: {e}")

@router.post("/webhook")
async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Request):
    log_context = {"server_type": "bitbucket_app"}
log_context = {"server_type": "bitbucket_app"}
@@ -118,18 +137,19 @@ async def handle_github_webhooks(background_tasks: BackgroundTasks, request: Request):
            with get_logger().contextualize(**log_context):
                apply_repo_settings(pr_url)
            if get_identity_provider().verify_eligibility("bitbucket",
                                                          sender_id, pr_url) is not Eligibility.NOT_ELIGIBLE:
                auto_review = get_setting_or_env("BITBUCKET_APP.AUTO_REVIEW", None)
                if auto_review is None or is_true(auto_review):  # by default, auto review is enabled
                    await PRReviewer(pr_url).run()
                auto_improve = get_setting_or_env("BITBUCKET_APP.AUTO_IMPROVE", None)
                if is_true(auto_improve):  # by default, auto improve is disabled
                    await PRCodeSuggestions(pr_url).run()
                auto_describe = get_setting_or_env("BITBUCKET_APP.AUTO_DESCRIBE", None)
                if is_true(auto_describe):  # by default, auto describe is disabled
                    await PRDescription(pr_url).run()
                # with get_logger().contextualize(**log_context):
                #     await agent.handle_request(pr_url, "review")
                                                          sender_id, pr_url) is not Eligibility.NOT_ELIGIBLE:
                if get_settings().get("bitbucket_app.pr_commands"):
                    await _perform_commands_bitbucket("pr_commands", PRAgent(), pr_url, log_context)
                else:  # backwards compatibility
                    auto_review = get_setting_or_env("BITBUCKET_APP.AUTO_REVIEW", None)
                    if is_true(auto_review):  # by default, auto review is disabled
                        await PRReviewer(pr_url).run()
                    auto_improve = get_setting_or_env("BITBUCKET_APP.AUTO_IMPROVE", None)
                    if is_true(auto_improve):  # by default, auto improve is disabled
                        await PRCodeSuggestions(pr_url).run()
                    auto_describe = get_setting_or_env("BITBUCKET_APP.AUTO_DESCRIBE", None)
                    if is_true(auto_describe):  # by default, auto describe is disabled
                        await PRDescription(pr_url).run()
        elif event == "pullrequest:comment_created":
            pr_url = data["data"]["pullrequest"]["links"]["html"]["href"]
            log_context["api_url"] = pr_url


@@ -165,9 +165,10 @@ pr_commands = [
]
[bitbucket_app]
#auto_review = true # set as config var in .pr_agent.toml
#auto_describe = true # set as config var in .pr_agent.toml
#auto_improve = true # set as config var in .pr_agent.toml
pr_commands = [
    "/review --pr_reviewer.num_code_suggestions=0",
    "/improve --pr_code_suggestions.summarize=false",
]
[local]


@@ -9,10 +9,10 @@ GitPython==3.1.32
google-cloud-aiplatform==1.35.0
google-cloud-storage==2.10.0
Jinja2==3.1.2
litellm==0.12.5
litellm==1.29.1
loguru==0.7.2
msrest==0.7.1
openai==0.27.8
openai==1.13.3
pinecone-client
pinecone-datasets @ git+https://github.com/mrT23/pinecone-datasets.git@main
lancedb==0.5.1
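Note that the `openai` bump from 0.27.8 to 1.13.3 crosses the 1.0 breaking change: the `openai.error` module was removed, which is why the handler above now imports exceptions from the package root. A hedged compatibility sketch for code that must tolerate both lines:
```python
try:
    # openai < 1.0 kept exception classes in the openai.error module
    from openai.error import APIError, RateLimitError
except ImportError:
    # openai >= 1.0 exposes them at the package root
    from openai import APIError, RateLimitError
```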