Merge pull request #276 from krrishdholakia/main

Add docs on using Azure
This commit is contained in:
mrT23
2023-09-07 12:20:26 +03:00
committed by GitHub
5 changed files with 21 additions and 13 deletions

View File

@@ -369,11 +369,5 @@ WEBHOOK_SECRET=$(python -c "import secrets; print(secrets.token_hex(10))")
In the "Trigger" section, check the comments and merge request events boxes. In the "Trigger" section, check the comments and merge request events boxes.
6. Test your installation by opening a merge request or commenting on a merge request using one of CodiumAI's commands.
---
### Appendix - **Debugging LLM API Calls**
If you're testing your codium/pr-agent server and need to see whether calls were made successfully, along with the exact call logs, you can use the [LiteLLM Debugger tool](https://docs.litellm.ai/docs/debugging/hosted_debugging).
Enable it by setting `litellm_debugger=true` in configuration.toml. Your logs will be viewable in real time at `admin.litellm.ai/<your_email>`. Set your email in `.secrets.toml` under 'user_email'.
<img src="./pics/debugger.png" width="800"/>
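For context, a minimal Python sketch of how this flag was consumed, based on the `litellm.debugger` assignment removed from the AI handler in this commit (illustrative only, not the full handler):
```
import litellm
from pr_agent.config_loader import get_settings

# Read litellm_debugger from the [config] section of configuration.toml and,
# if enabled, turn on LiteLLM's hosted debugger so calls appear at
# admin.litellm.ai/<your_email>.
if get_settings().config.litellm_debugger:
    litellm.debugger = True
```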

View File

@@ -149,11 +149,28 @@ TBD
#### Changing a model
See [here](pr_agent/algo/__init__.py) for the list of available models.
To use Azure, set:
```
api_key = "" # your azure api key
api_type = "azure"
api_version = '2023-05-15' # Check Azure documentation for the current API version
api_base = "" # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
deployment_id = "" # The deployment name you chose when you deployed the engine
```
in your `.secrets.toml`
and
```
[config]
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
```
in `configuration.toml`
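For reference, these keys map onto the module-level Azure settings of the pre-1.0 `openai` Python SDK that pr-agent uses under the hood; a rough illustrative sketch (the deployment name below is a hypothetical placeholder, not pr-agent's actual code path):
```
import openai

# Equivalents of the .secrets.toml keys above (openai<1.0 module-level settings)
openai.api_type = "azure"
openai.api_version = "2023-05-15"
openai.api_base = "https://<your resource name>.openai.azure.com"
openai.api_key = "<your azure api key>"

# With api_type="azure", the request targets a deployment (engine) rather than a
# raw model name; "my-gpt-35-turbo" is a hypothetical deployment name.
response = openai.ChatCompletion.create(
    engine="my-gpt-35-turbo",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response["choices"][0]["message"]["content"])
```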
To use the Llama2 model with Replicate, for example, set:
```
[config] # in configuration.toml
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1" model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
[replicate] [replicate] # in .secrets.toml
key = ... key = ...
``` ```
(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api)) (you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
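To see roughly what this configuration drives, here is an illustrative LiteLLM call with that Replicate model; it assumes the key is supplied via the `REPLICATE_API_KEY` environment variable (pr-agent itself reads it from `.secrets.toml`), and the prompt is a placeholder:
```
import os
from litellm import completion

# Assumption for this sketch: expose the Replicate key as an environment variable;
# in pr-agent the same value lives under [replicate] key = ... in .secrets.toml.
os.environ["REPLICATE_API_KEY"] = "<your replicate key>"

response = completion(
    model="replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
    messages=[{"role": "user", "content": "Summarize this pull request in one sentence."}],
)
print(response["choices"][0]["message"]["content"])
```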

View File

@@ -5,7 +5,6 @@ import openai
from litellm import acompletion
from openai.error import APIError, RateLimitError, Timeout, TryAgain
from retry import retry
from pr_agent.config_loader import get_settings
OPENAI_RETRIES = 5
@@ -26,7 +25,6 @@ class AiHandler:
try:
openai.api_key = get_settings().openai.key
litellm.openai_key = get_settings().openai.key
litellm.debugger = get_settings().config.litellm_debugger
self.azure = False
if get_settings().get("OPENAI.ORG", None):
litellm.organization = get_settings().openai.org

View File

@@ -10,7 +10,6 @@ use_repo_settings_file=true
ai_timeout=180
max_description_tokens = 500
max_commits_tokens = 500
litellm_debugger=false
secret_provider="google_cloud_storage"
[pr_reviewer] # /review #

View File

@@ -13,7 +13,7 @@ atlassian-python-api==3.39.0
GitPython~=3.1.32
PyYAML==6.0
starlette-context==0.3.6
litellm~=0.1.538
boto3~=1.28.25
google-cloud-storage==2.10.0
ujson==5.8.0