pr-agent/pr_agent/settings/.secrets_template.toml
# QUICKSTART:
# Copy this file to .secrets.toml in the same folder.
# The minimum working setup: set openai.key to your API key.
# Set github.deployment_type to "user" and github.user_token to your GitHub personal access token.
# This will allow you to run the CLI scripts in the scripts/ folder and the github_polling server.
#
# See README for details about GitHub App deployment.
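#
# A minimal sketch of a working .secrets.toml for the CLI scripts (all values
# below are placeholders, not real credentials):
#
#   [openai]
#   key = "sk-..."            # your OpenAI API key
#
#   [github]
#   user_token = "ghp_..."    # your GitHub personal access token
#   deployment_type = "user"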
[openai]
key = "" # Acquire through https://platform.openai.com
#org = "<ORGANIZATION>" # Optional, may be commented out.
# Uncomment the following for Azure OpenAI
#api_type = "azure"
#api_version = '2023-05-15' # Check Azure documentation for the current API version
#api_base = "" # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
#deployment_id = "" # The deployment name you chose when you deployed the engine
#fallback_deployments = [] # For each fallback model specified in configuration.toml in the [config] section, specify the appropriate deployment_id
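# As an illustration only (hypothetical resource and deployment names), an Azure
# OpenAI setup might look like:
#   api_type = "azure"
#   api_version = "2023-05-15"
#   api_base = "https://my-resource.openai.azure.com"
#   deployment_id = "my-gpt-4o-deployment"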
[pinecone]
api_key = "..."
environment = "gcp-starter"
[anthropic]
key = "" # Optional, uncomment if you want to use Anthropic. Acquire through https://www.anthropic.com/
[cohere]
key = "" # Optional, uncomment if you want to use Cohere. Acquire through https://dashboard.cohere.ai/
[replicate]
key = "" # Optional, uncomment if you want to use Replicate. Acquire through https://replicate.com/
[groq]
key = "" # Acquire through https://console.groq.com/keys
[xai]
key = "" # Optional, uncomment if you want to use xAI. Acquire through https://console.x.ai/
[huggingface]
key = "" # Optional, uncomment if you want to use the Hugging Face Inference API. Acquire through https://huggingface.co/docs/api-inference/quicktour
api_base = "" # The base URL for your Hugging Face inference endpoint
[ollama]
api_base = "" # The base URL for your local Ollama inference endpoint (Llama 2, Code Llama, and other models). Install Ollama from https://ollama.ai/
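# If you run Ollama locally with its default settings, the API is usually served
# on port 11434, e.g.:
#   api_base = "http://localhost:11434"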
[vertexai]
vertex_project = "" # The Google Cloud project for your Vertex AI deployment
vertex_location = "" # The Google Cloud region (location) for your Vertex AI deployment
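# Illustrative values only (hypothetical project; "us-central1" is one example region):
#   vertex_project = "my-gcp-project"
#   vertex_location = "us-central1"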
[google_ai_studio]
gemini_api_key = "" # The Google AI Studio API key
[github]
# ---- Set the following only for deployment type == "user"
user_token = "" # A GitHub personal access token with 'repo' scope.
deployment_type = "user" # Set to "user" by default
# ---- Set the following only for deployment type == "app", see README for details.
private_key = """\
-----BEGIN RSA PRIVATE KEY-----
<GITHUB PRIVATE KEY>
-----END RSA PRIVATE KEY-----
"""
app_id = 123456 # The GitHub App ID, replace with your own.
webhook_secret = "<WEBHOOK SECRET>" # Optional, may be commented out.
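# As a sketch, a GitHub App deployment would instead set deployment_type = "app"
# and fill in private_key, app_id, and webhook_secret above; see the README for
# the full setup.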
[gitlab]
# GitLab personal access token
personal_access_token = ""
shared_secret = "" # Webhook secret
[bitbucket]
# For Bitbucket authentication
auth_type = "bearer" # "bearer" or "basic"
# For bearer token authentication
bearer_token = ""
# For basic authentication (uses token only)
basic_token = ""
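# For example (illustrative), with bearer authentication you would leave
# basic_token empty and set:
#   auth_type = "bearer"
#   bearer_token = "<your Bitbucket access token>"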
[bitbucket_server]
# For Bitbucket Server bearer token
bearer_token = ""
webhook_secret = ""
# For Bitbucket app
app_key = ""
base_url = ""
[azure_devops]
# For Azure DevOps personal access token
org = ""
pat = ""
[azure_devops_server]
# For Azure DevOps Server basic auth - configured when the webhook is created
# Optional, uncomment if you want to use Azure DevOps webhooks. Values are assigned when you create the webhook.
# webhook_username = "<basic auth user>"
# webhook_password = "<basic auth password>"
[deepseek]
key = ""
[deepinfra]
key = ""
[azure_ad]
# Azure AD authentication for OpenAI services
client_id = "" # Your Azure AD application client ID
client_secret = "" # Your Azure AD application client secret
tenant_id = "" # Your Azure AD tenant ID
api_base = "" # Your Azure OpenAI service base URL (e.g., https://openai.xyz.com/)