Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-02 03:40:38 +08:00
Support Google AI Studio
Signed-off-by: Yu Ishikawa <yu-iskw@users.noreply.github.com>
@@ -133,9 +133,26 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat
If you do want to set explicit credentials, you can set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the path of a JSON credentials file.
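For example, a minimal sketch (the path below is a placeholder, not part of this commit) of pointing the Google client libraries at an explicit service-account key from Python:

```python
import os

# Placeholder path: point this at your own service-account JSON key file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/service-account.json"
```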
### Google AI Studio
To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant models in the `[config]` section of the configuration file:
```toml
[config] # in configuration.toml
model="google_ai_studio/gemini-1.5-flash"
model_turbo="google_ai_studio/gemini-1.5-flash"
fallback_models=["google_ai_studio/gemini-1.5-flash"]

[google_ai_studio] # in .secrets.toml
gemini_api_key = "..."
```
If you don't want to set the API key in the `.secrets.toml` file, you can set the `GOOGLE_AI_STUDIO.GEMINI_API_KEY` environment variable.
### Anthropic
To use Anthropic models, set the relevant models in the `[config]` section of the configuration file:
```toml
[config]
model="anthropic/claude-3-opus-20240229"
@@ -38,6 +38,8 @@ MAX_TOKENS = {
    'vertex_ai/gemini-1.5-pro': 1048576,
    'vertex_ai/gemini-1.5-flash': 1048576,
    'vertex_ai/gemma2': 8200,
    'gemini/gemini-1.5-pro': 1048576,
    'gemini/gemini-1.5-flash': 1048576,
    'codechat-bison': 6144,
    'codechat-bison-32k': 32000,
    'anthropic.claude-instant-v1': 100000,
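As an illustration only (the helper below is hypothetical and not part of this commit), the map is the kind of table a token-budget check can consult; the new `gemini/*` entries advertise the roughly 1M-token context window:

```python
# Hypothetical lookup helper around a slice of the MAX_TOKENS map shown above.
MAX_TOKENS = {
    'gemini/gemini-1.5-pro': 1048576,
    'gemini/gemini-1.5-flash': 1048576,
    'vertex_ai/gemma2': 8200,
}

def max_tokens_for(model: str, default: int = 8192) -> int:
    # Fall back to a conservative default for models missing from the map.
    return MAX_TOKENS.get(model, default)

print(max_tokens_for('gemini/gemini-1.5-flash'))  # 1048576
```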
@@ -83,6 +83,11 @@ class LiteLLMAIHandler(BaseAiHandler):
        litellm.vertex_location = get_settings().get(
            "VERTEXAI.VERTEX_LOCATION", None
        )
        # Google AI Studio
        # SEE https://docs.litellm.ai/docs/providers/gemini
        if get_settings().get("GOOGLE_AI_STUDIO.GEMINI_API_KEY", None):
            os.environ["GEMINI_API_KEY"] = get_settings().google_ai_studio.gemini_api_key

    def prepare_logs(self, response, system, user, resp, finish_reason):
        response_log = response.dict().copy()
        response_log['system'] = system
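A minimal sketch of what the new branch enables (illustrative only; it assumes LiteLLM's standard `completion` call and the `gemini/` provider prefix documented at the link above, and the prompt is made up):

```python
import os
import litellm

# In pr-agent this key comes from .secrets.toml or the environment, as shown above.
os.environ["GEMINI_API_KEY"] = "..."

# With GEMINI_API_KEY exported, LiteLLM routes gemini/* model names to Google AI Studio.
response = litellm.completion(
    model="gemini/gemini-1.5-flash",
    messages=[{"role": "user", "content": "Summarize the changes in this pull request."}],
)
print(response.choices[0].message.content)
```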
@@ -43,6 +43,9 @@ api_base = "" # the base url for your local Llama 2, Code Llama, and other model
vertex_project = "" # the google cloud platform project name for your vertexai deployment
vertex_location = "" # the google cloud platform location for your vertexai deployment

[google_ai_studio]
gemini_api_key = "" # the google AI Studio API key

[github]
# ---- Set the following only for deployment type == "user"
user_token = "" # A GitHub personal access token with 'repo' scope.
@@ -8,6 +8,7 @@ dynaconf==3.2.4
fastapi==0.111.0
GitPython==3.1.41
google-cloud-aiplatform==1.38.0
google-generativeai==0.8.3
google-cloud-storage==2.10.0
Jinja2==3.1.2
litellm==1.50.2