Update weak model document

Commit 88a93bdcd7 (parent 3c31048afc)
Author: Trung Dinh
Date: 2024-12-08 22:01:00 +07:00


@@ -5,7 +5,7 @@ To use a different model than the default (GPT-4), you need to edit in the [conf
 ```
 [config]
 model = "..."
-model_turbo = "..."
+model_weak = "..."
 fallback_models = ["..."]
 ```
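For readers updating an existing setup by hand, the net effect of this commit is a key rename: `model_turbo` becomes `model_weak`. A minimal sketch of the resulting configuration, with hypothetical model names, assuming `model_weak` inherits the old key's role as the cheaper model used for lighter tasks:

```toml
[config]  # in configuration.toml
model = "gpt-4o"            # primary model (hypothetical choice)
model_weak = "gpt-4o-mini"  # cheaper model; this key replaces the old model_turbo
fallback_models = ["gpt-4o"]
```

The `fallback_models` list is tried in order if a request to the primary model fails, so it can keep pointing at the same models as before the rename.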
@@ -28,7 +28,7 @@ and set in your configuration file:
 ```
 [config]
 model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
-model_turbo="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+model_weak="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
 fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
 ```
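The Azure hunk only shows the `[config]` side; the deployment details presumably live in `.secrets.toml`, as with the other providers in this document. A hypothetical sketch, assuming an `[openai]` section with Azure fields (section and key names are assumptions, not shown in this diff):

```toml
[openai]  # in .secrets.toml; section and key names assumed, verify against your secrets template
key = "..."                # your Azure OpenAI key
api_type = "azure"
api_version = "2023-05-15" # hypothetical API version
api_base = "..."           # your Azure endpoint URL
deployment_id = "..."      # name of your model deployment
```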
@@ -52,7 +52,7 @@ MAX_TOKENS={
 [config] # in configuration.toml
 model = "ollama/llama2"
-model_turbo = "ollama/llama2"
+model_weak = "ollama/llama2"
 fallback_models=["ollama/llama2"]
 [ollama] # in .secrets.toml
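The hunk ends right at the `[ollama]` section header, so the server address is not visible here. A hypothetical completion, assuming an `api_base` key and Ollama's default local port:

```toml
[ollama]  # in .secrets.toml
api_base = "http://localhost:11434"  # assumed key name; 11434 is Ollama's default port
```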
@@ -76,7 +76,7 @@ MAX_TOKENS={
 }
 [config] # in configuration.toml
 model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
-model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+model_weak = "huggingface/meta-llama/Llama-2-7b-chat-hf"
 fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
 [huggingface] # in .secrets.toml
@@ -91,7 +91,7 @@ To use Llama2 model with Replicate, for example, set:
 ```
 [config] # in configuration.toml
 model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
-model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+model_weak = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
 fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
 [replicate] # in .secrets.toml
 key = ...
@@ -107,7 +107,7 @@ To use Llama3 model with Groq, for example, set:
 ```
 [config] # in configuration.toml
 model = "llama3-70b-8192"
-model_turbo = "llama3-70b-8192"
+model_weak = "llama3-70b-8192"
 fallback_models = ["groq/llama3-70b-8192"]
 [groq] # in .secrets.toml
 key = ... # your Groq api key
@@ -121,7 +121,7 @@ To use Google's Vertex AI platform and its associated models (chat-bison/codecha
 ```
 [config] # in configuration.toml
 model = "vertex_ai/codechat-bison"
-model_turbo = "vertex_ai/codechat-bison"
+model_weak = "vertex_ai/codechat-bison"
 fallback_models="vertex_ai/codechat-bison"
 [vertexai] # in .secrets.toml
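As with Ollama, the `[vertexai]` secrets section is cut off at the hunk boundary. A hypothetical sketch of what typically follows, assuming project and location keys (the key names are assumptions):

```toml
[vertexai]  # in .secrets.toml; key names assumed
vertex_project = "my-gcp-project"  # hypothetical GCP project id
vertex_location = "us-central1"    # hypothetical region
```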
@@ -140,7 +140,7 @@ To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant
 ```toml
 [config] # in configuration.toml
 model="google_ai_studio/gemini-1.5-flash"
-model_turbo="google_ai_studio/gemini-1.5-flash"
+model_weak="google_ai_studio/gemini-1.5-flash"
 fallback_models=["google_ai_studio/gemini-1.5-flash"]
 [google_ai_studio] # in .secrets.toml
@@ -156,7 +156,7 @@ To use Anthropic models, set the relevant models in the configuration section of
 ```
 [config]
 model="anthropic/claude-3-opus-20240229"
-model_turbo="anthropic/claude-3-opus-20240229"
+model_weak="anthropic/claude-3-opus-20240229"
 fallback_models=["anthropic/claude-3-opus-20240229"]
 ```
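The Anthropic hunk stops at the closing fence, before any secrets section. By analogy with the `[replicate]` and `[groq]` sections above, the API key presumably goes in `.secrets.toml`; a hypothetical sketch:

```toml
[anthropic]  # in .secrets.toml; section name assumed by analogy with the other providers
key = "..."  # your Anthropic API key
```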
@@ -173,7 +173,7 @@ To use Amazon Bedrock and its foundational models, add the below configuration:
 ```
 [config] # in configuration.toml
 model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
-model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
+model_weak="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
 fallback_models=["bedrock/anthropic.claude-v2:1"]
 ```
@@ -195,7 +195,7 @@ If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agen
 ```
 [config]
 model="custom_model_name"
-model_turbo="custom_model_name"
+model_weak="custom_model_name"
 fallback_models=["custom_model_name"]
 ```
 (2) Set the maximal tokens for the model:
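The final hunk is cut off at step (2). For illustration, a hypothetical way this step might look, assuming a `custom_model_max_tokens` key (the key name is an assumption, not shown in this diff):

```toml
[config]  # in configuration.toml
custom_model_max_tokens = 8192  # hypothetical value; use your model's actual context window
```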