mirror of https://github.com/qodo-ai/pr-agent.git
synced 2025-07-02 03:40:38 +08:00
Merge pull request #1892 from dcieslak19973/pr-agent-1886-add-bedrock-llama4-20250622
Add bedrock Llama 4 Scout/Maverick
@@ -232,6 +232,14 @@ AWS_SECRET_ACCESS_KEY="..."
AWS_REGION_NAME="..."
```

You can also use the new Meta Llama 4 models available on Amazon Bedrock:

```toml
[config] # in configuration.toml
model="bedrock/us.meta.llama4-scout-17b-instruct-v1:0"
fallback_models=["bedrock/us.meta.llama4-maverick-17b-instruct-v1:0"]
```

See the [litellm](https://docs.litellm.ai/docs/providers/bedrock#usage) documentation for more information about the environment variables required for Amazon Bedrock.
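To sanity-check the AWS credentials and the Bedrock model ID outside of pr-agent, a minimal litellm smoke test could look like the sketch below. This is an illustrative assumption rather than part of this change; it assumes `litellm` is installed and that the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION_NAME` variables above are exported.

```python
# Illustrative sketch: verify the Bedrock Llama 4 Scout model is reachable via litellm.
# Assumes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION_NAME are set in the
# environment, as described above.
import litellm

response = litellm.completion(
    model="bedrock/us.meta.llama4-scout-17b-instruct-v1:0",
    messages=[{"role": "user", "content": "Reply with the single word: ok"}],
)
print(response.choices[0].message.content)
```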
### DeepSeek
@@ -113,6 +113,8 @@ MAX_TOKENS = {
    'claude-3-5-sonnet': 100000,
    'groq/meta-llama/llama-4-scout-17b-16e-instruct': 131072,
    'groq/meta-llama/llama-4-maverick-17b-128e-instruct': 131072,
    'bedrock/us.meta.llama4-scout-17b-instruct-v1:0': 128000,
    'bedrock/us.meta.llama4-maverick-17b-instruct-v1:0': 128000,
    'groq/llama3-8b-8192': 8192,
    'groq/llama3-70b-8192': 8192,
    'groq/llama-3.1-8b-instant': 8192,
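The `MAX_TOKENS` mapping is how pr-agent is told the context-window size for each supported model ID; the two Bedrock entries added here register a 128000-token limit for the Llama 4 Scout and Maverick models. As a hedged illustration of how such a table can be consulted, the sketch below uses a hypothetical helper and fallback default that are not pr-agent's actual code; only the two Bedrock entries and their limits come from this change.

```python
# Illustrative sketch only: consulting a MAX_TOKENS-style table.
# The helper name and the fallback default are hypothetical, not pr-agent internals.
MAX_TOKENS = {
    'bedrock/us.meta.llama4-scout-17b-instruct-v1:0': 128000,
    'bedrock/us.meta.llama4-maverick-17b-instruct-v1:0': 128000,
}

def get_max_tokens(model: str, default: int = 8192) -> int:
    """Return the configured context-window size for a model, or a safe default."""
    return MAX_TOKENS.get(model, default)

print(get_max_tokens('bedrock/us.meta.llama4-scout-17b-instruct-v1:0'))  # 128000
```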