Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-02 11:50:37 +08:00)
adding huggingface inference support + litellm debugger
@ -135,6 +135,14 @@ python pr_agent/cli.py --pr_url <pr_url> describe
 python pr_agent/cli.py --pr_url <pr_url> improve
 ```
 
+5. **Debugging LLM API Calls**
+
+If you're testing your codium/pr-agent server and need to see whether calls were made successfully, along with the exact call logs, you can use the [LiteLLM Debugger tool](https://docs.litellm.ai/docs/debugging/hosted_debugging).
+
+You can do this by setting `litellm_debugger=true` in `configuration.toml`. Your logs will be viewable in real time at `admin.litellm.ai/<your_email>`. Set your email in `.secrets.toml` under `user_email`.
+
+<img src="./pics/debugger.png" width="900"/>
+
 ---
 
 #### Method 4: Run as a polling server
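For orientation, the debugging setup described in step 5 above boils down to two small config entries. The sketch below is not part of the commit: the `[config]` section name is inferred from the `get_settings().config.litellm_debugger` lookup in the handler change further down, and placing `user_email` at the top level of `.secrets.toml` is an assumption, since the README only names the file and the key.

```toml
# .secrets.toml -- email used for the hosted debugger dashboard
# (the exact section is not shown in this commit; a top-level key is assumed)
user_email = "you@example.com"

# configuration.toml -- turn on the LiteLLM debugger; the [config] section
# name matches the get_settings().config.litellm_debugger lookup below
[config]
litellm_debugger=true
```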
BIN pics/debugger.png (new file; binary file not shown; 534 KiB)
@ -26,6 +26,7 @@ class AiHandler:
         try:
             openai.api_key = get_settings().openai.key
             litellm.openai_key = get_settings().openai.key
+            litellm.debugger = get_settings().config.litellm_debugger
             self.azure = False
             if get_settings().get("OPENAI.ORG", None):
                 litellm.organization = get_settings().openai.org
@ -43,6 +44,12 @@ class AiHandler:
             litellm.cohere_key = get_settings().cohere.key
             if get_settings().get("REPLICATE.KEY", None):
                 litellm.replicate_key = get_settings().replicate.key
+            if get_settings().get("REPLICATE.KEY", None):
+                litellm.replicate_key = get_settings().replicate.key
+            if get_settings().get("HUGGINGFACE.KEY", None):
+                litellm.huggingface_key = get_settings().huggingface.key
+            if get_settings().get("HUGGINGFACE.KEY", None):
+                litellm.huggingface_key = get_settings().huggingface.key
         except AttributeError as e:
             raise ValueError("OpenAI key is required") from e
@ -82,6 +89,8 @@ class AiHandler:
                 f"Generating completion with {model}"
                 f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
             )
+            if self.azure:
+                model = self.azure + "/" + model
             response = await acompletion(
                 model=model,
                 deployment_id=deployment_id,
@ -90,7 +99,6 @@ class AiHandler:
                     {"role": "user", "content": user}
                 ],
                 temperature=temperature,
-                azure=self.azure,
                 force_timeout=get_settings().config.ai_timeout
             )
         except (APIError, Timeout, TryAgain) as e:
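A minimal sketch (not from the commit) of how the new settings feed a completion call. The key and debugger assignments mirror the hunks above; the model id, prompts, and response handling are illustrative, and Hugging Face model routing differs across litellm versions, so treat this as an assumption against litellm ~0.1.445 rather than pr-agent's actual call path.

```python
# Sketch only: exercising the settings wired up in the diff above.
import asyncio

import litellm
from litellm import acompletion

litellm.huggingface_key = "hf_..."  # normally filled from .secrets.toml via get_settings()
litellm.debugger = True             # mirrors litellm_debugger=true in configuration.toml

async def main() -> None:
    response = await acompletion(
        model="bigcode/starcoder",  # illustrative Hugging Face model id
        messages=[
            {"role": "system", "content": "You review pull requests."},
            {"role": "user", "content": "Summarize the changes in this diff."},
        ],
        temperature=0.2,
        force_timeout=180,  # same knob the handler passes from config.ai_timeout
    )
    # litellm returns an OpenAI-style response structure
    print(response["choices"][0]["message"]["content"])

asyncio.run(main())
```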
@ -10,6 +10,7 @@ use_repo_settings_file=true
 ai_timeout=180
 max_description_tokens = 500
 max_commits_tokens = 500
+litellm_debugger=false
 
 [pr_reviewer] # /review #
 require_focused_review=true
@ -14,4 +14,4 @@ GitPython~=3.1.32
 litellm~=0.1.351
 PyYAML==6.0
 starlette-context==0.3.6
-litellm~=0.1.351
+litellm~=0.1.445
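Since only the version pin changed, picking it up in an existing environment is a routine reinstall (not something this commit automates), for example:

```bash
# refresh the environment so the bumped litellm pin takes effect
pip install -r requirements.txt
```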