diff --git a/INSTALL.md b/INSTALL.md
index 73848ade..1dccd80f 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -135,6 +135,11 @@
 python pr_agent/cli.py --pr_url <pr_url> describe
 python pr_agent/cli.py --pr_url <pr_url> improve
 ```
+5. **Debugging LLM API Calls**
+If you are testing your Codium/pr-agent server and need to verify that calls were made successfully, along with the exact call logs, you can use the [LiteLLM Debugger tool](https://docs.litellm.ai/docs/debugging/hosted_debugging).
+
+Enable it by setting `litellm_debugger=true` in configuration.toml. Your logs will be viewable in real time at `admin.litellm.ai/`. Set your email in `.secrets.toml` under `user_email`.
+
 ---
 
 #### Method 4: Run as a polling server
diff --git a/pics/debugger.png b/pics/debugger.png
new file mode 100644
index 00000000..7d8f201f
Binary files /dev/null and b/pics/debugger.png differ
diff --git a/pr_agent/algo/ai_handler.py b/pr_agent/algo/ai_handler.py
index fb5f64fe..db541bae 100644
--- a/pr_agent/algo/ai_handler.py
+++ b/pr_agent/algo/ai_handler.py
@@ -26,6 +26,7 @@ class AiHandler:
         try:
             openai.api_key = get_settings().openai.key
             litellm.openai_key = get_settings().openai.key
+            litellm.debugger = get_settings().config.litellm_debugger
             self.azure = False
             if get_settings().get("OPENAI.ORG", None):
                 litellm.organization = get_settings().openai.org
@@ -43,6 +44,8 @@ class AiHandler:
                 litellm.cohere_key = get_settings().cohere.key
             if get_settings().get("REPLICATE.KEY", None):
                 litellm.replicate_key = get_settings().replicate.key
+            if get_settings().get("HUGGINGFACE.KEY", None):
+                litellm.huggingface_key = get_settings().huggingface.key
         except AttributeError as e:
             raise ValueError("OpenAI key is required") from e
 
@@ -82,6 +89,8 @@ class AiHandler:
                 f"Generating completion with {model}"
                 f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
             )
+            if self.azure:
+                model = 'azure/' + model
             response = await acompletion(
                 model=model,
                 deployment_id=deployment_id,
@@ -90,7 +99,6 @@ class AiHandler:
                     {"role": "user", "content": user}
                 ],
                 temperature=temperature,
-                azure=self.azure,
                 force_timeout=get_settings().config.ai_timeout
             )
         except (APIError, Timeout, TryAgain) as e:
diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index ce920efd..4271ec39 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -10,6 +10,7 @@ use_repo_settings_file=true
 ai_timeout=180
 max_description_tokens = 500
 max_commits_tokens = 500
+litellm_debugger=false
 
 [pr_reviewer] # /review
 # require_focused_review=true
diff --git a/requirements.txt b/requirements.txt
index 470fc6ef..ed3bf742 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,4 +14,4 @@ GitPython~=3.1.32
 litellm~=0.1.351
 PyYAML==6.0
 starlette-context==0.3.6
-litellm~=0.1.351
+litellm~=0.1.445
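
For reviewers, below is a minimal standalone sketch (not part of the diff) of how the pieces above are meant to fit together: the `litellm.debugger` flag set from `litellm_debugger` in configuration.toml, the `azure/` model prefix that replaces the removed `azure=` keyword argument, and the `force_timeout` value read from `ai_timeout`. It uses only the litellm attributes that already appear in `ai_handler.py`; the API key, deployment name, and messages are placeholders.

```python
import asyncio

import litellm
from litellm import acompletion


async def demo() -> None:
    # Mirrors AiHandler.__init__: configure the API key and the debugger flag
    # (the equivalent of litellm_debugger=true in configuration.toml).
    litellm.openai_key = "sk-..."  # placeholder key
    litellm.debugger = True

    # Mirrors the Azure branch in chat_completion: prefix the deployment name
    # with "azure/" instead of passing the removed azure= keyword argument.
    model = "azure/" + "my-gpt4-deployment"  # placeholder deployment name

    response = await acompletion(
        model=model,
        messages=[
            {"role": "system", "content": "You are a PR review assistant."},
            {"role": "user", "content": "Summarize this diff."},
        ],
        temperature=0.2,
        force_timeout=180,  # same value the handler reads from config.ai_timeout
    )
    print(response)


if __name__ == "__main__":
    asyncio.run(demo())
```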