Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-02 03:40:38 +08:00)
fix: typos
@@ -90,7 +90,7 @@ duplicate_examples=true # will duplicate the examples in the prompt, to help the
 api_base = "http://localhost:11434" # or whatever port you're running Ollama on
 ```
 
-By default, Ollama uses a context window size of 2048 tokens. In most cases this is not enough to cover pr-agent promt and pull-request diff. Context window size can be overridden with the `OLLAMA_CONTEXT_LENGTH` environment variable. For example, to set the default context length to 8K, use: `OLLAMA_CONTEXT_LENGTH=8192 ollama serve`. More information you can find on the [official ollama faq](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size).
+By default, Ollama uses a context window size of 2048 tokens. In most cases this is not enough to cover pr-agent prompt and pull-request diff. Context window size can be overridden with the `OLLAMA_CONTEXT_LENGTH` environment variable. For example, to set the default context length to 8K, use: `OLLAMA_CONTEXT_LENGTH=8192 ollama serve`. More information you can find on the [official ollama faq](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size).
 
 Please note that the `custom_model_max_tokens` setting should be configured in accordance with the `OLLAMA_CONTEXT_LENGTH`. Failure to do so may result in unexpected model output.
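The corrected paragraph ties together two limits that must be kept in sync: Ollama's server-side context window and pr-agent's `custom_model_max_tokens`. As a minimal configuration sketch, assuming Ollama was started with `OLLAMA_CONTEXT_LENGTH=8192 ollama serve` on its default port (the model name here is illustrative, not prescribed by the docs):

```toml
[config]
model = "ollama/llama3.1"            # illustrative model name
custom_model_max_tokens = 8192       # keep equal to OLLAMA_CONTEXT_LENGTH

[ollama]
api_base = "http://localhost:11434"  # Ollama's default port
```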
@@ -131,7 +131,7 @@ class LiteLLMAIHandler(BaseAiHandler):
             self.api_base = openrouter_api_base
             litellm.api_base = openrouter_api_base
 
-        # Models that only use user meessage
+        # Models that only use user message
         self.user_message_only_models = USER_MESSAGE_ONLY_MODELS
 
         # Model that doesn't support temperature argument
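For context on why a `USER_MESSAGE_ONLY_MODELS` list exists: some models reject a separate system role, so a handler typically folds the system prompt into the user turn. A hedged sketch of that pattern, not the handler's actual code (the model name and helper are assumptions):

```python
# Sketch: collapsing a system prompt into the user message for models
# that accept only user-role input. The model name is illustrative.
USER_MESSAGE_ONLY_MODELS = ["deepseek/deepseek-reasoner"]

def build_messages(model: str, system: str, user: str) -> list[dict]:
    if model in USER_MESSAGE_ONLY_MODELS:
        # No system role allowed: prepend the system text to the user turn.
        return [{"role": "user", "content": f"{system}\n\n{user}"}]
    return [{"role": "system", "content": system},
            {"role": "user", "content": user}]
```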
@@ -212,7 +212,7 @@ class LiteLLMAIHandler(BaseAiHandler):
 
         return kwargs
 
-    def add_litellm_callbacks(selfs, kwargs) -> dict:
+    def add_litellm_callbacks(self, kwargs) -> dict:
         captured_extra = []
 
         def capture_logs(message):
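The `selfs` to `self` fix is cosmetic rather than behavioral: Python binds the instance to the first positional parameter regardless of its name, so the misspelled version still ran correctly. A small self-contained demonstration:

```python
class Demo:
    def method(selfs):  # first parameter receives the instance, whatever its name
        return type(selfs).__name__

print(Demo().method())  # prints "Demo": the misspelling never raised an error
```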
@@ -1289,7 +1289,7 @@ def process_description(description_full: str) -> Tuple[str, List]:
             pattern_back = r'<details>\s*<summary><strong>(.*?)</strong><dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\n\n\s*(.*?)</details>'
             res = re.search(pattern_back, file_data, re.DOTALL)
             if not res or res.lastindex != 4:
-                pattern_back = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>' # looking for hypen ('- ')
+                pattern_back = r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>' # looking for hyphen ('- ')
                 res = re.search(pattern_back, file_data, re.DOTALL)
             if res and res.lastindex == 4:
                 short_filename = res.group(1).strip()
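To see what the hyphen fallback catches, here is a small, self-contained run of the second pattern against a synthetic `<details>` block (the sample HTML is made up for illustration; real input comes from a PR description):

```python
import re

# Synthetic file entry in the collapsed-details format the parser expects.
file_data = (
    "<details><summary><strong>utils.py</strong> "
    "<dd><code>pr_agent/algo/utils.py</code></summary><hr>"
    "Refactor helpers - Added a new helper function</details>"
)

pattern_back = (r'<details>\s*<summary><strong>(.*?)</strong>\s*<dd><code>(.*?)</code>'
                r'.*?</summary>\s*<hr>\s*(.*?)\s*-\s*(.*?)\s*</details>')
res = re.search(pattern_back, file_data, re.DOTALL)
assert res is not None and res.lastindex == 4
print(res.group(1))  # utils.py
print(res.group(4))  # Added a new helper function
```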
@@ -1,12 +1,12 @@
 [pr_help_prompts]
 system="""You are Doc-helper, a language models designed to answer questions about a documentation website for an open-soure project called "PR-Agent" (recently renamed to "Qodo Merge").
-You will recieve a question, and the full documentation website content.
+You will receive a question, and the full documentation website content.
 Your goal is to provide the best answer to the question using the documentation provided.
 
 Additional instructions:
 - Try to be short and concise in your answers. Try to give examples if needed.
 - The main tools of PR-Agent are 'describe', 'review', 'improve'. If there is ambiguity to which tool the user is referring to, prioritize snippets of these tools over others.
-- If the question has ambiguity and can relate to different tools or platfroms, provide the best answer possible based on what is available, but also state in your answer what additional information would be needed to give a more accurate answer.
+- If the question has ambiguity and can relate to different tools or platforms, provide the best answer possible based on what is available, but also state in your answer what additional information would be needed to give a more accurate answer.
 
 
 The output must be a YAML object equivalent to type $DocHelper, according to the following Pydantic definitions:
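The actual Pydantic definitions fall outside this hunk's window. Purely to illustrate the "YAML object equivalent to a Pydantic type" contract the prompt describes, a hypothetical stand-in might look like this (the field names are invented, not the project's real schema):

```python
from pydantic import BaseModel

class DocHelper(BaseModel):       # hypothetical stand-in for the real $DocHelper
    user_question: str            # invented field names, for illustration only
    response: str
    relevant_sections: list[str]
```

The model would then be expected to emit YAML whose top-level keys mirror those fields.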
@@ -21,7 +21,7 @@ from pr_agent.servers.help import HelpMessage
 
 #Common code that can be called from similar tools:
 def modify_answer_section(ai_response: str) -> str | None:
-    # Gets the model's answer and relevant sources section, repacing the heading of the answer section with:
+    # Gets the model's answer and relevant sources section, replacing the heading of the answer section with:
     # :bulb: Auto-generated documentation-based answer:
     """
     For example: The following input: