docs: improve Ollama and Hugging Face model configuration docs

Author: mrT23
Date: 2025-01-02 11:16:21 +02:00
Parent: f6b470bf5e
Commit: 5971a06d73
3 changed files with 7 additions and 27 deletions


@@ -12,7 +12,6 @@ global_settings = Dynaconf(
envvar_prefix=False,
merge_enabled=True,
settings_files=[join(current_dir, f) for f in [
"settings/.secrets.toml",
"settings/configuration.toml",
"settings/ignore.toml",
"settings/language_extensions.toml",
@@ -29,6 +28,7 @@ global_settings = Dynaconf(
"settings/pr_add_docs.toml",
"settings/custom_labels.toml",
"settings/pr_help_prompts.toml",
"settings/.secrets.toml",
"settings_prod/.secrets.toml",
]]
)
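The reordering above matters because Dynaconf loads settings_files in list order and, with merge_enabled=True, later files win on conflicting keys; moving settings/.secrets.toml to the end lets user-provided secrets override the shipped defaults instead of being overwritten by them. A minimal standalone sketch of that behaviour, not taken from the repository's loader (the key name openai_key is purely illustrative):

from dynaconf import Dynaconf

# Files listed later override earlier ones on conflicting keys,
# so the secrets file placed last takes precedence over the defaults.
settings = Dynaconf(
    merge_enabled=True,
    settings_files=[
        "settings/configuration.toml",  # shipped defaults, loaded first
        "settings/.secrets.toml",       # user secrets, loaded last
    ],
)

# If both files set the same key, the value from .secrets.toml is returned.
print(settings.get("openai_key", "not set"))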


@@ -2,6 +2,7 @@
# models
model="gpt-4o-2024-11-20"
fallback_models=["gpt-4o-2024-08-06"]
+custom_model_max_tokens=-1 # for models not in the default list
#model_weak="gpt-4o-mini-2024-07-18" # optional, a weaker model to use for some easier tasks
# CLI
git_provider="github"
@@ -21,7 +22,6 @@ skip_keys = []
max_description_tokens = 500
max_commits_tokens = 500
max_model_tokens = 32000 # Limits the maximum number of tokens that can be used by any model, regardless of the model's default capabilities.
-custom_model_max_tokens=-1 # for models not in the default list
# patch extension logic
patch_extension_skip_types =[".md",".txt"]
allow_dynamic_context=true
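custom_model_max_tokens, now grouped with the other model settings, is the value to set when running a model that is not in the tool's built-in token table, which is the usual case for the self-hosted Ollama or Hugging Face models this commit's docs cover; -1 means no override is configured. A rough sketch of that fallback, with the table contents, function name, and example Ollama model name all illustrative rather than taken from the repository:

# Known models resolve their limit from a built-in table; unknown ones fall
# back to custom_model_max_tokens, and -1 (the default) signals "not set".
MAX_TOKENS = {
    "gpt-4o-2024-11-20": 128000,
    "gpt-4o-2024-08-06": 128000,
}

def get_max_tokens(model: str, custom_model_max_tokens: int = -1) -> int:
    if model in MAX_TOKENS:
        return MAX_TOKENS[model]
    if custom_model_max_tokens > 0:
        return custom_model_max_tokens
    raise ValueError(
        f"{model} is not in the default model list; "
        "set custom_model_max_tokens to its context window."
    )

# e.g. a locally served Ollama model the default table does not know about:
print(get_max_tokens("ollama/qwen2.5-coder", custom_model_max_tokens=32768))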