Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-02 03:40:38 +08:00)
Improve dynamic context handling with partial line matching and adjust model configuration
@@ -130,14 +130,24 @@ def process_patch_lines(patch_str, original_file_str, patch_extra_lines_before,
                     if file_new_lines:
                         delta_lines_new = [f' {line}' for line in file_new_lines[extended_start2 - 1:start2 - 1]]
                         if delta_lines_original != delta_lines_new:
-                            get_logger().debug(f"Extra lines before hunk are different in original and new file",
-                                               artifact={"delta_lines_original": delta_lines_original,
-                                                         "delta_lines_new": delta_lines_new})
-                            extended_start1 = start1
-                            extended_size1 = size1
-                            extended_start2 = start2
-                            extended_size2 = size2
-                            delta_lines_original = []
+                            found_mini_match = False
+                            for i in range(len(delta_lines_original)):
+                                if delta_lines_original[i:] == delta_lines_new[i:]:
+                                    delta_lines_original = delta_lines_original[i:]
+                                    delta_lines_new = delta_lines_new[i:]
+                                    extended_start1 += i
+                                    extended_start2 += i
+                                    found_mini_match = True
+                                    break
+                            if not found_mini_match:
+                                extended_start1 = start1
+                                extended_size1 = size1
+                                extended_start2 = start2
+                                extended_size2 = size2
+                                delta_lines_original = []
+                            # get_logger().debug(f"Extra lines before hunk are different in original and new file",
+                            #                    artifact={"delta_lines_original": delta_lines_original,
+                            #                              "delta_lines_new": delta_lines_new})
 
                 # logic to remove section header if its in the extra delta lines (in dynamic context, this is also done)
                 if section_header and not allow_dynamic_context:
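The added block implements partial line matching for the extra context pulled in before a hunk: instead of discarding the extension whenever the original and new file versions disagree, it keeps the longest common suffix of the two delta-line lists and shifts the extended start positions past the mismatching prefix, falling back to the plain hunk bounds only when no suffix matches. A minimal standalone sketch of that idea follows; the function name and the demo values are illustrative and not code from the repository.

# Illustrative sketch of the suffix-matching ("mini match") logic added above.
def trim_to_common_suffix(delta_lines_original, delta_lines_new,
                          extended_start1, extended_start2,
                          start1, start2):
    """Keep the common suffix of the extra context lines, shifting the extended
    hunk starts past the mismatching prefix; with no common suffix, fall back
    to the un-extended hunk positions (the `found_mini_match = False` branch)."""
    for i in range(len(delta_lines_original)):
        if delta_lines_original[i:] == delta_lines_new[i:]:
            return (delta_lines_original[i:], delta_lines_new[i:],
                    extended_start1 + i, extended_start2 + i)
    return [], [], start1, start2


if __name__ == "__main__":
    original = [" def foo():", "     x = 1", "     return x"]
    new = [" def foo():  # comment added later", "     x = 1", "     return x"]
    # Only the first extra line differs, so the last two lines are kept and the
    # extended start positions move forward by one.
    print(trim_to_common_suffix(original, new, 10, 20, 13, 23))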
@@ -6,9 +6,9 @@
 
 [config]
 # models
-model_reasoning="o4-mini"
-model="gpt-4.1"
-fallback_models=["o4-mini"]
+model="o4-mini"
+fallback_models=["gpt-4.1"]
+#model_reasoning="o4-mini" # dedictated reasoning model for self-reflection
 #model_weak="gpt-4o" # optional, a weaker model to use for some easier tasks
 # CLI
 git_provider="github"
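The configuration change above makes o4-mini the primary model and keeps gpt-4.1 as the fallback. As a rough illustration of what `model` and `fallback_models` mean at runtime, the sketch below tries the primary model first and then each fallback in order; `run_with_fallbacks` and `call_model` are placeholders for this example, not pr-agent APIs.

from typing import Callable, Sequence

def run_with_fallbacks(prompt: str,
                       call_model: Callable[[str, str], str],
                       model: str = "o4-mini",
                       fallback_models: Sequence[str] = ("gpt-4.1",)) -> str:
    """Return the first successful completion, trying `model` then each fallback."""
    last_error: Exception | None = None
    for candidate in (model, *fallback_models):
        try:
            return call_model(candidate, prompt)
        except Exception as err:  # a real implementation would catch narrower errors
            last_error = err
    raise RuntimeError(f"All models failed; last error: {last_error}")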
@@ -36,8 +36,8 @@ model_token_count_estimate_factor=0.3 # factor to increase the token count estim
 # patch extension logic
 patch_extension_skip_types =[".md",".txt"]
 allow_dynamic_context=true
-max_extra_lines_before_dynamic_context = 8 # will try to include up to 10 extra lines before the hunk in the patch, until we reach an enclosing function or class
-patch_extra_lines_before = 3 # Number of extra lines (+3 default ones) to include before each hunk in the patch
+max_extra_lines_before_dynamic_context = 10 # will try to include up to 10 extra lines before the hunk in the patch, until we reach an enclosing function or class
+patch_extra_lines_before = 5 # Number of extra lines (+3 default ones) to include before each hunk in the patch
 patch_extra_lines_after = 1 # Number of extra lines (+3 default ones) to include after each hunk in the patch
 secret_provider=""
 cli_mode=false
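The two settings changed above control how much extra context is attached before each hunk: with dynamic context enabled, the patch can be extended by up to `max_extra_lines_before_dynamic_context` lines, stopping early once an enclosing function or class header is reached, while `patch_extra_lines_before` is the fixed extension used otherwise. The sketch below only illustrates that rule for Python sources, under the behavior described by the config comments; it is not pr-agent's implementation.

def extended_hunk_start(start_line, file_lines, allow_dynamic_context=True,
                        patch_extra_lines_before=5,
                        max_extra_lines_before_dynamic_context=10):
    """Return the 1-based line at which to start the extended hunk context."""
    if not allow_dynamic_context:
        return max(1, start_line - patch_extra_lines_before)
    # Dynamic context: walk backwards up to the configured maximum, stopping as
    # soon as an enclosing def/class header is found so it lands inside the hunk.
    lowest = max(1, start_line - max_extra_lines_before_dynamic_context)
    for line_no in range(start_line - 1, lowest - 1, -1):
        if file_lines[line_no - 1].lstrip().startswith(("def ", "class ")):
            return line_no
    # No enclosing header within the window: fall back to the fixed extension.
    return max(1, start_line - patch_extra_lines_before)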