Mirror of https://github.com/qodo-ai/pr-agent.git (synced 2025-07-17 11:00:39 +08:00)
add tests and update README.md
tests/unittest/test_convert_to_markdown.py (new file, 117 lines)
@@ -0,0 +1,117 @@
# Generated by CodiumAI
from pr_agent.algo.utils import convert_to_markdown

"""
Code Analysis

Objective:
The objective of the 'convert_to_markdown' function is to convert a dictionary of data into a markdown-formatted text. The function takes in a dictionary as input and recursively iterates through its keys and values to generate the markdown text.

Inputs:
- A dictionary of data containing information about a pull request.

Flow:
- Initialize an empty string variable 'markdown_text'.
- Create a dictionary 'emojis' containing emojis for each key in the input dictionary.
- Iterate through the input dictionary:
  - If the value is empty, continue to the next iteration.
  - If the value is a dictionary, recursively call the 'convert_to_markdown' function with the value as input and append the returned markdown text to 'markdown_text'.
  - If the value is a list:
    - If the key is 'code suggestions', add an additional line break to 'markdown_text'.
    - Get the corresponding emoji for the key from the 'emojis' dictionary. If no emoji is found, use a dash.
    - Append the emoji and key to 'markdown_text'.
    - Iterate through the items in the list:
      - If the item is a dictionary and the key is 'code suggestions', call the 'parse_code_suggestion' function with the item as input and append the returned markdown text to 'markdown_text'.
      - If the item is not empty, append it to 'markdown_text'.
  - If the value is not 'n/a', get the corresponding emoji for the key from the 'emojis' dictionary. If no emoji is found, use a dash. Append the emoji, key, and value to 'markdown_text'.
- Return 'markdown_text'.

Outputs:
- A markdown-formatted string containing the information from the input dictionary.

Additional aspects:
- The function uses recursion to handle nested dictionaries.
- The 'parse_code_suggestion' function is called for items in the 'code suggestions' list.
- The function uses emojis to add visual cues to the markdown text.
"""
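# Illustrative sketch added for clarity; not part of the original test file or of
# pr_agent.algo.utils. Based only on the flow described in the docstring above, the
# function under test might look roughly like this. The emoji mapping is copied from the
# expected output in the test below; every other detail here is an assumption rather than
# the actual implementation.
def _convert_to_markdown_sketch(output_data: dict) -> str:
    from pr_agent.algo.utils import parse_code_suggestion  # helper named in the docstring above
    emojis = {'Main theme': '🎯', 'Type of PR': '📌', 'Relevant tests added': '🧪',
              'Focused PR': '✨', 'General PR suggestions': '💡', 'Code suggestions': '🤖'}
    markdown_text = ""
    for key, value in output_data.items():
        if not value:
            continue  # skip empty values
        if isinstance(value, dict):
            markdown_text += _convert_to_markdown_sketch(value)  # recurse into nested dicts
        elif isinstance(value, list):
            if key.lower() == 'code suggestions':
                markdown_text += "\n"  # extra line break before the suggestions block
            markdown_text += f"- {emojis.get(key, '-')} **{key}:**\n"
            for item in value:
                if isinstance(item, dict) and key.lower() == 'code suggestions':
                    markdown_text += parse_code_suggestion(item)
                elif item:
                    markdown_text += f"  - {item}\n"
        elif value != 'n/a':
            markdown_text += f"- {emojis.get(key, '-')} **{key}:** {value}\n"
    return markdown_text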
class TestConvertToMarkdown:
    # Tests that the function works correctly with a simple dictionary input
    def test_simple_dictionary_input(self):
        input_data = {
            'Main theme': 'Test',
            'Type of PR': 'Test type',
            'Relevant tests added': 'no',
            'Unrelated changes': 'n/a',  # won't be included in the output
            'Focused PR': 'Yes',
            'General PR suggestions': 'general suggestion...',
            'Code suggestions': [
                {
                    'Code example': {
                        'Before': 'Code before',
                        'After': 'Code after'
                    }
                },
                {
                    'Code example': {
                        'Before': 'Code before 2',
                        'After': 'Code after 2'
                    }
                }
            ]
        }
        expected_output = """\
- 🎯 **Main theme:** Test
- 📌 **Type of PR:** Test type
- 🧪 **Relevant tests added:** no
- ✨ **Focused PR:** Yes
- 💡 **General PR suggestions:** general suggestion...

- 🤖 **Code suggestions:**

- **Code example:**
- **Before:**
```
Code before
```
- **After:**
```
Code after
```

- **Code example:**
- **Before:**
```
Code before 2
```
- **After:**
```
Code after 2
```
"""
        assert convert_to_markdown(input_data).strip() == expected_output.strip()

    # Tests that the function works correctly with an empty dictionary input
    def test_empty_dictionary_input(self):
        input_data = {}
        expected_output = ""
        assert convert_to_markdown(input_data).strip() == expected_output.strip()

    def test_dictionary_input_containing_only_empty_dictionaries(self):
        input_data = {
            'Main theme': {},
            'Type of PR': {},
            'Relevant tests added': {},
            'Unrelated changes': {},
            'Focused PR': {},
            'General PR suggestions': {},
            'Code suggestions': {}
        }
        expected_output = ""
        assert convert_to_markdown(input_data).strip() == expected_output.strip()
tests/unittest/test_delete_hunks.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# Generated by CodiumAI

from pr_agent.algo.git_patch_processing import omit_deletion_hunks

"""
Code Analysis

Objective:
The objective of the "omit_deletion_hunks" function is to remove deletion hunks from a patch file and return only the added lines.

Inputs:
- "patch_lines": a list of strings representing the lines of a patch file.

Flow:
- Initialize empty lists "temp_hunk" and "added_patched", and boolean variables "add_hunk" and "inside_hunk".
- Compile a regular expression pattern to match hunk headers.
- Iterate through each line in "patch_lines".
  - If the line starts with "@@", match the line with the hunk header pattern, finish the previous hunk if necessary, and append the line to "temp_hunk".
  - If the line does not start with "@@", append the line to "temp_hunk", check if it is an added line, and set "add_hunk" to True if it is.
- If the function reaches the end of "patch_lines" and there is an unfinished hunk with added lines, append it to "added_patched".
- Join the lines in "added_patched" with newline characters and return the resulting string.

Outputs:
- A string representing the added lines in the patch file.

Additional aspects:
- The function only considers hunks with added lines and ignores hunks with deleted lines.
- The function assumes that the input patch file is well-formed and follows the unified diff format.
"""
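# Illustrative sketch added for clarity; not part of the original test file. A possible
# shape of omit_deletion_hunks, reconstructed only from the flow described above. The
# hunk-header regex and the handling of lines outside any hunk are assumptions; the real
# implementation in pr_agent.algo.git_patch_processing may differ.
def _omit_deletion_hunks_sketch(patch_lines: list) -> str:
    import re
    added_patched = []   # finished hunks that contain at least one added line
    temp_hunk = []       # lines of the hunk currently being collected
    add_hunk = False     # does the current hunk contain an added line?
    header_re = re.compile(r'^@@ -\d+(?:,\d+)? \+\d+(?:,\d+)? @@')  # assumed hunk-header pattern
    for line in patch_lines:
        if line.startswith('@@') and header_re.match(line):
            # A new hunk starts: keep the previous one only if it added something.
            if add_hunk:
                added_patched.extend(temp_hunk)
            temp_hunk = [line]
            add_hunk = False
        else:
            temp_hunk.append(line)
            if line.startswith('+'):
                add_hunk = True
    if add_hunk:  # flush the final, unfinished hunk if it had additions
        added_patched.extend(temp_hunk)
    return '\n'.join(added_patched)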
class TestOmitDeletionHunks:
    # Tests that the function correctly handles a simple patch containing only additions
    def test_simple_patch_additions(self):
        patch_lines = ['@@ -1,0 +1,1 @@\n', '+added line\n']
        expected_output = '@@ -1,0 +1,1 @@\n\n+added line\n'
        assert omit_deletion_hunks(patch_lines) == expected_output

    # Tests that the function correctly omits deletion hunks and concatenates multiple hunks in a patch.
    def test_patch_multiple_hunks(self):
        patch_lines = ['@@ -1,0 +1,1 @@\n', '-deleted line', '+added line\n', '@@ -2,0 +3,1 @@\n', '-deleted line\n',
                       '-another deleted line\n']
        expected_output = '@@ -1,0 +1,1 @@\n\n-deleted line\n+added line\n'
        assert omit_deletion_hunks(patch_lines) == expected_output

    # Tests that the function correctly omits deletion lines from the patch when there are no additions or context
    # lines.
    def test_patch_only_deletions(self):
        patch_lines = ['@@ -1,1 +1,0 @@\n', '-deleted line\n']
        expected_output = ''
        assert omit_deletion_hunks(patch_lines) == expected_output

        # Additional deletion lines
        patch_lines = ['@@ -1,1 +1,0 @@\n', '-deleted line\n', '-another deleted line\n']
        expected_output = ''
        assert omit_deletion_hunks(patch_lines) == expected_output

        # Additional context lines
        patch_lines = ['@@ -1,1 +1,0 @@\n', '-deleted line\n', '-another deleted line\n', 'context line 1\n',
                       'context line 2\n', 'context line 3\n']
        expected_output = ''
        assert omit_deletion_hunks(patch_lines) == expected_output

    # Tests that the function correctly handles an empty patch
    def test_empty_patch(self):
        patch_lines = []
        expected_output = ''
        assert omit_deletion_hunks(patch_lines) == expected_output

    # Tests that the function correctly handles a patch containing only one hunk
    def test_patch_one_hunk(self):
        patch_lines = ['@@ -1,0 +1,1 @@\n', '+added line\n']
        expected_output = '@@ -1,0 +1,1 @@\n\n+added line\n'
        assert omit_deletion_hunks(patch_lines) == expected_output

    # Tests that the function correctly handles a patch containing only deletions and no additions
    def test_patch_deletions_no_additions(self):
        patch_lines = ['@@ -1,1 +1,0 @@\n', '-deleted line\n']
        expected_output = ''
        assert omit_deletion_hunks(patch_lines) == expected_output
tests/unittest/test_extend_patch.py (new file, 93 lines)
@@ -0,0 +1,93 @@
# Generated by CodiumAI

from pr_agent.algo.git_patch_processing import extend_patch

"""
Code Analysis

Objective:
The objective of the 'extend_patch' function is to extend a given patch to include a specified number of surrounding lines. This function takes in an original file string, a patch string, and the number of lines to extend the patch by, and returns the extended patch string.

Inputs:
- original_file_str: a string representing the original file
- patch_str: a string representing the patch to be extended
- num_lines: an integer representing the number of lines to extend the patch by

Flow:
1. Split the original file string and patch string into separate lines
2. Initialize variables to keep track of the current hunk's start and size for both the original file and the patch
3. Iterate through each line in the patch string
4. If the line starts with '@@', extract the start and size values for both the original file and the patch, and calculate the extended start and size values
5. Append the extended hunk header to the extended patch lines list
6. Append the specified number of lines before the hunk to the extended patch lines list
7. Append the current line to the extended patch lines list
8. If the line is not a hunk header, append it to the extended patch lines list
9. Return the extended patch string

Outputs:
- extended_patch_str: a string representing the extended patch

Additional aspects:
- The function uses regular expressions to extract the start and size values from the hunk header
- The function handles cases where the start value of a hunk is less than the number of lines to extend by, by setting the extended start value to 1
- The function handles cases where the hunk extends beyond the end of the original file by only including lines up to the end of the original file in the extended patch
"""
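# Illustrative sketch added for clarity; not part of the original test file. A possible
# shape of extend_patch, reconstructed from the flow described above and the expectations
# in the tests below. The hunk-header regex (which assumes "start,size" pairs) and the
# trailing-context handling are assumptions; the real implementation in
# pr_agent.algo.git_patch_processing may differ.
def _extend_patch_sketch(original_file_str: str, patch_str: str, num_lines: int) -> str:
    import re
    if not patch_str or num_lines == 0:
        return patch_str
    original_lines = original_file_str.splitlines()
    extended = []
    start1 = size1 = -1
    header_re = re.compile(r'^@@ -(\d+),(\d+) \+(\d+),(\d+) @@(.*)')
    for line in patch_str.splitlines():
        match = header_re.match(line)
        if match:
            if start1 != -1:
                # close the previous hunk with up to num_lines of trailing context
                extended.extend(original_lines[start1 + size1 - 1: start1 + size1 - 1 + num_lines])
            start1, size1, start2, size2 = map(int, match.groups()[:4])
            section_header = match.group(5)
            ext_start1 = max(1, start1 - num_lines)
            # clamp the extended size so the hunk never runs past the end of the original file
            ext_size1 = min(size1 + (start1 - ext_start1) + num_lines, len(original_lines) - ext_start1 + 1)
            ext_start2 = max(1, start2 - num_lines)
            ext_size2 = size2 + (start2 - ext_start2) + num_lines
            extended.append(f'@@ -{ext_start1},{ext_size1} +{ext_start2},{ext_size2} @@{section_header}')
            extended.extend(original_lines[ext_start1 - 1: start1 - 1])  # leading context lines
        else:
            extended.append(line)
    if start1 != -1:
        # trailing context after the last hunk
        extended.extend(original_lines[start1 + size1 - 1: start1 + size1 - 1 + num_lines])
    return '\n'.join(extended)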
class TestExtendPatch:
    # Tests that the function works correctly with valid input
    def test_happy_path(self):
        original_file_str = 'line1\nline2\nline3\nline4\nline5'
        patch_str = '@@ -2,2 +2,2 @@ init()\n-line2\n+new_line2\nline3'
        num_lines = 1
        expected_output = '@@ -1,4 +1,4 @@ init()\nline1\n-line2\n+new_line2\nline3\nline4'
        actual_output = extend_patch(original_file_str, patch_str, num_lines)
        assert actual_output == expected_output

    # Tests that the function returns an empty string when patch_str is empty
    def test_empty_patch(self):
        original_file_str = 'line1\nline2\nline3\nline4\nline5'
        patch_str = ''
        num_lines = 1
        expected_output = ''
        assert extend_patch(original_file_str, patch_str, num_lines) == expected_output

    # Tests that the function returns the original patch when num_lines is 0
    def test_zero_num_lines(self):
        original_file_str = 'line1\nline2\nline3\nline4\nline5'
        patch_str = '@@ -2,2 +2,2 @@ init()\n-line2\n+new_line2\nline3'
        num_lines = 0
        assert extend_patch(original_file_str, patch_str, num_lines) == patch_str

    # Tests that the function returns the original patch when patch_str contains no hunks
    def test_no_hunks(self):
        original_file_str = 'line1\nline2\nline3\nline4\nline5'
        patch_str = 'no hunks here'
        num_lines = 1
        expected_output = 'no hunks here'
        assert extend_patch(original_file_str, patch_str, num_lines) == expected_output

    # Tests that the function extends a patch with a single hunk correctly
    def test_single_hunk(self):
        original_file_str = 'line1\nline2\nline3\nline4\nline5'
        patch_str = '@@ -2,3 +2,3 @@ init()\n-line2\n+new_line2\nline3\nline4'
        num_lines = 1
        expected_output = '@@ -1,5 +1,5 @@ init()\nline1\n-line2\n+new_line2\nline3\nline4\nline5'
        actual_output = extend_patch(original_file_str, patch_str, num_lines)
        assert actual_output == expected_output

    # Tests the functionality of extending a patch with multiple hunks.
    def test_multiple_hunks(self):
        original_file_str = 'line1\nline2\nline3\nline4\nline5\nline6'
        patch_str = '@@ -2,3 +2,3 @@ init()\n-line2\n+new_line2\nline3\nline4\n@@ -4,1 +4,1 @@ init2()\n-line4\n+new_line4'  # noqa: E501
        num_lines = 1
        expected_output = '@@ -1,5 +1,5 @@ init()\nline1\n-line2\n+new_line2\nline3\nline4\nline5\n@@ -3,3 +3,3 @@ init2()\nline3\n-line4\n+new_line4\nline5'  # noqa: E501
        actual_output = extend_patch(original_file_str, patch_str, num_lines)
        assert actual_output == expected_output
tests/unittest/test_fix_output.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# Generated by CodiumAI

from pr_agent.algo.utils import try_fix_json


class TestTryFixJson:
    # Tests that truncated JSON with an incomplete 'Code suggestions' section returns the expected output
    def test_incomplete_code_suggestions(self):
        review = '{"PR Analysis": {"Main theme": "xxx", "Type of PR": "Bug fix"}, "PR Feedback": {"General PR suggestions": "..., `xxx`...", "Code suggestions": [{"relevant file": "xxx.py", "suggestion content": "xxx [important]"}, {"suggestion number": 2, "relevant file": "yyy.py", "suggestion content": "yyy [incomp...'  # noqa: E501
        expected_output = {
            'PR Analysis': {
                'Main theme': 'xxx',
                'Type of PR': 'Bug fix'
            },
            'PR Feedback': {
                'General PR suggestions': '..., `xxx`...',
                'Code suggestions': [
                    {
                        'relevant file': 'xxx.py',
                        'suggestion content': 'xxx [important]'
                    }
                ]
            }
        }
        assert try_fix_json(review) == expected_output

    def test_incomplete_code_suggestions_new_line(self):
        review = '{"PR Analysis": {"Main theme": "xxx", "Type of PR": "Bug fix"}, "PR Feedback": {"General PR suggestions": "..., `xxx`...", "Code suggestions": [{"relevant file": "xxx.py", "suggestion content": "xxx [important]"} \n\t, {"suggestion number": 2, "relevant file": "yyy.py", "suggestion content": "yyy [incomp...'  # noqa: E501
        expected_output = {
            'PR Analysis': {
                'Main theme': 'xxx',
                'Type of PR': 'Bug fix'
            },
            'PR Feedback': {
                'General PR suggestions': '..., `xxx`...',
                'Code suggestions': [
                    {
                        'relevant file': 'xxx.py',
                        'suggestion content': 'xxx [important]'
                    }
                ]
            }
        }
        assert try_fix_json(review) == expected_output

    def test_incomplete_code_suggestions_many_close_brackets(self):
        review = '{"PR Analysis": {"Main theme": "xxx", "Type of PR": "Bug fix"}, "PR Feedback": {"General PR suggestions": "..., `xxx`...", "Code suggestions": [{"relevant file": "xxx.py", "suggestion content": "xxx [important]"} \n, {"suggestion number": 2, "relevant file": "yyy.py", "suggestion content": "yyy }, [}\n ,incomp.} ,..'  # noqa: E501
        expected_output = {
            'PR Analysis': {
                'Main theme': 'xxx',
                'Type of PR': 'Bug fix'
            },
            'PR Feedback': {
                'General PR suggestions': '..., `xxx`...',
                'Code suggestions': [
                    {
                        'relevant file': 'xxx.py',
                        'suggestion content': 'xxx [important]'
                    }
                ]
            }
        }
        assert try_fix_json(review) == expected_output

    def test_incomplete_code_suggestions_relevant_file(self):
        review = '{"PR Analysis": {"Main theme": "xxx", "Type of PR": "Bug fix"}, "PR Feedback": {"General PR suggestions": "..., `xxx`...", "Code suggestions": [{"relevant file": "xxx.py", "suggestion content": "xxx [important]"}, {"suggestion number": 2, "relevant file": "yyy.p'  # noqa: E501
        expected_output = {
            'PR Analysis': {
                'Main theme': 'xxx',
                'Type of PR': 'Bug fix'
            },
            'PR Feedback': {
                'General PR suggestions': '..., `xxx`...',
                'Code suggestions': [
                    {
                        'relevant file': 'xxx.py',
                        'suggestion content': 'xxx [important]'
                    }
                ]
            }
        }
        assert try_fix_json(review) == expected_output
tests/unittest/test_handle_patch_deletions.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# Generated by CodiumAI
import logging

from pr_agent.algo.git_patch_processing import handle_patch_deletions
from pr_agent.config_loader import settings

"""
Code Analysis

Objective:
The objective of the function is to handle entire file or deletion patches and return the patch after omitting the deletion hunks.

Inputs:
- patch: a string representing the patch to be handled
- original_file_content_str: a string representing the original content of the file
- new_file_content_str: a string representing the new content of the file
- file_name: a string representing the name of the file

Flow:
- If new_file_content_str is empty, set patch to "File was deleted" and return it
- Otherwise, split patch into lines and omit the deletion hunks using the omit_deletion_hunks function
- If the resulting patch is different from the original patch, log a message and set patch to the new patch
- Return the resulting patch

Outputs:
- A string representing the patch after omitting the deletion hunks

Additional aspects:
- The function uses the settings from the configuration files to determine the verbosity level of the logging messages
- The omit_deletion_hunks function is called to remove the deletion hunks from the patch
- The function handles the case where the new_file_content_str is empty by setting the patch to "File was deleted"
"""
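# Illustrative sketch added for clarity; not part of the original test file. A possible
# shape of handle_patch_deletions, reconstructed from the flow described above. Note that
# the tests below expect None (rather than the string "File was deleted") for a deleted
# file; the log messages and other details here are assumptions.
def _handle_patch_deletions_sketch(patch, original_file_content_str, new_file_content_str, file_name):
    from pr_agent.algo.git_patch_processing import omit_deletion_hunks
    if not new_file_content_str:
        # The file was deleted: do not show a patch at all.
        if settings.config.verbosity_level > 0:
            logging.info(f"Processing file: {file_name}, minimizing deletion file")
        patch = None
    else:
        patch_lines = patch.splitlines()
        patch_new = omit_deletion_hunks(patch_lines)
        if patch != patch_new:
            if settings.config.verbosity_level > 0:
                logging.info(f"Processing file: {file_name}, hunks were deleted")
            patch = patch_new
    return patch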
class TestHandlePatchDeletions:
    # Tests that handle_patch_deletions returns the original patch when new_file_content_str is not empty
    def test_handle_patch_deletions_happy_path_new_file_content_exists(self):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n+baz\n'
        original_file_content_str = 'foo\nbar\n'
        new_file_content_str = 'foo\nbaz\n'
        file_name = 'file.py'
        assert handle_patch_deletions(patch, original_file_content_str, new_file_content_str,
                                      file_name) == patch.rstrip()

    # Tests that handle_patch_deletions logs a message when verbosity_level is greater than 0
    def test_handle_patch_deletions_happy_path_verbosity_level_greater_than_0(self, caplog):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n+baz\n'
        original_file_content_str = 'foo\nbar\n'
        new_file_content_str = ''
        file_name = 'file.py'
        settings.config.verbosity_level = 1

        with caplog.at_level(logging.INFO):
            handle_patch_deletions(patch, original_file_content_str, new_file_content_str, file_name)
        assert any("Processing file" in message for message in caplog.messages)

    # Tests that handle_patch_deletions returns None when new_file_content_str is empty (the file was deleted)
    def test_handle_patch_deletions_edge_case_new_file_content_empty(self):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n'
        original_file_content_str = 'foo\nbar\n'
        new_file_content_str = ''
        file_name = 'file.py'
        assert handle_patch_deletions(patch, original_file_content_str, new_file_content_str,
                                      file_name) is None

    # Tests that handle_patch_deletions returns the original patch when patch and patch_new are equal
    def test_handle_patch_deletions_edge_case_patch_and_patch_new_are_equal(self):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n'
        original_file_content_str = 'foo\nbar\n'
        new_file_content_str = 'foo\nbar\n'
        file_name = 'file.py'
        assert handle_patch_deletions(patch, original_file_content_str, new_file_content_str,
                                      file_name).rstrip() == patch.rstrip()

    # Tests that handle_patch_deletions returns the modified patch when patch and patch_new are not equal
    def test_handle_patch_deletions_edge_case_patch_and_patch_new_are_not_equal(self):
        patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar\n'
        original_file_content_str = 'foo\nbar\n'
        new_file_content_str = 'foo\nbaz\n'
        file_name = 'file.py'
        expected_patch = '--- a/file.py\n+++ b/file.py\n@@ -1,2 +1,2 @@\n-foo\n-bar'
        assert handle_patch_deletions(patch, original_file_content_str, new_file_content_str,
                                      file_name) == expected_patch
tests/unittest/test_language_handler.py (new file, 123 lines)
@@ -0,0 +1,123 @@
# Generated by CodiumAI

from pr_agent.algo.language_handler import sort_files_by_main_languages

"""
Code Analysis

Objective:
The objective of the function is to sort a list of files by their main language, putting the files that are in the main language first and the rest of the files after. It takes in a dictionary of languages and their sizes, and a list of files.

Inputs:
- languages: a dictionary containing the languages and their sizes
- files: a list of files

Flow:
1. Sort the languages by their size in descending order
2. Get all extensions for the languages
3. Filter out files with bad extensions
4. Sort files by their extension, putting the files that are in the main extension first and the rest of the files after
5. Map languages_sorted to their respective files
6. Append the files to the files_sorted list
7. Append the rest of the files to the files_sorted list under the "Other" language category
8. Return the files_sorted list

Outputs:
- files_sorted: a list of dictionaries containing the language and its respective files

Additional aspects:
- The function uses a language_extension_map dictionary to map the languages to their respective extensions
- The function uses the filter_bad_extensions function to filter out files with bad extensions
- The function uses a rest_files dictionary to store the files that do not belong to any of the main extensions
"""
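# Illustrative sketch added for clarity; not part of the original test file. A simplified
# reimplementation of the flow described above: the extension map and the bad-extension set
# below are small assumed subsets, standing in for the real language_extension_map and
# filter_bad_extensions used by pr_agent.algo.language_handler.
def _sort_files_by_main_languages_sketch(languages: dict, files: list) -> list:
    extension_map = {'Python': ['.py'], 'Java': ['.java'], 'C++': ['.cpp']}  # assumed subset
    bad_extensions = {'.csv', '.pdf'}  # assumed subset

    # 1. main languages first, by descending size
    languages_sorted = [lang for lang, _ in sorted(languages.items(), key=lambda kv: kv[1], reverse=True)]

    # 2-3. drop files whose extension is not reviewable
    good_files = [f for f in files if not any(f.filename.endswith(ext) for ext in bad_extensions)]

    # 4-6. bucket the remaining files by the extensions of each main language
    files_sorted = []
    rest_files = list(good_files)
    for lang in languages_sorted:
        extensions = extension_map.get(lang, [])
        matched = [f for f in rest_files if any(f.filename.endswith(ext) for ext in extensions)]
        if matched:
            files_sorted.append({'language': lang, 'files': matched})
            rest_files = [f for f in rest_files if f not in matched]

    # 7-8. close the list with an "Other" category (left empty, matching the expected outputs below)
    files_sorted.append({'language': 'Other', 'files': []})
    return files_sorted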
class TestSortFilesByMainLanguages:
    # Tests that files are sorted by main language, with files in main language first and the rest after
    def test_happy_path_sort_files_by_main_languages(self):
        languages = {'Python': 10, 'Java': 5, 'C++': 3}
        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})(),
            type('', (object,), {'filename': 'file4.py'})(),
            type('', (object,), {'filename': 'file5.py'})()
        ]
        expected_output = [
            {'language': 'Python', 'files': [files[0], files[3], files[4]]},
            {'language': 'Java', 'files': [files[1]]},
            {'language': 'C++', 'files': [files[2]]},
            {'language': 'Other', 'files': []}
        ]
        assert sort_files_by_main_languages(languages, files) == expected_output

    # Tests that function handles empty languages dictionary
    def test_edge_case_empty_languages(self):
        languages = {}
        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})()
        ]
        expected_output = [{'language': 'Other', 'files': []}]
        assert sort_files_by_main_languages(languages, files) == expected_output

    # Tests that function handles empty files list
    def test_edge_case_empty_files(self):
        languages = {'Python': 10, 'Java': 5}
        files = []
        expected_output = [
            {'language': 'Other', 'files': []}
        ]
        assert sort_files_by_main_languages(languages, files) == expected_output

    # Tests that function handles languages with no extensions
    def test_edge_case_languages_with_no_extensions(self):
        languages = {'Python': 10, 'Java': 5, 'C++': 3}
        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})()
        ]
        expected_output = [
            {'language': 'Python', 'files': [files[0]]},
            {'language': 'Java', 'files': [files[1]]},
            {'language': 'C++', 'files': [files[2]]},
            {'language': 'Other', 'files': []}
        ]
        assert sort_files_by_main_languages(languages, files) == expected_output

    # Tests the behavior of the function when all files have bad extensions and only one new valid file is added.
    def test_edge_case_files_with_bad_extensions_only(self):
        languages = {'Python': 10, 'Java': 5, 'C++': 3}
        files = [
            type('', (object,), {'filename': 'file1.csv'})(),
            type('', (object,), {'filename': 'file2.pdf'})(),
            type('', (object,), {'filename': 'file3.py'})()  # new valid file
        ]
        expected_output = [{'language': 'Python', 'files': [files[2]]}, {'language': 'Other', 'files': []}]
        assert sort_files_by_main_languages(languages, files) == expected_output

    # Tests general behaviour of function
    def test_general_behaviour_sort_files_by_main_languages(self):
        languages = {'Python': 10, 'Java': 5, 'C++': 3}
        files = [
            type('', (object,), {'filename': 'file1.py'})(),
            type('', (object,), {'filename': 'file2.java'})(),
            type('', (object,), {'filename': 'file3.cpp'})(),
            type('', (object,), {'filename': 'file4.py'})(),
            type('', (object,), {'filename': 'file5.py'})(),
            type('', (object,), {'filename': 'file6.py'})(),
            type('', (object,), {'filename': 'file7.java'})(),
            type('', (object,), {'filename': 'file8.cpp'})(),
            type('', (object,), {'filename': 'file9.py'})()
        ]
        expected_output = [
            {'language': 'Python', 'files': [files[0], files[3], files[4], files[5], files[8]]},
            {'language': 'Java', 'files': [files[1], files[6]]},
            {'language': 'C++', 'files': [files[2], files[7]]},
            {'language': 'Other', 'files': []}
        ]
        assert sort_files_by_main_languages(languages, files) == expected_output
tests/unittest/test_parse_code_suggestion.py (new file, 78 lines)
@@ -0,0 +1,78 @@
# Generated by CodiumAI
from pr_agent.algo.utils import parse_code_suggestion

"""
Code Analysis

Objective:
The objective of the function is to convert a dictionary into a markdown format. The function takes in a dictionary as input and recursively converts it into a markdown format. The function is specifically designed to handle dictionaries that contain code suggestions.

Inputs:
- output_data: a dictionary containing the data to be converted into markdown format

Flow:
- Initialize an empty string variable called markdown_text
- Create a dictionary of emojis to be used in the markdown format
- Iterate through the items in the input dictionary
  - If the value is empty, skip to the next item
  - If the value is a dictionary, recursively call the function with the value as input
  - If the value is a list, iterate through the list and add each item to the markdown format
  - If the value is not 'n/a', add it to the markdown format
  - If the key is 'code suggestions', call the parse_code_suggestion function to handle the list of code suggestions
- Return the markdown format as a string

Outputs:
- markdown_text: a string containing the input dictionary converted into markdown format

Additional aspects:
- The function uses the textwrap module to indent code examples in the markdown format
- The parse_code_suggestion function is called to handle the 'code suggestions' key in the input dictionary
- The function uses emojis to add visual cues to the markdown format
"""
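# Illustrative sketch added for clarity; not part of the original test file. A rough
# reimplementation of parse_code_suggestion based on the description above and the expected
# strings in the tests below; the exact indentation widths are placeholders, and the real
# function in pr_agent.algo.utils (which uses textwrap) may format whitespace differently.
def _parse_code_suggestion_sketch(code_suggestions: dict) -> str:
    import textwrap
    markdown_text = ""
    for sub_key, sub_value in code_suggestions.items():
        if isinstance(sub_value, dict):
            # a 'code example' entry: render its before/after values as fenced code blocks
            markdown_text += f" - **{sub_key}:**\n"
            for code_key, code_value in sub_value.items():
                code_str = textwrap.indent(str(code_value), ' ')
                markdown_text += f" - **{code_key}:**\n ```\n{code_str}\n ```\n"
        else:
            markdown_text += f" **{sub_key}:** {sub_value}\n"
    markdown_text += "\n"
    return markdown_text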
class TestParseCodeSuggestion:
    # Tests that function returns empty string when input is an empty dictionary
    def test_empty_dict(self):
        input_data = {}
        expected_output = "\n"  # modified to expect a newline character
        assert parse_code_suggestion(input_data) == expected_output

    # Tests that function returns correct output when 'before' or 'after' key has a non-string value
    def test_non_string_before_or_after(self):
        input_data = {
            "Code example": {
                "Before": 123,
                "After": ["a", "b", "c"]
            }
        }
        expected_output = " - **Code example:**\n - **Before:**\n ```\n 123\n ```\n - **After:**\n ```\n ['a', 'b', 'c']\n ```\n\n"  # noqa: E501
        assert parse_code_suggestion(input_data) == expected_output

    # Tests that function returns correct output when input dictionary does not have 'code example' key
    def test_no_code_example_key(self):
        code_suggestions = {
            'suggestion': 'Suggestion 1',
            'description': 'Description 1',
            'before': 'Before 1',
            'after': 'After 1'
        }
        expected_output = " **suggestion:** Suggestion 1\n **description:** Description 1\n **before:** Before 1\n **after:** After 1\n\n"  # noqa: E501
        assert parse_code_suggestion(code_suggestions) == expected_output

    # Tests that function returns correct output when input dictionary has 'code example' key
    def test_with_code_example_key(self):
        code_suggestions = {
            'suggestion': 'Suggestion 2',
            'description': 'Description 2',
            'code example': {
                'before': 'Before 2',
                'after': 'After 2'
            }
        }
        expected_output = " **suggestion:** Suggestion 2\n **description:** Description 2\n - **code example:**\n - **before:**\n ```\n Before 2\n ```\n - **after:**\n ```\n After 2\n ```\n\n"  # noqa: E501
        assert parse_code_suggestion(code_suggestions) == expected_output
tests/unittest/test_update_settings_from_args.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# Generated by CodiumAI
import logging

import pytest

from pr_agent.algo.utils import update_settings_from_args
from pr_agent.config_loader import settings


class TestUpdateSettingsFromArgs:
    # Tests that the function updates the setting when passed a single valid argument.
    def test_single_valid_argument(self):
        args = ['--pr_code_suggestions.extra_instructions="be funny"']
        update_settings_from_args(args)
        assert settings.pr_code_suggestions.extra_instructions == '"be funny"'

    # Tests that the function updates the settings when passed multiple valid arguments.
    def test_multiple_valid_arguments(self):
        args = ['--pr_code_suggestions.extra_instructions="be funny"', '--pr_code_suggestions.num_code_suggestions=3']
        update_settings_from_args(args)
        assert settings.pr_code_suggestions.extra_instructions == '"be funny"'
        assert settings.pr_code_suggestions.num_code_suggestions == 3

    # Tests that the function updates the setting when passed a boolean value.
    def test_boolean_values(self):
        settings.pr_code_suggestions.enabled = False
        args = ['--pr_code_suggestions.enabled=true']
        update_settings_from_args(args)
        assert 'pr_code_suggestions' in settings
        assert 'enabled' in settings.pr_code_suggestions
        assert settings.pr_code_suggestions.enabled == True

    # Tests that the function updates the setting when passed an integer value.
    def test_integer_values(self):
        args = ['--pr_code_suggestions.num_code_suggestions=3']
        update_settings_from_args(args)
        assert settings.pr_code_suggestions.num_code_suggestions == 3

    # Tests that the function does not update any settings when passed an empty argument list.
    def test_empty_argument_list(self):
        args = []
        update_settings_from_args(args)
        assert settings == settings

    # Tests that the function logs an error when passed an invalid argument format.
    def test_invalid_argument_format(self, caplog):
        args = ['--pr_code_suggestions.extra_instructions="be funny"', '--pr_code_suggestions.num_code_suggestions']
        with caplog.at_level(logging.ERROR):
            update_settings_from_args(args)
        assert 'Invalid argument format' in caplog.text