2023-07-06 00:21:08 +03:00
from __future__ import annotations
2024-07-14 08:53:53 +03:00
import copy
2023-07-18 23:14:47 +03:00
import difflib
2023-07-11 22:11:42 +03:00
import json
2024-04-09 22:51:02 +09:00
import os
2023-07-11 22:11:42 +03:00
import re
2023-07-06 00:21:08 +03:00
import textwrap
2024-05-29 13:52:44 +03:00
import time
2023-08-01 14:43:26 +03:00
from datetime import datetime
2024-02-01 09:46:04 +02:00
from enum import Enum
2024-02-05 09:20:36 +02:00
from typing import Any , List , Tuple
2023-07-06 00:21:08 +03:00
2023-08-09 08:50:15 +03:00
import yaml
2024-07-11 18:37:37 +03:00
from pydantic import BaseModel
2023-08-01 14:43:26 +03:00
from starlette_context import context
2023-11-07 14:28:41 +02:00
from pr_agent . algo import MAX_TOKENS
2024-04-03 08:42:50 +03:00
from pr_agent . algo . token_handler import TokenEncoder
2023-08-01 14:43:26 +03:00
from pr_agent . config_loader import get_settings , global_settings
2024-02-05 09:20:36 +02:00
from pr_agent . algo . types import FilePatchInfo
2023-10-16 14:56:00 +03:00
from pr_agent . log import get_logger
2023-08-01 14:43:26 +03:00
2024-07-11 18:37:37 +03:00
class Range(BaseModel):
    """A contiguous region of a file, expressed as line numbers plus optional columns."""
    line_start: int  # should be 0-indexed
    line_end: int  # NOTE(review): presumably 0-indexed as well, mirroring line_start — confirm with callers
    column_start: int = -1  # -1 presumably means "column not specified" — confirm with callers
    column_end: int = -1  # -1 presumably means "column not specified" — confirm with callers
2024-02-01 09:46:04 +02:00
class ModelType(str, Enum):
    """String enum selecting which model tier a request should use ("regular" or "turbo")."""
    REGULAR = "regular"
    TURBO = "turbo"
2023-08-01 14:43:26 +03:00
2024-06-27 07:17:26 +07:00
class PRReviewHeader(str, Enum):
    """Markdown titles that open a PR review comment (full run vs. incremental run)."""
    REGULAR = "## PR Reviewer Guide"
    INCREMENTAL = "## Incremental PR Reviewer Guide"
2024-06-27 07:17:26 +07:00
2023-08-01 14:43:26 +03:00
def get_setting(key: str) -> Any:
    """Look up a setting by (case-insensitive) name.

    Prefers the request-scoped settings stored in the starlette context,
    falling back to the global settings when the key is absent or when no
    request context is available.
    """
    try:
        key = key.upper()  # settings keys are stored uppercased
        scoped_settings = context.get("settings", global_settings)
        global_fallback = global_settings.get(key, None)
        return scoped_settings.get(key, global_fallback)
    except Exception:
        # No request context (or a non-string key): fall back to globals only.
        return global_settings.get(key, None)
2023-07-06 00:21:08 +03:00
2024-03-03 13:58:10 +02:00
2024-07-14 08:53:53 +03:00
def emphasize_header(text: str, only_markdown=False, reference_link=None) -> str:
    """Emphasize the leading '<header>: ' part of *text*.

    The text up to (and including) the first ': ' separator is bolded — as
    Markdown when only_markdown is True, otherwise as HTML <strong> — and
    optionally wrapped in a link to *reference_link*. Text without a ': '
    separator is returned unchanged; on any error the original text is
    returned.
    """
    try:
        split_at = text.find(": ")
        if split_at == -1:
            # No 'header: body' structure — nothing to emphasize.
            return text
        header = text[:split_at + 1]
        body = text[split_at + 1:]
        if only_markdown:
            if reference_link:
                emphasized = f"[**{header}**]({reference_link})\n"
            else:
                emphasized = f"**{header}**\n"
        else:
            if reference_link:
                emphasized = f"<strong><a href='{reference_link}'>{header}</a></strong><br>"
            else:
                emphasized = f"<strong>{header}</strong><br>"
        return emphasized + body
    except Exception as e:
        get_logger().exception(f"Failed to emphasize header: {e}")
        return text
2024-03-01 13:02:50 +02:00
2024-03-04 11:07:39 +02:00
def unique_strings(input_list: List[str]) -> List[str]:
    """Return *input_list* with duplicates removed, keeping first-occurrence order.

    Non-list or empty/falsy inputs are returned unchanged.
    """
    if not input_list or not isinstance(input_list, list):
        return input_list
    # dict preserves insertion order (Python 3.7+), so this dedups in order.
    return list(dict.fromkeys(input_list))
2024-07-14 08:53:53 +03:00
def convert_to_markdown_v2(output_data: dict,
                           gfm_supported: bool = True,
                           incremental_review=None,
                           git_provider=None) -> str:
    """
    Convert a dictionary of data into markdown format.
    Args:
        output_data (dict): A dictionary containing data to be converted to markdown format.
            Expected to contain a 'review' dict and optionally a 'code_feedback' list.
        gfm_supported (bool): Whether the target provider renders GitHub-flavored
            markdown/HTML tables; when False, plain '###' headings are emitted instead.
        incremental_review: When truthy, a reference to the previous review commit;
            switches the header to the incremental variant.
        git_provider: Provider object used to build line links for key issues
            (only its get_line_link() is used here).
    Returns:
        str: The markdown formatted text generated from the input dictionary.
    """
    # Emoji prefix per section title (keys must match the "nice" form of the
    # review keys produced below).
    emojis = {
        "Can be split": "🔀",
        "Possible issues": "⚡",
        "Key issues to review": "⚡",
        "Score": "🏅",
        "Relevant tests": "🧪",
        "Focused PR": "✨",
        "Relevant ticket": "🎫",
        "Security concerns": "🔒",
        "Insights from user's answers": "📝",
        "Code feedback": "🤖",
        "Estimated effort to review [1-5]": "⏱️",
    }
    markdown_text = ""
    if not incremental_review:
        markdown_text += f"{PRReviewHeader.REGULAR.value} 🔍\n\n"
    else:
        markdown_text += f"{PRReviewHeader.INCREMENTAL.value} 🔍\n\n"
        markdown_text += f"⏮️ Review for commits since previous PR-Agent review {incremental_review}.\n\n"
    # Nothing to render without a 'review' section.
    if not output_data or not output_data.get('review', {}):
        return ""

    if gfm_supported:
        markdown_text += "<table>\n"

    for key, value in output_data['review'].items():
        # Skip empty values, except the two sections that render their own
        # "nothing found" message.
        if value is None or value == '' or value == {} or value == []:
            if key.lower() not in ['can_be_split', 'key_issues_to_review']:
                continue
        key_nice = key.replace('_', ' ').capitalize()
        emoji = emojis.get(key_nice, "")
        if 'Estimated effort to review' in key_nice:
            key_nice = 'Estimated effort to review'
            value = str(value).strip()
            # Value may be "3" or "3, because ..." — take the leading integer.
            if value.isnumeric():
                value_int = int(value)
            else:
                try:
                    value_int = int(value.split(',')[0])
                except ValueError:
                    continue
            # Render the 1-5 effort score as a filled/empty dot bar.
            blue_bars = '🔵' * value_int
            white_bars = '⚪' * (5 - value_int)
            value = f"{value_int} {blue_bars}{white_bars}"
            if gfm_supported:
                markdown_text += f"<tr><td>"
                markdown_text += f"{emoji} <strong>{key_nice}</strong>: {value}"
                markdown_text += f"</td></tr>\n"
            else:
                markdown_text += f"### {emoji} {key_nice}: {value}\n\n"
        elif 'relevant tests' in key_nice.lower():
            value = value.strip().lower()
            if gfm_supported:
                markdown_text += f"<tr><td>"
                if is_value_no(value):
                    markdown_text += f"{emoji} <strong>No relevant tests</strong>"
                else:
                    markdown_text += f"{emoji} <strong>PR contains tests</strong>"
                markdown_text += f"</td></tr>\n"
            else:
                if is_value_no(value):
                    markdown_text += f'### {emoji} No relevant tests\n\n'
                else:
                    markdown_text += f"### PR contains tests\n\n"
        elif 'security concerns' in key_nice.lower():
            if gfm_supported:
                markdown_text += f"<tr><td>"
                if is_value_no(value):
                    markdown_text += f"{emoji} <strong>No security concerns identified</strong>"
                else:
                    markdown_text += f"{emoji} <strong>Security concerns</strong><br><br>\n\n"
                    # Bold the leading "<issue>: " part of the concern text.
                    value = emphasize_header(value.strip())
                    markdown_text += f"{value}"
                markdown_text += f"</td></tr>\n"
            else:
                if is_value_no(value):
                    markdown_text += f'### {emoji} No security concerns identified\n\n'
                else:
                    markdown_text += f"### {emoji} Security concerns\n\n"
                    value = emphasize_header(value.strip())
                    markdown_text += f"{value}\n\n"
        elif 'can be split' in key_nice.lower():
            # NOTE: only rendered for gfm providers; plain-markdown output skips it.
            if gfm_supported:
                markdown_text += f"<tr><td>"
                markdown_text += process_can_be_split(emoji, value)
                markdown_text += f"</td></tr>\n"
        elif 'key issues to review' in key_nice.lower():
            # value is a list of issues
            if is_value_no(value):
                if gfm_supported:
                    markdown_text += f"<tr><td>"
                    markdown_text += f"{emoji} <strong>No key issues to review</strong>"
                    markdown_text += f"</td></tr>\n"
                else:
                    markdown_text += f"### {emoji} No key issues to review\n\n"
            else:
                # issues = value.split('\n- ')
                issues = value
                # for i, _ in enumerate(issues):
                #     issues[i] = issues[i].strip().strip('-').strip()
                if gfm_supported:
                    markdown_text += f"<tr><td>"
                    markdown_text += f"{emoji} <strong>{key_nice}</strong><br><br>\n\n"
                else:
                    markdown_text += f"### {emoji} Key issues to review\n\n#### \n"
                for i, issue in enumerate(issues):
                    # Each issue is processed independently; a malformed one is
                    # logged and skipped instead of aborting the whole review.
                    try:
                        if not issue:
                            continue
                        relevant_file = issue.get('relevant_file', '').strip()
                        issue_header = issue.get('issue_header', '').strip()
                        issue_content = issue.get('issue_content', '').strip()
                        start_line = int(str(issue.get('start_line', 0)).strip())
                        end_line = int(str(issue.get('end_line', 0)).strip())
                        reference_link = git_provider.get_line_link(relevant_file, start_line, end_line)
                        if gfm_supported:
                            if get_settings().pr_reviewer.extra_issue_links:
                                # Replace each referenced variable name in the issue
                                # text with a code-formatted link to its line.
                                issue_content_linked = copy.deepcopy(issue_content)
                                referenced_variables_list = issue.get('referenced_variables', [])
                                for component in referenced_variables_list:
                                    name = component['variable_name'].strip().strip('`')
                                    ind = issue_content.find(name)
                                    if ind != -1:
                                        reference_link_component = git_provider.get_line_link(relevant_file, component['relevant_line'], component['relevant_line'])
                                        # Splice out the surrounding backticks along with the name
                                        # (hence the -1/+1 offsets around the match).
                                        issue_content_linked = issue_content_linked[:ind - 1] + f"[`{name}`]({reference_link_component})" + issue_content_linked[ind + len(name) + 1:]
                                    else:
                                        get_logger().info(f"Failed to find variable in issue content: {component['variable_name'].strip()}")
                                issue_content = issue_content_linked
                            issue_str = f"<a href='{reference_link}'><strong>{issue_header}</strong></a><br>{issue_content}"
                        else:
                            issue_str = f"[**{issue_header}**]({reference_link})\n\n{issue_content}\n\n"
                        markdown_text += f"{issue_str}\n\n"
                    except Exception as e:
                        get_logger().exception(f"Failed to process key issues to review: {e}")
                if gfm_supported:
                    markdown_text += f"</td></tr>\n"
        else:
            # Generic section: emoji + bolded title + raw value.
            if gfm_supported:
                markdown_text += f"<tr><td>"
                markdown_text += f"{emoji} <strong>{key_nice}</strong>: {value}"
                markdown_text += f"</td></tr>\n"
            else:
                markdown_text += f"### {emoji} {key_nice}: {value}\n\n"
    if gfm_supported:
        markdown_text += "</table>\n"

    # Optional collapsible "Code feedback" section after the review table.
    if 'code_feedback' in output_data:
        if gfm_supported:
            markdown_text += f"\n\n"
            markdown_text += f"<details><summary> <strong>Code feedback:</strong></summary>\n\n"
            markdown_text += "<hr>"
        else:
            markdown_text += f"\n\n### Code feedback:\n\n"
        for i, value in enumerate(output_data['code_feedback']):
            if value is None or value == '' or value == {} or value == []:
                continue
            markdown_text += parse_code_suggestion(value, i, gfm_supported) + "\n\n"
        # Drop a dangling separator left by the last suggestion.
        if markdown_text.endswith('<hr>'):
            markdown_text = markdown_text[:-4]
        if gfm_supported:
            markdown_text += f"</details>"
    return markdown_text
2024-03-09 10:46:36 +02:00
def process_can_be_split(emoji, value):
    """Render the 'can this PR be split?' review section as an HTML fragment.

    *value* is a list of theme dicts ('title', 'relevant_files'); an empty
    list or a single theme means the PR has no multiple themes. Returns ""
    on any processing error.
    """
    try:
        section_title = "Multiple PR themes"
        fragment = ""
        # A single theme (or none) means the PR is cohesive — nothing to split.
        if not value or (isinstance(value, list) and len(value) == 1):
            fragment += f"{emoji} <strong>No multiple PR themes</strong>\n\n"
        else:
            fragment += f"{emoji} <strong>{section_title}</strong><br><br>\n\n"
            # One collapsible block per suggested sub-PR theme.
            for theme in value:
                theme_title = theme.get('title', '')
                theme_files = theme.get('relevant_files', [])
                fragment += f"<details><summary>\nSub-PR theme: <b>{theme_title}</b></summary>\n\n"
                fragment += f"___\n\nRelevant files:\n\n"
                for file_name in theme_files:
                    fragment += f"- {file_name}\n"
                fragment += f"___\n\n"
                fragment += f"</details>\n\n"
    except Exception as e:
        get_logger().exception(f"Failed to process can be split: {e}")
        return ""
    return fragment
2024-02-08 17:08:42 +02:00
def parse_code_suggestion(code_suggestion: dict, i: int = 0, gfm_supported: bool = True) -> str:
    """
    Convert a dictionary of data into markdown format.
    Args:
        code_suggestion (dict): A dictionary containing data to be converted to markdown format.
            Recognized keys include 'relevant_file', 'suggestion' and 'relevant_line'.
        i (int): Index of the suggestion in the feedback list (currently unused here).
        gfm_supported (bool): When True and a 'relevant_line' is present, an HTML
            table is produced; otherwise a plain markdown bullet list.
    Returns:
        str: A string containing the markdown formatted text generated from the input dictionary.
    """
    markdown_text = ""
    if gfm_supported and 'relevant_line' in code_suggestion:
        # HTML-table rendering: one <tr> per recognized key.
        markdown_text += '<table>'
        for sub_key, sub_value in code_suggestion.items():
            try:
                if sub_key.lower() == 'relevant_file':
                    # Strip code/quote markers the model may have wrapped the path in.
                    relevant_file = sub_value.strip('`').strip('"').strip("'")
                    markdown_text += f"<tr><td>relevant file</td><td>{relevant_file}</td></tr>"
                    # continue
                elif sub_key.lower() == 'suggestion':
                    markdown_text += (f"<tr><td>{sub_key}</td>"
                                      f"<td>\n\n<strong>\n\n{sub_value.strip()}\n\n</strong>\n</td></tr>")
                elif sub_key.lower() == 'relevant_line':
                    markdown_text += f"<tr><td>relevant line</td>"
                    # The value may be a markdown link '[`code`](url)'; split it
                    # back into display text and target.
                    sub_value_list = sub_value.split('](')
                    relevant_line = sub_value_list[0].lstrip('`').lstrip('[')
                    if len(sub_value_list) > 1:
                        link = sub_value_list[1].rstrip(')').strip('`')
                        markdown_text += f"<td><a href='{link}'>{relevant_line}</a></td>"
                    else:
                        markdown_text += f"<td>{relevant_line}</td>"
                    markdown_text += "</tr>"
            except Exception as e:
                # A malformed entry only loses its own row, not the whole table.
                get_logger().exception(f"Failed to parse code suggestion: {e}")
                pass
        markdown_text += '</table>'
        markdown_text += "<hr>"
    else:
        # Plain-markdown rendering (also used when there is no 'relevant_line').
        for sub_key, sub_value in code_suggestion.items():
            if isinstance(sub_key, str):
                sub_key = sub_key.rstrip()
            if isinstance(sub_value, str):
                sub_value = sub_value.rstrip()
            if isinstance(sub_value, dict):  # "code example"
                markdown_text += f"  - **{sub_key}:**\n"
                for code_key, code_value in sub_value.items():  # 'before' and 'after' code
                    code_str = f"```\n{code_value}\n```"
                    code_str_indented = textwrap.indent(code_str, '        ')
                    markdown_text += f"  - **{code_key}:**\n{code_str_indented}\n"
            else:
                if "relevant_file" in sub_key.lower():
                    markdown_text += f"\n  - **{sub_key}:** {sub_value}  \n"
                else:
                    markdown_text += f"   **{sub_key}:** {sub_value}  \n"
                if "relevant_line" not in sub_key.lower():  # nicer presentation
                    # markdown_text = markdown_text.rstrip('\n') + "\\\n"  # works for gitlab
                    markdown_text = markdown_text.rstrip('\n') + "   \n"  # works for gitlab and bitbucker
        markdown_text += "\n"
    return markdown_text
2023-07-11 22:11:42 +03:00
2023-07-17 01:44:40 +03:00
def try_fix_json(review, max_iter=10, code_suggestions=False):
    """
    Fix broken or incomplete JSON messages and return the parsed JSON data.
    Args:
    - review: A string containing the JSON message to be fixed.
    - max_iter: An integer representing the maximum number of iterations to try and fix the JSON message.
    - code_suggestions: A boolean indicating whether to try and fix JSON messages with code feedback.
    Returns:
    - data: A dictionary containing the parsed JSON data.
    The function attempts to fix broken or incomplete JSON messages by parsing until the last valid code suggestion.
    If the JSON message ends with a closing bracket, the function calls the fix_json_escape_char function to fix the
    message.
    If code_suggestions is True and the JSON message contains code feedback, the function tries to fix the JSON
    message by parsing until the last valid code suggestion.
    The function uses regular expressions to find the last occurrence of "}," with any number of whitespaces or
    newlines.
    It tries to parse the JSON message with the closing bracket and checks if it is valid.
    If the JSON message is valid, the parsed JSON data is returned.
    If the JSON message is not valid, the last code suggestion is removed and the process is repeated until a valid JSON
    message is obtained or the maximum number of iterations is reached.
    If a valid JSON message is not obtained, an error is logged and an empty dictionary is returned.
    """
    # A message that already ends with '}' is structurally complete — only
    # escape characters can be broken.
    if review.endswith("}"):
        return fix_json_escape_char(review)

    data = {}
    # The bracket sequence needed to close the truncated structure depends on
    # whether the list being repaired is nested one or two levels deep.
    if code_suggestions:
        closing_bracket = "]}"
    else:
        closing_bracket = "]}}"

    if (review.rfind("'Code feedback': [") > 0 or review.rfind('"Code feedback": [') > 0) or \
            (review.rfind("'Code suggestions': [") > 0 or review.rfind('"Code suggestions": [') > 0):
        # Position just before the last '},' — i.e. the end of the last
        # complete list element.
        last_code_suggestion_ind = [m.end() for m in re.finditer(r"\}\s*,", review)][-1] - 1
        valid_json = False
        iter_count = 0

        # Drop trailing (possibly truncated) suggestions one at a time until
        # the remainder plus the closing brackets parses.
        while last_code_suggestion_ind > 0 and not valid_json and iter_count < max_iter:
            try:
                data = json.loads(review[:last_code_suggestion_ind] + closing_bracket)
                valid_json = True
                review = review[:last_code_suggestion_ind].strip() + closing_bracket
            except json.decoder.JSONDecodeError:
                review = review[:last_code_suggestion_ind]
                last_code_suggestion_ind = [m.end() for m in re.finditer(r"\}\s*,", review)][-1] - 1
                iter_count += 1

        if not valid_json:
            get_logger().error("Unable to decode JSON response from AI")
            data = {}

    return data
2023-07-17 01:44:40 +03:00
2023-07-18 11:34:57 +03:00
2023-07-17 01:44:40 +03:00
def fix_json_escape_char(json_message=None, max_iter=100):
    """
    Fix broken or incomplete JSON messages and return the parsed JSON data.

    Repeatedly parses the message; on each failure the offending character
    (whose index is extracted from the decoder's "(char N)" error suffix) is
    blanked out and parsing is retried. The previous implementation recursed
    once per repair, so an unfixable message could exhaust the recursion
    limit; the loop below is bounded by max_iter instead.

    Args:
        json_message (str): A string containing the JSON message to be fixed.
        max_iter (int): Maximum number of single-character repairs to attempt
            before giving up (backward-compatible addition).
    Returns:
        dict: A dictionary containing the parsed JSON data.
    Raises:
        json.JSONDecodeError: if the message still does not parse after
            max_iter repairs.
    """
    for _ in range(max_iter):
        try:
            return json.loads(json_message)
        except Exception as e:
            # The decoder error message ends with "(char N)": N is the index
            # of the offending character. Blank it out and retry.
            idx_to_replace = int(str(e).split(' ')[-1].replace(')', ''))
            chars = list(json_message)
            chars[idx_to_replace] = ' '
            json_message = ''.join(chars)
    # Give up: let the final parse attempt raise the underlying error.
    return json.loads(json_message)
2023-07-18 23:14:47 +03:00
def convert_str_to_datetime(date_str):
    """
    Convert a string representation of a date and time into a datetime object.
    Args:
        date_str (str): A string representation of a date and time in the
            format '%a, %d %b %Y %H:%M:%S %Z' (RFC-1123 style).
    Returns:
        datetime: A datetime object representing the input date and time.
    Example:
        >>> convert_str_to_datetime('Mon, 01 Jan 2022 12:00:00 UTC')
        datetime.datetime(2022, 1, 1, 12, 0)
    """
    return datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S %Z')
2024-05-15 09:05:01 +03:00
def load_large_diff(filename, new_file_content_str: str, original_file_content_str: str, show_warning: bool = True) -> str:
    """
    Generate a unified-diff patch for a modified file by comparing its original
    content with the new content.

    Args:
        filename: Name of the file, used only in the warning log message.
        new_file_content_str: The new content of the file as a string.
        original_file_content_str: The original content of the file as a string.
        show_warning: When False, suppresses the "manually creating patch" warning.
    Returns:
        The generated patch string, or "" if diffing fails.
    """
    patch = ""
    try:
        diff_lines = difflib.unified_diff(original_file_content_str.splitlines(keepends=True),
                                          new_file_content_str.splitlines(keepends=True))
        if get_settings().config.verbosity_level >= 2 and show_warning:
            get_logger().warning(f"File was modified, but no patch was found. Manually creating patch: {filename}.")
        patch = ''.join(diff_lines)
    except Exception:
        # Best-effort: any failure yields an empty patch rather than an error.
        pass
    return patch
2023-07-30 11:43:44 +03:00
2023-08-01 14:43:26 +03:00
def update_settings_from_args(args: List[str]) -> List[str]:
    """
    Update the settings of the Dynaconf object based on the arguments passed to the function.
    Args:
        args: A list of arguments passed to the function.
        Example args: ['--pr_code_suggestions.extra_instructions="be funny',
                       '--pr_code_suggestions.num_code_suggestions=3']
    Returns:
        None
    Raises:
        ValueError: If the argument is not in the correct format.
    """
    other_args = []  # arguments that are not '--key=value' overrides are passed back to the caller
    if args:
        for arg in args:
            arg = arg.strip()
            if arg.startswith('--'):
                arg = arg.strip('-').strip()
                vals = arg.split('=', 1)
                if len(vals) != 2:
                    # NOTE(review): with maxsplit=1, len(vals) can only be 1 or 2,
                    # so this error branch is unreachable dead code.
                    if len(vals) > 2:  # --extended is a valid argument
                        get_logger().error(f'Invalid argument format: {arg}')
                    # Flags without '=' (e.g. 'extended') are treated as plain args.
                    other_args.append(arg)
                    continue
                key, value = _fix_key_value(*vals)
                get_settings().set(key, value)
                get_logger().info(f'Updated setting {key} to: "{value}"')
            else:
                other_args.append(arg)
    return other_args
2023-08-09 08:50:15 +03:00
2023-08-20 10:03:57 +03:00
def _fix_key_value(key: str, value: str):
    """Normalize a '--key=value' override pair.

    The key is uppercased (settings keys are stored uppercased) and the value
    is YAML-parsed so that numbers, booleans, lists etc. get their natural
    types; if parsing fails the stripped string is kept as-is.
    """
    normalized_key = key.strip().upper()
    parsed_value = value.strip()
    try:
        parsed_value = yaml.safe_load(parsed_value)
    except Exception as e:
        get_logger().debug(f"Failed to parse YAML for config override {normalized_key}={parsed_value}", exc_info=e)
    return normalized_key, parsed_value
2024-07-03 17:06:27 +03:00
def load_yaml(response_text: str, keys_fix_yaml: List[str] = None, first_key="", last_key="") -> dict:
    """
    Parse an AI YAML response, falling back to try_fix_yaml() on failure.

    Args:
        response_text: Raw model output; a leading '```yaml' fence and trailing
            backticks are stripped before parsing.
        keys_fix_yaml: Extra 'key:' prefixes for try_fix_yaml's block-scalar
            repair. Defaults to an empty list (a None sentinel is used instead
            of a mutable [] default).
        first_key / last_key: Hints forwarded to try_fix_yaml for snippet
            extraction.
    Returns:
        The parsed YAML data (dict on success; whatever try_fix_yaml returns
        on fallback).
    """
    if keys_fix_yaml is None:
        keys_fix_yaml = []
    response_text = response_text.removeprefix('```yaml').rstrip('`')
    try:
        data = yaml.safe_load(response_text)
    except Exception as e:
        get_logger().error(f"Failed to parse AI prediction: {e}")
        data = try_fix_yaml(response_text, keys_fix_yaml=keys_fix_yaml, first_key=first_key, last_key=last_key)
    return data
2023-12-21 08:21:34 +02:00
2024-07-14 08:53:53 +03:00
2024-07-03 17:06:27 +03:00
def try_fix_yaml(response_text: str,
                 keys_fix_yaml: List[str] = [],
                 first_key="",
                 last_key="",) -> dict:
    """
    Try a sequence of increasingly aggressive repairs on a YAML string that
    failed to parse, returning the first successful yaml.safe_load result.

    Fallbacks, in order: (1) turn 'key: ...' values into '|-' block scalars,
    (2) extract the first ```yaml fenced snippet, (3) strip surrounding curly
    brackets, (4) slice between first_key and last_key, (5) drop trailing
    lines one at a time. Returns {} if every fallback fails.
    """
    response_text_lines = response_text.split('\n')

    keys_yaml = ['relevant line:', 'suggestion content:', 'relevant file:', 'existing code:', 'improved code:']
    keys_yaml = keys_yaml + keys_fix_yaml
    # first fallback - try to convert 'relevant line: ...' to relevant line: |-\n        ...'
    response_text_lines_copy = response_text_lines.copy()
    for i in range(0, len(response_text_lines_copy)):
        for key in keys_yaml:
            if key in response_text_lines_copy[i] and not '|-' in response_text_lines_copy[i]:
                response_text_lines_copy[i] = response_text_lines_copy[i].replace(f'{key}',
                                                                                  f'{key} |-\n        ')
    try:
        data = yaml.safe_load('\n'.join(response_text_lines_copy))
        get_logger().info(f"Successfully parsed AI prediction after adding |-\n")
        return data
    except:
        get_logger().info(f"Failed to parse AI prediction after adding |-\n")

    # second fallback - try to extract only range from first ```yaml to ````
    snippet_pattern = r'```(yaml)?[\s\S]*?```'
    snippet = re.search(snippet_pattern, '\n'.join(response_text_lines_copy))
    if snippet:
        snippet_text = snippet.group()
        try:
            data = yaml.safe_load(snippet_text.removeprefix('```yaml').rstrip('`'))
            get_logger().info(f"Successfully parsed AI prediction after extracting yaml snippet")
            return data
        except:
            pass

    # third fallback - try to remove leading and trailing curly brackets
    response_text_copy = response_text.strip().rstrip().removeprefix('{').removesuffix('}').rstrip(':\n')
    try:
        data = yaml.safe_load(response_text_copy)
        get_logger().info(f"Successfully parsed AI prediction after removing curly brackets")
        return data
    except:
        pass

    # forth fallback - try to extract yaml snippet by 'first_key' and 'last_key'
    # note that 'last_key' can be in practice a key that is not the last key in the yaml snippet.
    # it just needs to be some inner key, so we can look for newlines after it
    if first_key and last_key:
        index_start = response_text.find(f"\n{first_key}:")
        if index_start == -1:
            index_start = response_text.find(f"{first_key}:")
        index_last_code = response_text.rfind(f"{last_key}:")
        index_end = response_text.find("\n\n", index_last_code)  # look for newlines after last_key
        if index_end == -1:
            index_end = len(response_text)
        response_text_copy = response_text[index_start:index_end].strip().strip('```yaml').strip('`').strip()
        try:
            data = yaml.safe_load(response_text_copy)
            get_logger().info(f"Successfully parsed AI prediction after extracting yaml snippet")
            return data
        except:
            pass

    # fifth fallback - try to remove last lines
    data = {}
    for i in range(1, len(response_text_lines)):
        response_text_lines_tmp = '\n'.join(response_text_lines[:-i])
        try:
            data = yaml.safe_load(response_text_lines_tmp)
            get_logger().info(f"Successfully parsed AI prediction after removing {i} lines")
            return data
        except:
            pass
2023-10-24 22:28:57 +03:00
2023-12-14 07:44:13 +08:00
def set_custom_labels(variables, git_provider=None):
    """
    Populate the prompt *variables* dict with label definitions.

    When custom labels are disabled this is a no-op. When enabled but no
    custom labels are configured, a default label list is injected as
    'custom_labels'; otherwise a generated Enum class source string and a
    minimal-name -> label mapping are injected.
    """
    if not get_settings().config.enable_custom_labels:
        return
    labels = get_settings().get('custom_labels', {})
    if not labels:
        # set default labels
        labels = ['Bug fix', 'Tests', 'Bug fix with tests', 'Enhancement', 'Documentation', 'Other']
        labels_list = "\n      - ".join(labels) if labels else ""
        labels_list = f"      - {labels_list}" if labels_list else ""
        variables["custom_labels"] = labels_list
        return

    # Set custom labels: build the source of a Label enum for the prompt,
    # one member per configured label, valued by its description.
    variables["custom_labels_class"] = "class Label(str, Enum):"
    counter = 0
    labels_minimal_to_labels_dict = {}  # maps snake_case member name back to the original label
    for k, v in labels.items():
        # Escape embedded newlines so the description stays a one-line literal.
        description = "'" + v['description'].strip('\n').replace('\n', '\\n') + "'"
        # variables["custom_labels_class"] += f"\n    {k.lower().replace(' ', '_')} = '{k}' # {description}"
        variables["custom_labels_class"] += f"\n    {k.lower().replace(' ', '_')} = {description}"
        labels_minimal_to_labels_dict[k.lower().replace(' ', '_')] = k
        counter += 1
    variables["labels_minimal_to_labels_dict"] = labels_minimal_to_labels_dict
2023-11-06 15:14:08 +02:00
2023-11-08 14:46:11 +02:00
def get_user_labels(current_labels: List[str] = None):
    """
    Return only the labels added by the user, i.e. drop the default labels
    and any configured custom labels. On failure, return the input unchanged.
    """
    try:
        if current_labels is None:
            current_labels = []
        default_labels = ['bug fix', 'tests', 'enhancement', 'documentation', 'other']
        user_labels = []
        for label in current_labels:
            # skip built-in default labels (case-insensitive)
            if label.lower() in default_labels:
                continue
            # skip labels coming from the custom-labels configuration
            if get_settings().config.enable_custom_labels and label in get_settings().custom_labels:
                continue
            user_labels.append(label)
        if user_labels:
            get_logger().debug(f"Keeping user labels: {user_labels}")
    except Exception as e:
        get_logger().exception(f"Failed to get user labels: {e}")
        return current_labels
    return user_labels
2023-11-07 14:28:41 +02:00
2023-11-07 14:38:37 +02:00
2023-11-07 14:28:41 +02:00
def get_max_tokens(model):
    """
    Return the effective token limit for *model*: the registered limit,
    optionally capped by the ``config.max_model_tokens`` setting.

    Raises:
        Exception: if *model* has no entry in MAX_TOKENS.
    """
    settings = get_settings()
    if model not in MAX_TOKENS:
        raise Exception(f"MAX_TOKENS must be set for model {model} in ./pr_agent/algo/__init__.py")
    max_tokens_model = MAX_TOKENS[model]
    # a configured global cap can only lower the model's own limit
    if settings.config.max_model_tokens:
        max_tokens_model = min(settings.config.max_model_tokens, max_tokens_model)
    return max_tokens_model
2023-11-26 08:29:47 +02:00
2024-05-29 13:52:44 +03:00
def clip_tokens(text: str, max_tokens: int, add_three_dots=True, num_input_tokens=None, delete_last_line=False) -> str:
    """
    Clip a string to approximately ``max_tokens`` tokens.

    Args:
        text: The string to clip.
        max_tokens: Maximum number of tokens allowed in the result.
        add_three_dots: Append a "\\n...(truncated)" marker to clipped output.
        num_input_tokens: Pre-computed token count of *text*; computed via the
            token encoder when not supplied.
        delete_last_line: Drop the (likely partial) final line after clipping.

    Returns:
        The clipped string, or *text* unchanged if it already fits or on error.
    """
    if not text:
        return text
    try:
        if num_input_tokens is None:
            encoder = TokenEncoder.get_token_encoder()
            num_input_tokens = len(encoder.encode(text))
        if num_input_tokens <= max_tokens:
            return text
        if max_tokens < 0:
            return ""

        # Estimate characters-per-token and keep a 10% safety margin,
        # since the ratio is only an average over the whole text.
        factor = 0.9
        chars_per_token = len(text) / num_input_tokens
        num_output_chars = int(factor * chars_per_token * max_tokens)
        if num_output_chars <= 0:
            return ""  # nothing fits

        clipped_text = text[:num_output_chars]
        if delete_last_line:
            clipped_text = clipped_text.rsplit('\n', 1)[0]
        if add_three_dots:
            clipped_text += "\n...(truncated)"
        return clipped_text
    except Exception as e:
        get_logger().warning(f"Failed to clip tokens: {e}")
        return text
def replace_code_tags(text):
    """
    Replace odd instances of ` with <code> and even instances of ` with </code>
    """
    segments = text.split('`')
    # odd-indexed segments were inside backticks -> wrap them in <code> tags
    wrapped = [
        f'<code>{seg}</code>' if idx % 2 else seg
        for idx, seg in enumerate(segments)
    ]
    return ''.join(wrapped)
def find_line_number_of_relevant_line_in_file(diff_files: List[FilePatchInfo],
                                              relevant_file: str,
                                              relevant_line_in_file: str,
                                              absolute_position: int = None) -> Tuple[int, int]:
    """
    Locate a line of interest inside a file's unified-diff patch.

    Args:
        diff_files: parsed diff entries, each with a ``filename`` and a unified-diff ``patch``.
        relevant_file: filename to look up (compared to ``file.filename.strip()``).
        relevant_line_in_file: the line text to search for within the patch.
        absolute_position: optional line number in the NEW file; when given (not None),
            it is matched instead of the line text.

    Returns:
        (position, absolute_position): ``position`` is the 0-based index of the matched
        line within the patch's lines; ``absolute_position`` is the line number in the
        new file. Both are -1 when no match is found.
    """
    position = -1
    if absolute_position is None:
        absolute_position = -1
    # unified-diff hunk header: @@ -start1[,size1] +start2[,size2] @@ [section header]
    re_hunk_header = re.compile(
        r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)")
    for file in diff_files:
        if file.filename and (file.filename.strip() == relevant_file):
            patch = file.patch
            patch_lines = patch.splitlines()
            delta = 0
            start1, size1, start2, size2 = 0, 0, 0, 0
            if absolute_position != -1:  # matching absolute to relative
                for i, line in enumerate(patch_lines):
                    # new hunk: reset the running offset and record the hunk start
                    if line.startswith('@@'):
                        delta = 0
                        match = re_hunk_header.match(line)
                        start1, size1, start2, size2 = map(int, match.groups()[:4])
                    elif not line.startswith('-'):
                        # context ('+' or ' ') lines advance the position in the new file
                        delta += 1
                    #
                    absolute_position_curr = start2 + delta - 1
                    if absolute_position_curr == absolute_position:
                        position = i
                        break
            else:
                # try to find the line in the patch using difflib, with some margin of error
                matches_difflib: list[str | Any] = difflib.get_close_matches(relevant_line_in_file,
                                                                             patch_lines, n=3, cutoff=0.93)
                if len(matches_difflib) == 1 and matches_difflib[0].startswith('+'):
                    relevant_line_in_file = matches_difflib[0]
                # first pass: substring match against non-removed lines,
                # tracking the new-file line number as we go
                for i, line in enumerate(patch_lines):
                    if line.startswith('@@'):
                        delta = 0
                        match = re_hunk_header.match(line)
                        start1, size1, start2, size2 = map(int, match.groups()[:4])
                    elif not line.startswith('-'):
                        delta += 1
                    if relevant_line_in_file in line and line[0] != '-':
                        position = i
                        absolute_position = start2 + delta - 1
                        break
                # second pass: retry without the leading '+' in case the model
                # prefixed '+' onto what is actually a context line
                if position == -1 and relevant_line_in_file[0] == '+':
                    no_plus_line = relevant_line_in_file[1:].lstrip()
                    for i, line in enumerate(patch_lines):
                        if line.startswith('@@'):
                            delta = 0
                            match = re_hunk_header.match(line)
                            start1, size1, start2, size2 = map(int, match.groups()[:4])
                        elif not line.startswith('-'):
                            delta += 1
                        if no_plus_line in line and line[0] != '-':
                            # The model might add a '+' to the beginning of the relevant_line_in_file even if originally
                            # it's a context line
                            position = i
                            absolute_position = start2 + delta - 1
                            break
    return position, absolute_position
2024-04-09 22:51:02 +09:00
2024-05-29 13:52:44 +03:00
def validate_and_await_rate_limit(rate_limit_status=None, git_provider=None, get_rate_limit_status_func=None):
    """
    Check the GitHub API rate-limit status and, if any resource is exhausted,
    sleep until its reset time, then refresh the status.

    Args:
        rate_limit_status: pre-fetched status dict with a 'resources' mapping;
            fetched via *git_provider* or *get_rate_limit_status_func* if None.
        git_provider: provider exposing ``github_client.get_rate_limit()``.
        get_rate_limit_status_func: fallback callable returning the status dict.

    Returns:
        bool: True if a rate limit was hit (and waited out), False otherwise.
    """
    if git_provider and not rate_limit_status:
        rate_limit_status = {'resources': git_provider.github_client.get_rate_limit().raw_data}
    if not rate_limit_status:
        rate_limit_status = get_rate_limit_status_func()
    # validate that the rate limit is not exceeded
    is_rate_limit = False
    for key, value in rate_limit_status['resources'].items():
        if value['remaining'] == 0:
            # fixed: use the project logger instead of bare print()
            get_logger().error(f"key: {key}, value: {value}")
            is_rate_limit = True
            sleep_time_sec = value['reset'] - datetime.now().timestamp()
            sleep_time_hour = sleep_time_sec / 3600.0
            get_logger().error(f"Rate limit exceeded. Sleeping for {sleep_time_hour} hours")
            if sleep_time_sec > 0:
                time.sleep(sleep_time_sec + 1)
            # refresh the status after sleeping so callers see current numbers
            if git_provider:
                rate_limit_status = {'resources': git_provider.github_client.get_rate_limit().raw_data}
            else:
                rate_limit_status = get_rate_limit_status_func()
    return is_rate_limit
def get_largest_component(pr_url):
    """
    Analyze the PR and return (component_name, filename) of the method with
    the most added lines, or (None, None) when nothing qualifies.
    """
    from pr_agent.tools.pr_analyzer import PRAnalyzer
    publish_output = get_settings().config.publish_output
    get_settings().config.publish_output = False  # disable publish output during analysis
    try:
        analyzer = PRAnalyzer(pr_url)
        methods_dict_files = analyzer.run_sync()
    finally:
        # fixed: restore the global flag even if the analysis raises
        get_settings().config.publish_output = publish_output
    max_lines_changed = 0
    file_b = ""
    component_name_b = ""
    for file in methods_dict_files:
        for method in methods_dict_files[file]:
            try:
                if methods_dict_files[file][method]['num_plus_lines'] > max_lines_changed:
                    max_lines_changed = methods_dict_files[file][method]['num_plus_lines']
                    file_b = file
                    component_name_b = method
            except Exception:  # fixed: was a bare except (also caught SystemExit/KeyboardInterrupt)
                # best-effort: skip entries missing 'num_plus_lines'
                pass
    if component_name_b:
        get_logger().info(f"Using the largest changed component: '{component_name_b}'")
        return component_name_b, file_b
    else:
        return None, None
2024-04-10 22:16:09 +09:00
def github_action_output(output_data: dict, key_name: str):
    """
    Append ``key_name``'s data from *output_data* as JSON to the GitHub Action
    output file, when output is enabled in the settings. Errors are logged,
    never raised.
    """
    try:
        if not get_settings().get('github_action_config.enable_output', False):
            return

        key_data = output_data.get(key_name, {})
        serialized = json.dumps(key_data, indent=None, ensure_ascii=False)
        # GITHUB_OUTPUT points at the step-output file provided by the runner
        with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
            print(f"{key_name}={serialized}", file=fh)
    except Exception as e:
        get_logger().error(f"Failed to write to GitHub Action output: {e}")
    return
def show_relevant_configurations(relevant_section: str) -> str:
    """
    Render the global [config] section and the given tool section as a
    collapsible markdown <details> block, omitting sensitive/irrelevant keys.

    Args:
        relevant_section: name of the settings section to show alongside [config].

    Returns:
        str: markdown text ready to append to a tool's output.
    """
    # keys that must never be echoed back to users
    forbidden_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider',
                      'trial_prefix_message', 'no_eligible_message', 'identity_provider', 'ALLOWED_REPOS', 'APP_NAME']

    markdown_text = ""
    markdown_text += "\n<hr>\n<details> <summary><strong>🛠️ Relevant configurations:</strong></summary>\n\n"
    markdown_text += "<br>These are the relevant [configurations](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml) for this tool:\n\n"
    # fixed: heading markdown was malformed ("**[config**]")
    markdown_text += "**[config]**\n```yaml\n\n"
    for key, value in get_settings().config.items():
        if key in forbidden_keys:
            continue
        markdown_text += f"{key}: {value}\n"
    markdown_text += "\n```\n"
    markdown_text += f"\n**[{relevant_section}]**\n```yaml\n\n"
    for key, value in get_settings().get(relevant_section, {}).items():
        if key in forbidden_keys:
            continue
        markdown_text += f"{key}: {value}\n"
    markdown_text += "\n```"
    markdown_text += "\n</details>\n"
    return markdown_text
2024-06-29 13:08:34 +03:00
def is_value_no(value):
    """
    Return True when *value* is falsy (None, empty, 0) or reads as a
    negative answer: 'no', 'none' or 'false' (case/whitespace-insensitive).
    """
    if not value:
        return True
    return str(value).strip().lower() in ('no', 'none', 'false')