Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-07-02 03:40:38 +08:00
artifact
@@ -115,7 +115,7 @@ class LiteLLMAIHandler(BaseAiHandler):
             if self.aws_bedrock_client:
                 kwargs["aws_bedrock_client"] = self.aws_bedrock_client
 
-            get_logger().debug("Prompts", messages={"system": system, "user": user})
+            get_logger().debug("Prompts", artifact={"system": system, "user": user})
             response = await acompletion(**kwargs)
         except (APIError, Timeout, TryAgain) as e:
             get_logger().error("Error during OpenAI inference: ", e)
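
Across every hunk in this commit the change is the same: the ad-hoc keyword passed to get_logger().debug() (messages=, body=, diff=, configs=, suggestions=, review=, answer=, labels=, changlog=) is replaced by a single artifact (or artifacts) keyword. The sketch below is a minimal, hypothetical illustration of why a uniform key is convenient for structured logging; StructuredLogger and the JSON serialization are assumptions for the example, not pr-agent's actual get_logger() implementation.

import json
import logging
import sys

class StructuredLogger:
    """Tiny stand-in that serializes extra keyword arguments as JSON."""
    def __init__(self) -> None:
        self._logger = logging.getLogger("pr-agent-sketch")
        self._logger.addHandler(logging.StreamHandler(sys.stdout))
        self._logger.setLevel(logging.DEBUG)

    def debug(self, message: str, **kwargs) -> None:
        # Every call site puts its payload under the same "artifact" key,
        # so log consumers only have to look in one place.
        self._logger.debug("%s", json.dumps({"message": message, **kwargs}, default=str))

_LOGGER = StructuredLogger()

def get_logger() -> StructuredLogger:
    # Hypothetical accessor mirroring the call sites in the diff.
    return _LOGGER

# Before: get_logger().debug("Prompts", messages={...}); after:
get_logger().debug("Prompts", artifact={"system": "...", "user": "..."})
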
@@ -133,6 +133,6 @@ class LiteLLMAIHandler(BaseAiHandler):
         finish_reason = response["choices"][0]["finish_reason"]
         # usage = response.get("usage")
         get_logger().debug(f"\nAI response:\n{resp}")
-        get_logger().debug("full_response", response=response)
+        get_logger().debug("Full_response", artifact=response)
 
         return resp, finish_reason
@@ -208,15 +208,15 @@ async def handle_request(body: Dict[str, Any], event: str):
 
     # handle comments on PRs
     if action == 'created':
-        get_logger().debug(f'Request body', body=body)
+        get_logger().debug(f'Request body', artifact=body)
         await handle_comments_on_pr(body, event, sender, action, log_context, agent)
     # handle new PRs
     elif event == 'pull_request' and action != 'synchronize':
-        get_logger().debug(f'Request body', body=body)
+        get_logger().debug(f'Request body', artifact=body)
         await handle_new_pr_opened(body, event, sender, action, log_context, agent)
     # handle pull_request event with synchronize action - "push trigger" for new commits
     elif event == 'pull_request' and action == 'synchronize':
-        get_logger().debug(f'Request body', body=body)
+        get_logger().debug(f'Request body', artifact=body)
         await handle_push_trigger_for_new_commits(body, event, sender, action, log_context, agent)
     else:
         get_logger().info(f"event {event=} action {action=} does not require any handling")
@@ -74,8 +74,7 @@ class PRCodeSuggestions:
             get_logger().info('Generating code suggestions for PR...')
             relevant_configs = {'pr_code_suggestions': dict(get_settings().pr_code_suggestions),
                                 'config': dict(get_settings().config)}
-            get_logger().debug("Relevant configs", configs=relevant_configs)
-
+            get_logger().debug("Relevant configs", artifacts=relevant_configs)
             if get_settings().config.publish_output:
                 if self.git_provider.is_supported("gfm_markdown"):
                     self.progress_response = self.git_provider.publish_comment(self.progress)
@@ -104,7 +103,7 @@ class PRCodeSuggestions:
 
             # generate summarized suggestions
             pr_body = self.generate_summarized_suggestions(data)
-            get_logger().debug(f"PR output", suggestions=pr_body)
+            get_logger().debug(f"PR output", artifact=pr_body)
 
             # add usage guide
             if get_settings().pr_code_suggestions.enable_help_text:
@@ -133,7 +132,7 @@ class PRCodeSuggestions:
                                           add_line_numbers_to_hunks=True,
                                           disable_extra_lines=True)
         if self.patches_diff:
-            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            get_logger().debug(f"PR diff", artifact=self.patches_diff)
             self.prediction = await self._get_prediction(model, self.patches_diff)
         else:
             get_logger().error(f"Error getting PR diff")
@@ -241,7 +240,7 @@ class PRCodeSuggestions:
         self.patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
                                                     max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
         if self.patches_diff_list:
-            get_logger().debug(f"PR diff", diff=self.patches_diff_list)
+            get_logger().debug(f"PR diff", artifact=self.patches_diff_list)
 
             # parallelize calls to AI:
             if get_settings().pr_code_suggestions.parallel_calls:
@@ -77,7 +77,7 @@ class PRDescription:
             get_logger().info(f"Generating a PR description for pr_id: {self.pr_id}")
             relevant_configs = {'pr_description': dict(get_settings().pr_description),
                                 'config': dict(get_settings().config)}
-            get_logger().debug("Relevant configs", configs=relevant_configs)
+            get_logger().debug("Relevant configs", artifacts=relevant_configs)
             if get_settings().config.publish_output:
                 self.git_provider.publish_comment("Preparing PR description...", is_temporary=True)
 
@@ -101,7 +101,7 @@ class PRDescription:
                 pr_title, pr_body = self._prepare_pr_answer_with_markers()
             else:
                 pr_title, pr_body, = self._prepare_pr_answer()
-            get_logger().debug("PR output", title=pr_title, body=pr_body)
+            get_logger().debug("PR output", artifact={"title": pr_title, "body": pr_body})
 
             # Add help text if gfm_markdown is supported
             if self.git_provider.is_supported("gfm_markdown") and get_settings().pr_description.enable_help_text:
@@ -116,7 +116,7 @@ class PRDescription:
             # publish labels
             if get_settings().pr_description.publish_labels and self.git_provider.is_supported("get_labels"):
                 original_labels = self.git_provider.get_pr_labels()
-                get_logger().debug(f"original labels", labels=original_labels)
+                get_logger().debug(f"original labels", artifact=original_labels)
                 user_labels = get_user_labels(original_labels)
                 get_logger().debug(f"published labels:\n{pr_labels + user_labels}")
                 self.git_provider.publish_labels(pr_labels + user_labels)
@@ -147,7 +147,7 @@ class PRDescription:
 
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
         if self.patches_diff:
-            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            get_logger().debug(f"PR diff", artifact=self.patches_diff)
             self.prediction = await self._get_prediction(model)
         else:
             get_logger().error(f"Error getting PR diff {self.pr_id}")
@@ -12,8 +12,7 @@ class PRHelpMessage:
             get_logger().info('Getting PR Help Message...')
             relevant_configs = {'pr_help': dict(get_settings().pr_help),
                                 'config': dict(get_settings().config)}
-            get_logger().debug("Relevant configs", configs=relevant_configs)
-
+            get_logger().debug("Relevant configs", artifacts=relevant_configs)
             pr_comment = "## PR Agent Walkthrough\n\n"
             pr_comment += "🤖 Welcome to the PR Agent, an AI-powered tool for automated pull request analysis, feedback, suggestions and more."""
             pr_comment += "\n\nHere is a list of tools you can use to interact with the PR Agent:\n"
@@ -50,14 +50,13 @@ class PRQuestions:
             get_logger().info('Answering a PR question...')
             relevant_configs = {'pr_questions': dict(get_settings().pr_questions),
                                 'config': dict(get_settings().config)}
-            get_logger().debug("Relevant configs", configs=relevant_configs)
-
+            get_logger().debug("Relevant configs", artifacts=relevant_configs)
             if get_settings().config.publish_output:
                 self.git_provider.publish_comment("Preparing answer...", is_temporary=True)
             await retry_with_fallback_models(self._prepare_prediction)
 
             pr_comment = self._prepare_pr_answer()
-            get_logger().debug(f"PR output", answer=pr_comment)
+            get_logger().debug(f"PR output", artifact=pr_comment)
 
             if self.git_provider.is_supported("gfm_markdown") and get_settings().pr_questions.enable_help_text:
                 pr_comment += "<hr>\n\n<details> <summary><strong>✨ Ask tool usage guide:</strong></summary><hr> \n\n"
@@ -72,7 +71,7 @@ class PRQuestions:
     async def _prepare_prediction(self, model: str):
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
         if self.patches_diff:
-            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            get_logger().debug(f"PR diff", artifact=self.patches_diff)
             self.prediction = await self._get_prediction(model)
         else:
             get_logger().error(f"Error getting PR diff")
@@ -106,8 +106,7 @@ class PRReviewer:
             get_logger().info(f'Reviewing PR: {self.pr_url} ...')
             relevant_configs = {'pr_reviewer': dict(get_settings().pr_reviewer),
                                 'config': dict(get_settings().config)}
-            get_logger().debug("Relevant configs", configs=relevant_configs)
-
+            get_logger().debug("Relevant configs", artifacts=relevant_configs)
             if get_settings().config.publish_output:
                 self.git_provider.publish_comment("Preparing review...", is_temporary=True)
 
@@ -117,7 +116,7 @@ class PRReviewer:
                 return None
 
             pr_review = self._prepare_pr_review()
-            get_logger().debug(f"PR output", review=pr_review)
+            get_logger().debug(f"PR output", artifact=pr_review)
 
             if get_settings().config.publish_output:
                 previous_review_comment = self._get_previous_review_comment()
@@ -51,15 +51,14 @@ class PRUpdateChangelog:
             get_logger().info('Updating the changelog...')
             relevant_configs = {'pr_update_changelog': dict(get_settings().pr_update_changelog),
                                 'config': dict(get_settings().config)}
-            get_logger().debug("Relevant configs", configs=relevant_configs)
-
+            get_logger().debug("Relevant configs", artifacts=relevant_configs)
             if get_settings().config.publish_output:
                 self.git_provider.publish_comment("Preparing changelog updates...", is_temporary=True)
 
             await retry_with_fallback_models(self._prepare_prediction)
 
             new_file_content, answer = self._prepare_changelog_update()
-            get_logger().debug(f"PR output", changlog=answer)
+            get_logger().debug(f"PR output", artifact=answer)
 
             if get_settings().config.publish_output:
                 self.git_provider.remove_initial_comment()
@@ -71,7 +70,7 @@ class PRUpdateChangelog:
     async def _prepare_prediction(self, model: str):
         self.patches_diff = get_pr_diff(self.git_provider, self.token_handler, model)
         if self.patches_diff:
-            get_logger().debug(f"PR diff", diff=self.patches_diff)
+            get_logger().debug(f"PR diff", artifact=self.patches_diff)
             self.prediction = await self._get_prediction(model)
         else:
             get_logger().error(f"Error getting PR diff")
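
Assuming the debug records are eventually serialized as JSON lines with the payload stored under an "artifact" field (an assumption; this commit does not show how get_logger() writes records), a downstream consumer could filter on that one field regardless of which tool produced the record:

import json
import sys

# Hypothetical log consumer: report every record that carries an "artifact"
# payload. The field names are assumptions, not pr-agent's actual log schema.
for line in sys.stdin:
    try:
        record = json.loads(line)
    except json.JSONDecodeError:
        continue
    if "artifact" in record:
        print(record.get("message", ""), "->", type(record["artifact"]).__name__)
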