FIX: Triage-flagged posts didn't have a score. (#752)

The score will contain the LLM result, and the flag won't be displayed when a minimum score threshold is configured.
This commit is contained in:
Roman Rizzi 2024-08-14 15:54:09 -03:00 committed by GitHub
parent 867cd54556
commit f789d3ee96
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 35 additions and 5 deletions

View File

@ -5,6 +5,10 @@ en:
title: Triage posts using AI
description: "Triage posts using a large language model"
system_prompt_missing_post_placeholder: "System prompt must contain a placeholder for the post: %%POST%%"
flagged_post: |
<div>Response from the model:</div>
<p>%%LLM_RESPONSE%%</p>
<b>Triggered by the <a href="/admin/plugins/discourse-automation/%%AUTOMATION_ID%%">%%AUTOMATION_NAME%%</a> rule.</b>
llm_report:
title: Periodic report using AI
description: "Periodic report based on a large language model"

View File

@ -78,6 +78,7 @@ if defined?(DiscourseAutomation)
canned_reply_user: canned_reply_user,
hide_topic: hide_topic,
flag_post: flag_post,
automation: self.automation,
)
rescue => e
Discourse.warn_exception(e, message: "llm_triage: skipped triage on post #{post.id}")

View File

@ -13,7 +13,8 @@ module DiscourseAi
canned_reply: nil,
canned_reply_user: nil,
hide_topic: nil,
flag_post: nil
flag_post: nil,
automation: nil
)
if category_id.blank? && tags.blank? && canned_reply.blank? && hide_topic.blank? &&
flag_post.blank?
@ -38,12 +39,12 @@ module DiscourseAi
llm.generate(
filled_system_prompt,
temperature: 0,
max_tokens: llm.tokenizer.tokenize(search_for_text).length * 2 + 10,
max_tokens: 700, # ~500 words
user: Discourse.system_user,
feature_name: "llm_triage",
)
)&.strip
if result.present? && result.strip.downcase.include?(search_for_text)
if result.present? && result.downcase.include?(search_for_text)
user = User.find_by_username(canned_reply_user) if canned_reply_user.present?
user = user || Discourse.system_user
if canned_reply.present?
@ -69,7 +70,24 @@ module DiscourseAi
post.topic.update!(visible: false) if hide_topic
ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user) if flag_post
if flag_post
reviewable =
ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
score_reason =
I18n
.t("discourse_automation.scriptables.llm_triage.flagged_post")
.sub("%%LLM_RESPONSE%%", result)
.sub("%%AUTOMATION_ID%%", automation&.id.to_s)
.sub("%%AUTOMATION_NAME%%", automation&.name.to_s)
reviewable.add_score(
Discourse.system_user,
ReviewableScore.types[:needs_approval],
reason: score_reason,
force_review: true,
)
end
end
end
end

View File

@ -15,6 +15,7 @@ describe DiscourseAi::Automation::LlmTriage do
hide_topic: true,
system_prompt: "test %%POST%%",
search_for_text: "bad",
automation: nil,
)
end
@ -29,6 +30,7 @@ describe DiscourseAi::Automation::LlmTriage do
hide_topic: true,
system_prompt: "test %%POST%%",
search_for_text: "bad",
automation: nil,
)
end
@ -45,6 +47,7 @@ describe DiscourseAi::Automation::LlmTriage do
category_id: category.id,
system_prompt: "test %%POST%%",
search_for_text: "bad",
automation: nil,
)
end
@ -61,6 +64,7 @@ describe DiscourseAi::Automation::LlmTriage do
search_for_text: "bad",
canned_reply: "test canned reply 123",
canned_reply_user: user.username,
automation: nil,
)
end
@ -78,12 +82,14 @@ describe DiscourseAi::Automation::LlmTriage do
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
automation: nil,
)
end
reviewable = ReviewablePost.last
expect(reviewable.target).to eq(post)
expect(reviewable.reviewable_scores.first.reason).to include("bad")
end
it "can handle garbled output from LLM" do
@ -94,6 +100,7 @@ describe DiscourseAi::Automation::LlmTriage do
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
automation: nil,
)
end