FEATURE: allow llm triage to automatically hide posts (#820)

Prior to this change we could flag a post, but there was no way
to hide the content and treat the flag as spam.

We had the option to hide topics, but that is not desirable for
a spam reply.

The new option allows triage to hide a post if it is a reply; if the
post happens to be the first post in the topic, the topic is hidden
as well.
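
For reviewers, a rough sketch of how the new behaviour is exercised end to end, modelled on the spec added further down in this commit. The handler method name (handle) is an assumption, as it is not visible in the hunks below, and the post and llm_model objects are illustrative fixtures:

    # Assumed entry point: DiscourseAi::Automation::LlmTriage.handle
    # (keyword arguments follow lib/automation/llm_triage.rb in this diff).
    DiscourseAi::Automation::LlmTriage.handle(
      post: post,                      # a reply, or the first post of a topic
      model: "custom:#{llm_model.id}",
      system_prompt: "test %%POST%%",
      search_for_text: "bad",
      flag_post: true,
      flag_type: :spam,                # new option added by this commit
      automation: nil,
    )
    # With flag_type: :spam the post is flagged as spam and hidden;
    # if it is the first post, the whole topic is made invisible.
    # With flag_type: :review (the default) the post is only queued
    # for moderators, as before.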
Authored by Sam on 2024-10-04 15:11:30 +09:00, committed by GitHub
commit c294b6d394, parent 110a1629aa
6 changed files with 62 additions and 11 deletions

View File

@@ -99,9 +99,12 @@ en:
       hide_topic:
         label: "Hide topic"
         description: "Make topic non visible to the public if triggered"
+      flag_type:
+        label: "Flag type"
+        description: "Type of flag to apply to the post (spam or simply raise for review)"
       flag_post:
-        label: "Send to review"
-        description: "Puts the post into the review queue if triggered, for moderators to triage"
+        label: "Flag post"
+        description: "Flags post (either as spam or for review)"
       model:
         label: "Model"
         description: "Language model used for triage"

View File

@ -1,5 +1,9 @@
en: en:
discourse_automation: discourse_automation:
ai:
flag_types:
review: "Add post to review queue"
spam: "Flag as spam and hide post"
scriptables: scriptables:
llm_triage: llm_triage:
title: Triage posts using AI title: Triage posts using AI

View File

@@ -21,6 +21,13 @@ if defined?(DiscourseAutomation)
     field :tags, component: :tags
     field :hide_topic, component: :boolean
     field :flag_post, component: :boolean
+    field :flag_type,
+          component: :choices,
+          required: false,
+          extra: {
+            content: DiscourseAi::Automation.flag_types,
+          },
+          default: "review"
     field :canned_reply, component: :message
     field :canned_reply_user, component: :user
@@ -41,6 +48,7 @@ if defined?(DiscourseAutomation)
       tags = fields.dig("tags", "value")
       hide_topic = fields.dig("hide_topic", "value")
       flag_post = fields.dig("flag_post", "value")
+      flag_type = fields.dig("flag_type", "value")

       begin
         RateLimiter.new(
@@ -68,6 +76,7 @@ if defined?(DiscourseAutomation)
         canned_reply_user: canned_reply_user,
         hide_topic: hide_topic,
         flag_post: flag_post,
+        flag_type: flag_type.to_s.to_sym,
         automation: self.automation,
       )
     rescue => e

View File

@@ -2,6 +2,12 @@
 module DiscourseAi
   module Automation
+    def self.flag_types
+      [
+        { id: "review", translated_name: I18n.t("discourse_automation.ai.flag_types.review") },
+        { id: "spam", translated_name: I18n.t("discourse_automation.ai.flag_types.spam") },
+      ]
+    end

     def self.available_models
       values = DB.query_hash(<<~SQL)
         SELECT display_name AS translated_name, id AS id
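
The new DiscourseAi::Automation.flag_types helper is what the :choices field declared in the automation script above consumes. A small sketch of what it should return with the locale strings added in this commit (illustrative, not output captured from a running site):

    DiscourseAi::Automation.flag_types
    # => [
    #      { id: "review", translated_name: "Add post to review queue" },
    #      { id: "spam", translated_name: "Flag as spam and hide post" },
    #    ]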

View File

@@ -14,6 +14,7 @@ module DiscourseAi
        canned_reply_user: nil,
        hide_topic: nil,
        flag_post: nil,
+       flag_type: nil,
        automation: nil
      )
        if category_id.blank? && tags.blank? && canned_reply.blank? && hide_topic.blank? &&
@@ -65,9 +66,6 @@ module DiscourseAi
        post.topic.update!(visible: false) if hide_topic

        if flag_post
-          reviewable =
-            ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
-
          score_reason =
            I18n
              .t("discourse_automation.scriptables.llm_triage.flagged_post")
@@ -75,6 +73,18 @@ module DiscourseAi
              .sub("%%AUTOMATION_ID%%", automation&.id.to_s)
              .sub("%%AUTOMATION_NAME%%", automation&.name.to_s)

+          if flag_type == :spam
+            PostActionCreator.new(
+              Discourse.system_user,
+              post,
+              PostActionType.types[:spam],
+              message: score_reason,
+              queue_for_review: true,
+            ).perform
+          else
+            reviewable =
+              ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
+
            reviewable.add_score(
              Discourse.system_user,
              ReviewableScore.types[:needs_approval],
@@ -86,4 +96,5 @@ module DiscourseAi
          end
        end
      end
+      end
    end
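
To summarise the branch added above: with flag_type :spam the post is flagged via PostActionCreator with queue_for_review: true, so it is hidden immediately and still queued for moderators, while the default :review type keeps the previous ReviewablePost behaviour and leaves the post visible. A hedged sketch of the observable difference (the post variable is illustrative; hidden? and visible are standard Discourse core attributes):

    # After triage runs with flag_post: true
    post.reload.hidden?                   # => true for flag_type :spam, false for :review
    post.topic.reload.visible             # => false when the spam-flagged post is the first post
    ReviewablePost.exists?(target: post)  # => true for the :review path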

View File

@ -1,6 +1,7 @@
# frozen_string_literal: true # frozen_string_literal: true
describe DiscourseAi::Automation::LlmTriage do describe DiscourseAi::Automation::LlmTriage do
fab!(:post) fab!(:post)
fab!(:reply) { Fabricate(:post, topic: post.topic, user: Fabricate(:user)) }
fab!(:llm_model) fab!(:llm_model)
def triage(**args) def triage(**args)
@@ -92,6 +93,23 @@ describe DiscourseAi::Automation::LlmTriage do
     expect(reviewable.reviewable_scores.first.reason).to include("bad")
   end

+  it "can handle spam flags" do
+    DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
+      triage(
+        post: post,
+        model: "custom:#{llm_model.id}",
+        system_prompt: "test %%POST%%",
+        search_for_text: "bad",
+        flag_post: true,
+        flag_type: :spam,
+        automation: nil,
+      )
+    end
+
+    expect(post.reload).to be_hidden
+    expect(post.topic.reload.visible).to eq(false)
+  end
+
   it "can handle garbled output from LLM" do
     DiscourseAi::Completions::Llm.with_prepared_responses(["Bad.\n\nYo"]) do
       triage(