FEATURE: allow llm triage to automatically hide posts (#820)
Prior to this change we could flag a post, but there was no way to hide the content and treat the flag as spam. We had the option to hide topics, but that is not desirable for a spam reply. The new option allows triage to hide a post if it is a reply; if the post happens to be the first post in the topic, the topic will be hidden.
parent 110a1629aa
commit c294b6d394
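For orientation, the core of the new behavior reduces to the branch sketched below (a simplified extract of the logic this commit adds, not the verbatim plugin code; `post`, `flag_type`, and `score_reason` stand in for values the triage script computes):

    # Sketch only: condensed from the llm_triage change in this commit.
    if flag_type == :spam
      # A spam flag from the system user hides the post; when the hidden post
      # is the first post of its topic, Discourse hides the topic as well.
      PostActionCreator.new(
        Discourse.system_user,
        post,
        PostActionType.types[:spam],
        message: score_reason,
        queue_for_review: true,
      ).perform
    else
      # "review": queue the post for moderators without hiding anything.
      reviewable =
        ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
      reviewable.add_score(
        Discourse.system_user,
        ReviewableScore.types[:needs_approval],
        reason: score_reason,
        force_review: true,
      )
    end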
@@ -99,9 +99,12 @@ en:
       hide_topic:
         label: "Hide topic"
         description: "Make topic non visible to the public if triggered"
+      flag_type:
+        label: "Flag type"
+        description: "Type of flag to apply to the post (spam or simply raise for review)"
       flag_post:
-        label: "Send to review"
-        description: "Puts the post into the review queue if triggered, for moderators to triage"
+        label: "Flag post"
+        description: "Flags post (either as spam or for review)"
       model:
         label: "Model"
         description: "Language model used for triage"
@@ -1,5 +1,9 @@
 en:
   discourse_automation:
+    ai:
+      flag_types:
+        review: "Add post to review queue"
+        spam: "Flag as spam and hide post"
     scriptables:
       llm_triage:
         title: Triage posts using AI
@@ -21,6 +21,13 @@ if defined?(DiscourseAutomation)
     field :tags, component: :tags
     field :hide_topic, component: :boolean
     field :flag_post, component: :boolean
+    field :flag_type,
+          component: :choices,
+          required: false,
+          extra: {
+            content: DiscourseAi::Automation.flag_types,
+          },
+          default: "review"
     field :canned_reply, component: :message
     field :canned_reply_user, component: :user
 
@@ -41,6 +48,7 @@ if defined?(DiscourseAutomation)
       tags = fields.dig("tags", "value")
       hide_topic = fields.dig("hide_topic", "value")
       flag_post = fields.dig("flag_post", "value")
+      flag_type = fields.dig("flag_type", "value")
 
       begin
         RateLimiter.new(
@@ -68,6 +76,7 @@ if defined?(DiscourseAutomation)
           canned_reply_user: canned_reply_user,
           hide_topic: hide_topic,
           flag_post: flag_post,
+          flag_type: flag_type.to_s.to_sym,
           automation: self.automation,
         )
       rescue => e
@@ -2,6 +2,12 @@
 
 module DiscourseAi
   module Automation
+    def self.flag_types
+      [
+        { id: "review", translated_name: I18n.t("discourse_automation.ai.flag_types.review") },
+        { id: "spam", translated_name: I18n.t("discourse_automation.ai.flag_types.spam") },
+      ]
+    end
     def self.available_models
       values = DB.query_hash(<<~SQL)
         SELECT display_name AS translated_name, id AS id
@@ -14,6 +14,7 @@ module DiscourseAi
       canned_reply_user: nil,
       hide_topic: nil,
       flag_post: nil,
+      flag_type: nil,
       automation: nil
     )
       if category_id.blank? && tags.blank? && canned_reply.blank? && hide_topic.blank? &&
@@ -65,9 +66,6 @@ module DiscourseAi
       post.topic.update!(visible: false) if hide_topic
 
       if flag_post
-        reviewable =
-          ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
-
         score_reason =
           I18n
             .t("discourse_automation.scriptables.llm_triage.flagged_post")
@@ -75,12 +73,25 @@ module DiscourseAi
             .sub("%%AUTOMATION_ID%%", automation&.id.to_s)
             .sub("%%AUTOMATION_NAME%%", automation&.name.to_s)
 
-        reviewable.add_score(
-          Discourse.system_user,
-          ReviewableScore.types[:needs_approval],
-          reason: score_reason,
-          force_review: true,
-        )
+        if flag_type == :spam
+          PostActionCreator.new(
+            Discourse.system_user,
+            post,
+            PostActionType.types[:spam],
+            message: score_reason,
+            queue_for_review: true,
+          ).perform
+        else
+          reviewable =
+            ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
+
+          reviewable.add_score(
+            Discourse.system_user,
+            ReviewableScore.types[:needs_approval],
+            reason: score_reason,
+            force_review: true,
+          )
+        end
       end
     end
   end
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 describe DiscourseAi::Automation::LlmTriage do
   fab!(:post)
+  fab!(:reply) { Fabricate(:post, topic: post.topic, user: Fabricate(:user)) }
   fab!(:llm_model)
 
   def triage(**args)
@@ -92,6 +93,23 @@ describe DiscourseAi::Automation::LlmTriage do
     expect(reviewable.reviewable_scores.first.reason).to include("bad")
   end
 
+  it "can handle spam flags" do
+    DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
+      triage(
+        post: post,
+        model: "custom:#{llm_model.id}",
+        system_prompt: "test %%POST%%",
+        search_for_text: "bad",
+        flag_post: true,
+        flag_type: :spam,
+        automation: nil,
+      )
+    end
+
+    expect(post.reload).to be_hidden
+    expect(post.topic.reload.visible).to eq(false)
+  end
+
   it "can handle garbled output from LLM" do
     DiscourseAi::Completions::Llm.with_prepared_responses(["Bad.\n\nYo"]) do
       triage(