Mirror of https://github.com/discourse/discourse-ai.git, synced 2025-07-04 13:32:13 +00:00
FIX: Check post action creator result when flagging spam (#1119)
Currently, core does not support re-flagging something that is already flagged as spam. Long term we may want to support this, but in the meantime we should not be silencing/hiding when the PostActionCreator fails while flagging things as spam.

Co-authored-by: Ted Johansson <drenmi@gmail.com>
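For context, here is a minimal sketch of the guard this commit introduces, assembled only from calls that appear in the diffs below; the log, post, and flagging_user objects are stand-ins for the real AiSpamLog record, the flagged Post, and the flagging user, and the surrounding class/method scaffolding is assumed:

    result =
      PostActionCreator.new(
        flagging_user,
        post,
        PostActionType.types[:spam],
        queue_for_review: true,
      ).perform

    if result.success?
      # Flag created: link the reviewable, then silence/hide as before.
      log.update!(reviewable: result.reviewable)
    else
      # Flag creation failed (e.g. the post was already flagged as spam):
      # record the failure instead of silencing the user or hiding the post.
      log.update!(error: result.errors.full_messages.join(", ").truncate(3000))
    end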
commit 7b1bdbde6d (parent b60926c6e6)
@@ -19,6 +19,7 @@ end
 #  payload     :string(20000)   default(""), not null
 #  created_at  :datetime        not null
 #  updated_at  :datetime        not null
+#  error       :string(3000)
 #
 # Indexes
 #
db/migrate/20250211021037_add_error_to_ai_spam_log.rb (new file, 6 lines)
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+class AddErrorToAiSpamLog < ActiveRecord::Migration[7.2]
+  def change
+    add_column :ai_spam_logs, :error, :string, limit: 3000
+  end
+end
@@ -394,22 +394,32 @@ module DiscourseAi
             queue_for_review: true,
           ).perform

-        log.update!(reviewable: result.reviewable)
-
-        reason = I18n.t("discourse_ai.spam_detection.silence_reason", url: url)
-        silencer =
-          UserSilencer.new(
-            post.user,
-            flagging_user,
-            message: :too_many_spam_flags,
-            post_id: post.id,
-            reason: reason,
-            keep_posts: true,
-          )
-        silencer.silence
-
-        # silencer will not hide tl1 posts, so we do this here
-        hide_post(post)
+        # Currently in core re-flagging something that is already flagged as spam
+        # is not supported, long term we may want to support this but in the meantime
+        # we should not be silencing/hiding if the PostActionCreator fails.
+        if result.success?
+          log.update!(reviewable: result.reviewable)
+
+          reason = I18n.t("discourse_ai.spam_detection.silence_reason", url: url)
+          silencer =
+            UserSilencer.new(
+              post.user,
+              flagging_user,
+              message: :too_many_spam_flags,
+              post_id: post.id,
+              reason: reason,
+              keep_posts: true,
+            )
+          silencer.silence
+
+          # silencer will not hide tl1 posts, so we do this here
+          hide_post(post)
+        else
+          log.update!(
+            error:
+              "unable to flag post as spam, post action failed for post #{post.id} with error: '#{result.errors.full_messages.join(", ").truncate(3000)}'",
+          )
+        end
      end

      def self.hide_post(post)
@@ -88,15 +88,24 @@ module DiscourseAi
           .sub("%%AUTOMATION_NAME%%", automation&.name.to_s)

      if flag_type == :spam || flag_type == :spam_silence
-        PostActionCreator.new(
-          Discourse.system_user,
-          post,
-          PostActionType.types[:spam],
-          message: score_reason,
-          queue_for_review: true,
-        ).perform
+        result =
+          PostActionCreator.new(
+            Discourse.system_user,
+            post,
+            PostActionType.types[:spam],
+            message: score_reason,
+            queue_for_review: true,
+          ).perform

-        SpamRule::AutoSilence.new(post.user, post).silence_user if flag_type == :spam_silence
+        if flag_type == :spam_silence
+          if result.success?
+            SpamRule::AutoSilence.new(post.user, post).silence_user
+          else
+            Rails.logger.warn(
+              "llm_triage: unable to flag post as spam, post action failed for #{post.id} with error: '#{result.errors.full_messages.join(",").truncate(3000)}'",
+            )
+          end
+        end
      else
        reviewable =
          ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
@@ -214,7 +214,7 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do

  before { Jobs.run_immediately! }

-  it "Can correctly run tests" do
+  it "can correctly run tests" do
    prompts = nil
    result =
      DiscourseAi::Completions::Llm.with_prepared_responses(
@@ -240,7 +240,7 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do
    expect(result[:is_spam]).to eq(false)
  end

-  it "Correctly handles spam scanning" do
+  it "correctly handles spam scanning" do
    expect(described_class.flagging_user.id).not_to eq(Discourse.system_user.id)

    # flag post for scanning
@@ -288,6 +288,30 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do
      expect(post.topic.reload.visible).to eq(true)
      expect(post.user.reload.silenced?).to eq(false)
    end
+
+    it "does not silence the user or hide the post when a flag cannot be created" do
+      post = post_with_uploaded_image
+      Fabricate(
+        :post_action,
+        post: post,
+        user: described_class.flagging_user,
+        post_action_type_id: PostActionType.types[:spam],
+      )
+
+      described_class.new_post(post)
+
+      DiscourseAi::Completions::Llm.with_prepared_responses(["spam"]) do |_, _, _prompts|
+        # force a rebake so we actually scan
+        post.rebake!
+      end
+
+      log = AiSpamLog.find_by(post: post)
+
+      expect(log.reviewable).to be_nil
+      expect(log.error).to match(/unable to flag post as spam/)
+      expect(post.user.reload).not_to be_silenced
+      expect(post.topic.reload).to be_visible
+    end
  end

  it "includes location information and email in context" do
@@ -128,6 +128,28 @@ describe DiscourseAi::Automation::LlmTriage do
    expect(post.user.silenced?).to eq(true)
  end

+  it "does not silence the user if the flag fails" do
+    Fabricate(
+      :post_action,
+      post: post,
+      user: Discourse.system_user,
+      post_action_type_id: PostActionType.types[:spam],
+    )
+    DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
+      triage(
+        post: post,
+        model: "custom:#{llm_model.id}",
+        system_prompt: "test %%POST%%",
+        search_for_text: "bad",
+        flag_post: true,
+        flag_type: :spam_silence,
+        automation: nil,
+      )
+    end
+
+    expect(post.user.reload).not_to be_silenced
+  end
+
  it "can handle garbled output from LLM" do
    DiscourseAi::Completions::Llm.with_prepared_responses(["Bad.\n\nYo"]) do
      triage(