mirror of
https://github.com/discourse/discourse-ai.git
synced 2025-07-12 17:13:29 +00:00
FIX: never block spam scanning user (#1437)
Previously, staff and bots would get scanned if their trust level was low. Additionally, if the spam scanner user was somehow blocked (deactivated, silenced, or suspended), the feature would stop working entirely. This adds an override that unconditionally ensures the user is set up correctly prior to scanning.
This commit is contained in:
parent
bc8e57d7e8
commit
32dc45ba4f
@ -47,10 +47,20 @@ module DiscourseAi
|
|||||||
user = nil
|
user = nil
|
||||||
if SiteSetting.ai_spam_detection_user_id.present?
|
if SiteSetting.ai_spam_detection_user_id.present?
|
||||||
user = User.find_by(id: SiteSetting.ai_spam_detection_user_id)
|
user = User.find_by(id: SiteSetting.ai_spam_detection_user_id)
|
||||||
|
ensure_safe_flagging_user!(user)
|
||||||
end
|
end
|
||||||
user || Discourse.system_user
|
user || Discourse.system_user
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# Repairs the spam-scanning flagging user so the feature never silently
# stops working because the account was blocked.
#
# Only bot users are repaired — if the site setting somehow points at a
# human account we skip all repairs rather than mutate a real user.
#
# @param user [User] the configured spam-detection user
# @return [void]
def self.ensure_safe_flagging_user!(user)
  # Never mutate a real (human) account.
  return if !user.bot?

  # Undo any state that would prevent the user from flagging posts.
  user.update!(silenced_till: nil) if user.silenced?
  user.update!(trust_level: TrustLevel[4]) if user.trust_level != TrustLevel[4]
  user.update!(suspended_till: nil, suspended_at: nil) if user.suspended?
  user.update!(active: true) if !user.active?
end
|
||||||
|
|
||||||
def self.after_cooked_post(post)
|
def self.after_cooked_post(post)
|
||||||
return if !enabled?
|
return if !enabled?
|
||||||
return if !should_scan_post?(post)
|
return if !should_scan_post?(post)
|
||||||
@ -94,6 +104,9 @@ module DiscourseAi
|
|||||||
return false if !post.present?
|
return false if !post.present?
|
||||||
return false if post.user.trust_level > TrustLevel[1]
|
return false if post.user.trust_level > TrustLevel[1]
|
||||||
return false if post.topic.private_message?
|
return false if post.topic.private_message?
|
||||||
|
return false if post.user.bot?
|
||||||
|
return false if post.user.staff?
|
||||||
|
|
||||||
if Post
|
if Post
|
||||||
.where(user_id: post.user_id)
|
.where(user_id: post.user_id)
|
||||||
.joins(:topic)
|
.joins(:topic)
|
||||||
|
@ -49,6 +49,16 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do
|
|||||||
expect(described_class.should_scan_post?(post)).to eq(false)
|
expect(described_class.should_scan_post?(post)).to eq(false)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# Bot authors (negative user ids) must never be scanned.
it "returns false for bots" do
  post.user.id = -100
  expect(described_class.should_scan_post?(post)).to eq(false)
end
|
||||||
|
|
||||||
|
# Staff (moderators/admins) must never be scanned.
it "returns false for staff" do
  post.user.moderator = true
  expect(described_class.should_scan_post?(post)).to eq(false)
end
|
||||||
|
|
||||||
it "returns false for users with many public posts" do
|
it "returns false for users with many public posts" do
|
||||||
Fabricate(:post, user: user, topic: topic)
|
Fabricate(:post, user: user, topic: topic)
|
||||||
Fabricate(:post, user: user, topic: topic)
|
Fabricate(:post, user: user, topic: topic)
|
||||||
@ -207,6 +217,26 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# flagging_user repairs the bot account on every fetch, so a silence
# applied out-of-band must be undone by the next call.
it "unsilences flagging user if erroneously silenced" do
  described_class.flagging_user.update!(silenced_till: 1.day.from_now)
  expect(described_class.flagging_user.silenced?).to eq(false)
end
|
||||||
|
|
||||||
|
# A lowered trust level must be restored to TL4 on the next fetch.
it "ensures flagging user is tl4" do
  described_class.flagging_user.update!(trust_level: 0)
  expect(described_class.flagging_user.trust_level).to eq(4)
end
|
||||||
|
|
||||||
|
# A suspension applied out-of-band must be undone by the next fetch.
it "unsuspends user if it was erroneously suspended" do
  described_class.flagging_user.update!(suspended_till: 1.day.from_now, suspended_at: 1.day.ago)
  expect(described_class.flagging_user.suspended?).to eq(false)
end
|
||||||
|
|
||||||
|
# A deactivated bot account must be reactivated by the next fetch.
it "makes sure account is active" do
  described_class.flagging_user.update!(active: false)
  expect(described_class.flagging_user.active).to eq(true)
end
|
||||||
|
|
||||||
describe "integration test" do
|
describe "integration test" do
|
||||||
fab!(:llm_model)
|
fab!(:llm_model)
|
||||||
let(:api_audit_log) { Fabricate(:api_audit_log) }
|
let(:api_audit_log) { Fabricate(:api_audit_log) }
|
||||||
@ -243,8 +273,13 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do
|
|||||||
it "correctly handles spam scanning" do
|
it "correctly handles spam scanning" do
|
||||||
expect(described_class.flagging_user.id).not_to eq(Discourse.system_user.id)
|
expect(described_class.flagging_user.id).not_to eq(Discourse.system_user.id)
|
||||||
|
|
||||||
# flag post for scanning
|
|
||||||
post = post_with_uploaded_image
|
post = post_with_uploaded_image
|
||||||
|
# this is surprising, core fabricator is not linking
|
||||||
|
# we need it linked so we scan uploads
|
||||||
|
post.link_post_uploads
|
||||||
|
|
||||||
|
expect(described_class.should_scan_post?(post)).to eq(true)
|
||||||
|
expect(post.upload_ids).to be_present
|
||||||
|
|
||||||
described_class.new_post(post)
|
described_class.new_post(post)
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user