DEV: Add missing specs for the toxicity module
parent e8bffcdd64
commit 94933f3c58
@@ -65,7 +65,7 @@ plugins:
       - sentiment
       - emotion
 
-  ai_nsfw_live_detection_enabled: false
+  ai_nsfw_detection_enabled: false
   ai_nsfw_inference_service_api_endpoint:
     default: ""
   ai_nsfw_inference_service_api_key:

@@ -4,14 +4,20 @@ module DiscourseAI
   module NSFW
     class EntryPoint
       def load_files
-        require_relative "evaluation.rb"
-        require_relative "jobs/regular/evaluate_content.rb"
+        require_relative "evaluation"
+        require_relative "jobs/regular/evaluate_post_uploads"
       end
 
       def inject_into(plugin)
-        plugin.add_model_callback(Upload, :after_create) do
-          Jobs.enqueue(:evaluate_content, upload_id: self.id)
-        end
+        nsfw_detection_cb =
+          Proc.new do |post|
+            if SiteSetting.ai_nsfw_detection_enabled
+              Jobs.enqueue(:evaluate_post_uploads, post_id: post.id)
+            end
+          end
+
+        plugin.on(:post_created, &nsfw_detection_cb)
+        plugin.on(:post_edited, &nsfw_detection_cb)
       end
     end
   end

@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Jobs
-  class EvaluateContent < ::Jobs::Base
+  class EvaluatePostUploads < ::Jobs::Base
     def execute(args)
       upload = Upload.find_by_id(args[:upload_id])

@@ -3,8 +3,8 @@ module DiscourseAI
   module Sentiment
     class EntryPoint
      def load_files
-        require_relative "post_classifier.rb"
-        require_relative "jobs/regular/post_sentiment_analysis.rb"
+        require_relative "post_classifier"
+        require_relative "jobs/regular/post_sentiment_analysis"
      end
 
      def inject_into(plugin)

@@ -3,10 +3,6 @@
 module ::DiscourseAI
   module Sentiment
     class PostClassifier
-      SENTIMENT_LABELS = %w[anger disgust fear joy neutral sadness surprise]
-
-      SENTIMENT_LABELS = %w[negative neutral positive]
-
       def classify!(post)
         available_models.each do |model|
           classification = request_classification(post, model)

@@ -3,25 +3,27 @@
 module ::DiscourseAI
   module Toxicity
     class ChatMessageClassifier < Classifier
-      def content
-        @object.message
+      private
+
+      def content(chat_message)
+        chat_message.message
       end
 
-      def store_classification
+      def store_classification(chat_message, classification)
         PluginStore.set(
           "toxicity",
-          "chat_message_#{@object.id}",
+          "chat_message_#{chat_message.id}",
           {
-            classification: @classification,
+            classification: classification,
             model: SiteSetting.ai_toxicity_inference_service_api_model,
             date: Time.now.utc,
           },
         )
       end
 
-      def flag!
+      def flag!(chat_message, _toxic_labels)
         Chat::ChatReviewQueue.new.flag_message(
-          @object,
+          chat_message,
           Guardian.new(flagger),
           ReviewableScore.types[:inappropriate],
         )

@@ -13,47 +13,53 @@ module ::DiscourseAI
         sexual_explicit
       ]
 
-      def initialize(object)
-        @object = object
+      def classify!(target)
+        classification = request_classification(target)
+
+        store_classification(target, classification)
+
+        toxic_labels = filter_toxic_labels(classification)
+
+        flag!(target, toxic_labels) if should_flag_based_on?(toxic_labels)
       end
 
-      def content
+      protected
+
+      def flag!(_target, _toxic_labels)
+        raise NotImplemented
       end
 
-      def classify!
-        @classification =
-          ::DiscourseAI::InferenceManager.perform!(
-            "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify",
-            SiteSetting.ai_toxicity_inference_service_api_model,
-            content,
-            SiteSetting.ai_toxicity_inference_service_api_key,
-          )
-        store_classification
-        consider_flagging
+      def store_classification(_target, _classification)
+        raise NotImplemented
       end
 
-      def store_classification
-      end
-
-      def automatic_flag_enabled?
-        SiteSetting.ai_toxicity_flag_automatically
-      end
-
-      def consider_flagging
-        return unless automatic_flag_enabled?
-        @reasons =
-          CLASSIFICATION_LABELS.filter do |label|
-            @classification[label] >= SiteSetting.send("ai_toxicity_flag_threshold_#{label}")
-          end
-
-        flag! unless @reasons.empty?
+      def content(_target)
+        raise NotImplemented
       end
 
       def flagger
-        User.find_by(id: -1)
+        Discourse.system_user
       end
 
-      def flag!
+      private
+
+      def request_classification(target)
+        ::DiscourseAI::InferenceManager.perform!(
+          "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify",
+          SiteSetting.ai_toxicity_inference_service_api_model,
+          content(target),
+          SiteSetting.ai_toxicity_inference_service_api_key,
+        )
+      end
+
+      def filter_toxic_labels(classification)
+        CLASSIFICATION_LABELS.filter do |label|
+          classification[label] >= SiteSetting.send("ai_toxicity_flag_threshold_#{label}")
+        end
+      end
+
+      def should_flag_based_on?(toxic_labels)
+        SiteSetting.ai_toxicity_flag_automatically && toxic_labels.present?
       end
     end
   end

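Note: the refactor above replaces the old instance-state flow (@object, @classification, consider_flagging) with a stateless template method: classify!(target) requests the classification, stores it, filters the labels against the per-label thresholds, and flags only when ai_toxicity_flag_automatically is on. Subclasses now only override the three hooks. A minimal sketch of a hypothetical subclass, for illustration only (ExampleClassifier and its logging are not part of this commit):

module ::DiscourseAI
  module Toxicity
    # Hypothetical subclass showing the contract the new base class expects.
    class ExampleClassifier < Classifier
      private

      # Text sent to the inference endpoint by request_classification(target).
      def content(target)
        target.raw
      end

      # Receives the raw classification hash returned by the service.
      def store_classification(target, classification)
        Rails.logger.info("toxicity for #{target.id}: #{classification.inspect}")
      end

      # Called only when should_flag_based_on?(toxic_labels) is true.
      def flag!(target, toxic_labels)
        Rails.logger.warn("would flag #{target.id} for: #{toxic_labels.join(", ")}")
      end
    end
  end
end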
@@ -3,31 +3,26 @@ module DiscourseAI
   module Toxicity
     class EntryPoint
       def load_files
-        require_relative "event_handler.rb"
-        require_relative "classifier.rb"
-        require_relative "post_classifier.rb"
-        require_relative "chat_message_classifier.rb"
+        require_relative "scan_queue"
+        require_relative "classifier"
+        require_relative "post_classifier"
+        require_relative "chat_message_classifier"
 
-        require_relative "jobs/regular/toxicity_classify_post.rb"
-        require_relative "jobs/regular/toxicity_classify_chat_message.rb"
+        require_relative "jobs/regular/toxicity_classify_post"
+        require_relative "jobs/regular/toxicity_classify_chat_message"
       end
 
       def inject_into(plugin)
-        plugin.on(:post_created) do |post|
-          DiscourseAI::Toxicity::EventHandler.handle_post_async(post)
-        end
+        post_analysis_cb = Proc.new { |post| DiscourseAI::Toxicity::ScanQueue.enqueue_post(post) }
 
-        plugin.on(:post_edited) do |post|
-          DiscourseAI::Toxicity::EventHandler.handle_post_async(post)
-        end
+        plugin.on(:post_created, &post_analysis_cb)
+        plugin.on(:post_edited, &post_analysis_cb)
 
-        plugin.on(:chat_message_created) do |chat_message|
-          DiscourseAI::Toxicity::EventHandler.handle_chat_async(chat_message)
-        end
+        chat_message_analysis_cb =
+          Proc.new { |message| DiscourseAI::Toxicity::ScanQueue.enqueue_chat_message(message) }
 
-        plugin.on(:chat_message_edited) do |chat_message|
-          DiscourseAI::Toxicity::EventHandler.handle_chat_async(chat_message)
-        end
+        plugin.on(:chat_message_created, &chat_message_analysis_cb)
+        plugin.on(:chat_message_edited, &chat_message_analysis_cb)
       end
     end
   end

@@ -1,17 +1,16 @@
 # frozen_string_literal: true
 
 module ::Jobs
-  class ClassifyChatMessage < ::Jobs::Base
+  class ToxicityClassifyChatMessage < ::Jobs::Base
     def execute(args)
       return unless SiteSetting.ai_toxicity_enabled
 
-      chat_message_id = args[:chat_message_id]
-      return if chat_message_id.blank?
+      return if (chat_message_id = args[:chat_message_id]).blank?
 
       chat_message = ChatMessage.find_by(id: chat_message_id)
       return if chat_message&.message.blank?
 
-      ::DiscourseAI::Toxicity::ChatMessageClassifier.new(chat_message).classify!
+      ::DiscourseAI::Toxicity::ChatMessageClassifier.new.classify!(chat_message)
     end
   end
 end

@@ -8,10 +8,10 @@ module ::Jobs
       post_id = args[:post_id]
       return if post_id.blank?
 
-      post = Post.find_by(id: post_id, post_type: Post.types[:regular])
+      post = Post.includes(:user).find_by(id: post_id, post_type: Post.types[:regular])
       return if post&.raw.blank?
 
-      ::DiscourseAI::Toxicity::PostClassifier.new(post).classify!
+      ::DiscourseAI::Toxicity::PostClassifier.new.classify!(post)
     end
   end
 end

@@ -3,23 +3,25 @@
 module ::DiscourseAI
   module Toxicity
     class PostClassifier < Classifier
-      def content
-        object.post_number == 1 ? "#{object.topic.title}\n#{object.raw}" : object.raw
+      private
+
+      def content(post)
+        post.post_number == 1 ? "#{post.topic.title}\n#{post.raw}" : post.raw
       end
 
-      def store_classification
+      def store_classification(post, classification)
         PostCustomField.create!(
-          post_id: @object.id,
+          post_id: post.id,
           name: "toxicity",
           value: {
-            classification: @classification,
+            classification: classification,
             model: SiteSetting.ai_toxicity_inference_service_api_model,
           }.to_json,
         )
       end
 
-      def flag!
-        DiscourseAI::FlagManager.new(@object, reasons: @reasons).flag!
+      def flag!(target, toxic_labels)
+        ::DiscourseAI::FlagManager.new(target, reasons: toxic_labels).flag!
       end
     end
   end

@@ -2,14 +2,14 @@
 
 module ::DiscourseAI
   module Toxicity
-    class EventHandler
+    class ScanQueue
       class << self
-        def handle_post_async(post)
+        def enqueue_post(post)
           return if bypass?(post)
           Jobs.enqueue(:toxicity_classify_post, post_id: post.id)
         end
 
-        def handle_chat_async(chat_message)
+        def enqueue_chat_message(chat_message)
           return if bypass?(chat_message)
           Jobs.enqueue(:toxicity_classify_chat_message, chat_message_id: chat_message.id)
         end
@@ -19,7 +19,7 @@ module ::DiscourseAI
         end
 
         def group_bypass?(user)
-          user.groups.pluck(:id).intersection(SiteSetting.disorder_groups_bypass_map).present?
+          user.groups.pluck(:id).intersection(SiteSetting.ai_toxicity_groups_bypass_map).present?
         end
       end
     end
 end

plugin.rb
@@ -9,13 +9,18 @@
 
 enabled_site_setting :discourse_ai_enabled
 
-require_relative "lib/shared/inference_manager"
-
-require_relative "lib/modules/nsfw/entry_point"
-require_relative "lib/modules/toxicity/entry_point"
-require_relative "lib/modules/sentiment/entry_point"
-
 after_initialize do
+  module ::DiscourseAI
+    PLUGIN_NAME = "discourse-ai"
+  end
+
+  require_relative "lib/shared/inference_manager"
+  require_relative "lib/shared/flag_manager"
+
+  require_relative "lib/modules/nsfw/entry_point"
+  require_relative "lib/modules/toxicity/entry_point"
+  require_relative "lib/modules/sentiment/entry_point"
+
   modules = [
     DiscourseAI::NSFW::EntryPoint.new,
     DiscourseAI::Toxicity::EntryPoint.new,
@@ -26,8 +31,4 @@ after_initialize do
     a_module.load_files
     a_module.inject_into(self)
   end
-
-  module ::DiscourseAI
-    PLUGIN_NAME = "discourse-ai"
-  end
 end

@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+
+describe DiscourseAI::NSFW::EntryPoint do
+  fab!(:user) { Fabricate(:user) }
+
+  describe "registering event callbacks" do
+    context "when creating a post" do
+      let(:creator) do
+        PostCreator.new(
+          user,
+          raw: "this is the new content for my topic",
+          title: "this is my new topic title",
+        )
+      end
+
+      it "queues a job on create if NSFW detection is enabled" do
+        SiteSetting.ai_nsfw_detection_enabled = true
+
+        expect { creator.create }.to change(Jobs::EvaluatePostUploads.jobs, :size).by(1)
+      end
+
+      it "does nothing if NSFW detection is disabled" do
+        SiteSetting.ai_nsfw_detection_enabled = false
+
+        expect { creator.create }.not_to change(Jobs::EvaluatePostUploads.jobs, :size)
+      end
+    end
+
+    context "when editing a post" do
+      fab!(:post) { Fabricate(:post, user: user) }
+      let(:revisor) { PostRevisor.new(post) }
+
+      it "queues a job on update if NSFW detection is enabled" do
+        SiteSetting.ai_nsfw_detection_enabled = true
+
+        expect { revisor.revise!(user, raw: "This is my new test") }.to change(
+          Jobs::EvaluatePostUploads.jobs,
+          :size,
+        ).by(1)
+      end
+
+      it "does nothing if NSFW detection is disabled" do
+        SiteSetting.ai_nsfw_detection_enabled = false
+
+        expect { revisor.revise!(user, raw: "This is my new test") }.not_to change(
+          Jobs::EvaluatePostUploads.jobs,
+          :size,
+        )
+      end
+    end
+  end
+end

@@ -6,7 +6,7 @@ require_relative "../../../support/nsfw_inference_stubs"
 describe DiscourseAI::NSFW::Evaluation do
   before do
     SiteSetting.ai_nsfw_inference_service_api_endpoint = "http://test.com"
-    SiteSetting.ai_nsfw_live_detection_enabled = true
+    SiteSetting.ai_nsfw_detection_enabled = true
   end
 
   fab!(:image) { Fabricate(:s3_image_upload) }

@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../../../support/nsfw_inference_stubs"
 
-describe Jobs::EvaluateContent do
+describe Jobs::EvaluatePostUploads do
   fab!(:image) { Fabricate(:s3_image_upload) }
 
   describe "#execute" do

@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+require_relative "../../../support/toxicity_inference_stubs"
+
+describe DiscourseAI::Toxicity::ChatMessageClassifier do
+  before { SiteSetting.ai_toxicity_flag_automatically = true }
+
+  fab!(:chat_message) { Fabricate(:chat_message) }
+
+  describe "#classify!" do
+    it "creates a reviewable when the chat message is classified as toxic" do
+      ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)
+
+      subject.classify!(chat_message)
+
+      expect(ReviewableChatMessage.where(target: chat_message).count).to eq(1)
+    end
+
+    it "doesn't create a reviewable if the chat message is not classified as toxic" do
+      ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: false)
+
+      subject.classify!(chat_message)
+
+      expect(ReviewableChatMessage.where(target: chat_message).count).to be_zero
+    end
+
+    it "doesn't create a reviewable if flagging is disabled" do
+      SiteSetting.ai_toxicity_flag_automatically = false
+      ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)
+
+      subject.classify!(chat_message)
+
+      expect(ReviewableChatMessage.where(target: chat_message).count).to be_zero
+    end
+
+    it "stores the classification in the plugin store" do
+      ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: false)
+
+      subject.classify!(chat_message)
+      store_row = PluginStore.get("toxicity", "chat_message_#{chat_message.id}").deep_symbolize_keys
+
+      expect(store_row[:classification]).to eq(ToxicityInferenceStubs.civilized_response)
+      expect(store_row[:model]).to eq(SiteSetting.ai_toxicity_inference_service_api_model)
+      expect(store_row[:date]).to be_present
+    end
+  end
+end

@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+
+describe DiscourseAI::Toxicity::EntryPoint do
+  fab!(:user) { Fabricate(:user) }
+
+  describe "registering event callbacks" do
+    before { SiteSetting.ai_toxicity_enabled = true }
+
+    context "when creating a post" do
+      let(:creator) do
+        PostCreator.new(
+          user,
+          raw: "this is the new content for my topic",
+          title: "this is my new topic title",
+        )
+      end
+
+      it "queues a job on post creation" do
+        SiteSetting.ai_toxicity_enabled = true
+
+        expect { creator.create }.to change(Jobs::ToxicityClassifyPost.jobs, :size).by(1)
+      end
+    end
+
+    context "when editing a post" do
+      fab!(:post) { Fabricate(:post, user: user) }
+      let(:revisor) { PostRevisor.new(post) }
+
+      it "queues a job on post update" do
+        expect { revisor.revise!(user, raw: "This is my new test") }.to change(
+          Jobs::ToxicityClassifyPost.jobs,
+          :size,
+        ).by(1)
+      end
+    end
+
+    context "when creating a chat message" do
+      let(:public_chat_channel) { Fabricate(:chat_channel) }
+      let(:creator) do
+        Chat::ChatMessageCreator.new(
+          chat_channel: public_chat_channel,
+          user: user,
+          content: "This is my new test",
+        )
+      end
+
+      it "queues a job when creating a chat message" do
+        expect { creator.create }.to change(Jobs::ToxicityClassifyChatMessage.jobs, :size).by(1)
+      end
+    end
+
+    context "when editing a chat message" do
+      let(:chat_message) { Fabricate(:chat_message) }
+      let(:updater) do
+        Chat::ChatMessageUpdater.new(
+          guardian: Guardian.new(chat_message.user),
+          chat_message: chat_message,
+          new_content: "This is my updated message",
+        )
+      end
+
+      it "queues a job on chat message update" do
+        expect { updater.update }.to change(Jobs::ToxicityClassifyChatMessage.jobs, :size).by(1)
+      end
+    end
+  end
+end

@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+require_relative "../../../../../support/toxicity_inference_stubs"
+
+describe Jobs::ToxicityClassifyChatMessage do
+  describe "#execute" do
+    before do
+      SiteSetting.ai_toxicity_enabled = true
+      SiteSetting.ai_toxicity_flag_automatically = true
+    end
+
+    fab!(:chat_message) { Fabricate(:chat_message) }
+
+    describe "scenarios where we return early without doing anything" do
+      it "does nothing when ai_toxicity_enabled is disabled" do
+        SiteSetting.ai_toxicity_enabled = false
+
+        subject.execute({ chat_message_id: chat_message.id })
+
+        expect(ReviewableChatMessage.where(target: chat_message).count).to be_zero
+      end
+
+      it "does nothing if there's no arg called chat_message_id" do
+        subject.execute({})
+
+        expect(ReviewableChatMessage.where(target: chat_message).count).to be_zero
+      end
+
+      it "does nothing if no chat message matches the given id" do
+        subject.execute({ chat_message_id: nil })
+
+        expect(ReviewableChatMessage.where(target: chat_message).count).to be_zero
+      end
+
+      it "does nothing if the message content is blank" do
+        chat_message.update_columns(message: "")
+
+        subject.execute({ chat_message_id: chat_message.id })
+
+        expect(ReviewableChatMessage.where(target: chat_message).count).to be_zero
+      end
+    end
+
+    it "flags the message when classified as toxic" do
+      ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)
+
+      subject.execute({ chat_message_id: chat_message.id })
+
+      expect(ReviewableChatMessage.where(target: chat_message).count).to eq(1)
+    end
+  end
+end

@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+require_relative "../../../../../support/toxicity_inference_stubs"
+
+describe Jobs::ToxicityClassifyPost do
+  describe "#execute" do
+    before do
+      SiteSetting.ai_toxicity_enabled = true
+      SiteSetting.ai_toxicity_flag_automatically = true
+    end
+
+    fab!(:post) { Fabricate(:post) }
+
+    describe "scenarios where we return early without doing anything" do
+      it "does nothing when ai_toxicity_enabled is disabled" do
+        SiteSetting.ai_toxicity_enabled = false
+
+        subject.execute({ post_id: post.id })
+
+        expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      end
+
+      it "does nothing if there's no arg called post_id" do
+        subject.execute({})
+
+        expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      end
+
+      it "does nothing if no post matches the given id" do
+        subject.execute({ post_id: nil })
+
+        expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      end
+
+      it "does nothing if the post content is blank" do
+        post.update_columns(raw: "")
+
+        subject.execute({ post_id: post.id })
+
+        expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      end
+    end
+
+    it "flags the post when classified as toxic" do
+      ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
+
+      subject.execute({ post_id: post.id })
+
+      expect(ReviewableFlaggedPost.where(target: post).count).to eq(1)
+    end
+  end
+end

@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+require_relative "../../../support/toxicity_inference_stubs"
+
+describe DiscourseAI::Toxicity::PostClassifier do
+  before { SiteSetting.ai_toxicity_flag_automatically = true }
+
+  fab!(:post) { Fabricate(:post) }
+
+  describe "#classify!" do
+    it "creates a reviewable when the post is classified as toxic" do
+      ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
+
+      subject.classify!(post)
+
+      expect(ReviewableFlaggedPost.where(target: post).count).to eq(1)
+    end
+
+    it "doesn't create a reviewable if the post is not classified as toxic" do
+      ToxicityInferenceStubs.stub_post_classification(post, toxic: false)
+
+      subject.classify!(post)
+
+      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+    end
+
+    it "doesn't create a reviewable if flagging is disabled" do
+      SiteSetting.ai_toxicity_flag_automatically = false
+      ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
+
+      subject.classify!(post)
+
+      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+    end
+
+    it "stores the classification in a custom field" do
+      ToxicityInferenceStubs.stub_post_classification(post, toxic: false)
+
+      subject.classify!(post)
+      custom_field = PostCustomField.find_by(post: post, name: "toxicity")
+
+      expect(custom_field.value).to eq(
+        {
+          classification: ToxicityInferenceStubs.civilized_response,
+          model: SiteSetting.ai_toxicity_inference_service_api_model,
+        }.to_json,
+      )
+    end
+  end
+end

@@ -0,0 +1,70 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+
+describe DiscourseAI::Toxicity::ScanQueue do
+  fab!(:group) { Fabricate(:group) }
+
+  before do
+    SiteSetting.ai_toxicity_enabled = true
+    SiteSetting.ai_toxicity_groups_bypass = group.id.to_s
+  end
+
+  describe "#enqueue_post" do
+    fab!(:post) { Fabricate(:post) }
+
+    it "queues a job" do
+      expect { described_class.enqueue_post(post) }.to change(
+        Jobs::ToxicityClassifyPost.jobs,
+        :size,
+      ).by(1)
+    end
+
+    it "does nothing if ai_toxicity_enabled is disabled" do
+      SiteSetting.ai_toxicity_enabled = false
+
+      expect { described_class.enqueue_post(post) }.not_to change(
+        Jobs::ToxicityClassifyPost.jobs,
+        :size,
+      )
+    end
+
+    it "does nothing if the user group is allowlisted" do
+      group.add(post.user)
+
+      expect { described_class.enqueue_post(post) }.not_to change(
+        Jobs::ToxicityClassifyPost.jobs,
+        :size,
+      )
+    end
+  end
+
+  describe "#enqueue_chat_message" do
+    fab!(:chat_message) { Fabricate(:chat_message) }
+
+    it "queues a job" do
+      expect { described_class.enqueue_chat_message(chat_message) }.to change(
+        Jobs::ToxicityClassifyChatMessage.jobs,
+        :size,
+      ).by(1)
+    end
+
+    it "does nothing if ai_toxicity_enabled is disabled" do
+      SiteSetting.ai_toxicity_enabled = false
+
+      expect { described_class.enqueue_chat_message(chat_message) }.not_to change(
+        Jobs::ToxicityClassifyChatMessage.jobs,
+        :size,
+      )
+    end
+
+    it "does nothing if the user group is allowlisted" do
+      group.add(chat_message.user)
+
+      expect { described_class.enqueue_chat_message(chat_message) }.not_to change(
+        Jobs::ToxicityClassifyChatMessage.jobs,
+        :size,
+      )
+    end
+  end
+end

@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+class ToxicityInferenceStubs
+  class << self
+    def endpoint
+      "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify"
+    end
+
+    def model
+      SiteSetting.ai_toxicity_inference_service_api_model
+    end
+
+    def toxic_response
+      {
+        toxicity: 99,
+        severe_toxicity: 1,
+        obscene: 6,
+        identity_attack: 3,
+        insult: 4,
+        threat: 8,
+        sexual_explicit: 5,
+      }
+    end
+
+    def civilized_response
+      {
+        toxicity: 2,
+        severe_toxicity: 1,
+        obscene: 6,
+        identity_attack: 3,
+        insult: 4,
+        threat: 8,
+        sexual_explicit: 5,
+      }
+    end
+
+    def stub_post_classification(post, toxic: false)
+      content = post.post_number == 1 ? "#{post.topic.title}\n#{post.raw}" : post.raw
+      response = toxic ? toxic_response : civilized_response
+
+      WebMock
+        .stub_request(:post, endpoint)
+        .with(body: JSON.dump(model: model, content: content))
+        .to_return(status: 200, body: JSON.dump(response))
+    end
+
+    def stub_chat_message_classification(chat_message, toxic: false)
+      response = toxic ? toxic_response : civilized_response
+
+      WebMock
+        .stub_request(:post, endpoint)
+        .with(body: JSON.dump(model: model, content: chat_message.message))
+        .to_return(status: 200, body: JSON.dump(response))
+    end
+  end
+end

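Note: a rough usage sketch of these stubs, condensed from the PostClassifier spec earlier in this commit (the endpoint value is illustrative, and the behavior shown is exactly what that spec asserts):

# In a spec, point the inference settings at a fake host, stub the request,
# then exercise the classifier directly.
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://test.com"
SiteSetting.ai_toxicity_flag_automatically = true

post = Fabricate(:post)
ToxicityInferenceStubs.stub_post_classification(post, toxic: true)

DiscourseAI::Toxicity::PostClassifier.new.classify!(post)
# With toxic: true, classify! flags the post and a ReviewableFlaggedPost
# is created, as asserted in the spec above.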