FEATURE: Introduce NSFW content detection basic flow.

Roman Rizzi 2023-02-23 11:08:34 -03:00
parent f572a7cc2c
commit 6f0c141062
9 changed files with 205 additions and 3 deletions


@@ -1,5 +1,5 @@
 plugins:
-  ai_enabled:
+  discourse_ai_enabled:
     default: true
     client: true
@@ -64,3 +64,10 @@ plugins:
     choices:
       - sentiment
       - emotion
+
+  ai_nsfw_live_detection_enabled: false
+  ai_nsfw_inference_service_api_endpoint:
+    default: "https://nsfw-testing.demo-by-discourse.com"
+  ai_nsfw_inference_service_api_key:
+    default: ""
+  ai_nsfw_probability_threshold: 60
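
For context: each entry added under plugins: in this YAML becomes a SiteSetting accessor once the plugin loads, which is how the rest of the commit reads these values. A minimal sketch, assuming the defaults above are left untouched:

  SiteSetting.ai_nsfw_live_detection_enabled          # => false until an admin enables it
  SiteSetting.ai_nsfw_inference_service_api_endpoint  # => "https://nsfw-testing.demo-by-discourse.com"
  SiteSetting.ai_nsfw_probability_threshold           # => 60, the cutoff used by the evaluation code below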


@@ -0,0 +1,16 @@
# frozen_string_literal: true

module DiscourseAI
  module NSFW
    class EntryPoint
      def inject_into(plugin)
        require_relative "evaluation.rb"
        require_relative "jobs/regular/evaluate_content.rb"

        plugin.add_model_callback(Upload, :after_create) do
          Jobs.enqueue(:evaluate_content, upload_id: self.id)
        end
      end
    end
  end
end


@@ -0,0 +1,38 @@
# frozen_string_literal: true

module DiscourseAI
  module NSFW
    class Evaluation
      AVAILABLE_MODELS = %w[opennsfw2 nsfw_detector]

      def perform(upload)
        result = { verdict: false, evaluation: {} }

        AVAILABLE_MODELS.each do |model|
          model_result = evaluate_with_model(model, upload).symbolize_keys!

          model_result.values.each do |classification_prob|
            if classification_prob.to_i >= SiteSetting.ai_nsfw_probability_threshold
              result[:verdict] = true
            end
          end

          result[:evaluation][model.to_sym] = model_result
        end

        result
      end

      private

      def evaluate_with_model(model, upload)
        DiscourseAI::InferenceManager.perform!(
          "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
          model,
          Discourse.store.cdn_url(upload.url),
          SiteSetting.ai_nsfw_inference_service_api_key,
        )
      end
    end
  end
end
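
To make the return shape concrete: assuming the inference service responds with the same payloads the NSFWInferenceStubs support class below uses, this is a sketch of what perform would yield for an image judged NSFW (hypothetical values, not captured output):

  result = DiscourseAI::NSFW::Evaluation.new.perform(upload)
  # result ==
  # {
  #   verdict: true,
  #   evaluation: {
  #     opennsfw2: { nsfw_probability: 90 },
  #     nsfw_detector: { drawings: 1, hentai: 2, neutral: 0, porn: 90, sexy: 79 },
  #   },
  # }
  # verdict is true because at least one score (90) is at or above
  # SiteSetting.ai_nsfw_probability_threshold (default 60).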


@@ -0,0 +1,17 @@
# frozen_string_literal: true

module Jobs
  class EvaluateContent < ::Jobs::Base
    def execute(args)
      upload = Upload.find_by_id(args[:upload_id])
      return unless upload

      result = DiscourseAI::NSFW::Evaluation.new.perform(upload)

      # FIXME(roman): This is a simplistic action just to create
      # the basic flow. We'll introduce flagging capabilities
      # in the future.
      upload.destroy! if result[:verdict]
    end
  end
end


@@ -7,10 +7,10 @@
 # url: TODO
 # required_version: 2.7.0

-enabled_site_setting :ai_enabled
+enabled_site_setting :discourse_ai_enabled

 after_initialize do
-  module ::Disorder
+  module ::DiscourseAI
     PLUGIN_NAME = "discourse-ai"
   end

@@ -40,4 +40,7 @@ after_initialize do
   on(:chat_message_edited) do |chat_message|
     DiscourseAI::Toxicity::EventHandler.handle_chat_async(chat_message)
   end
+
+  require_relative "lib/modules/nsfw/entry_point.rb"
+  DiscourseAI::NSFW::EntryPoint.new.inject_into(self)
 end


@@ -0,0 +1,46 @@
# frozen_string_literal: true

require "rails_helper"
require_relative "../../../support/nsfw_inference_stubs"

describe DiscourseAI::NSFW::Evaluation do
  before { SiteSetting.ai_nsfw_live_detection_enabled = true }

  fab!(:image) { Fabricate(:s3_image_upload) }

  let(:available_models) { DiscourseAI::NSFW::Evaluation::AVAILABLE_MODELS }

  describe "perform" do
    context "when we determine content is NSFW" do
      before { NSFWInferenceStubs.positive(image) }

      it "returns true alongside the evaluation" do
        result = subject.perform(image)

        expect(result[:verdict]).to eq(true)

        available_models.each do |model|
          expect(result.dig(:evaluation, model.to_sym)).to eq(
            NSFWInferenceStubs.positive_result(model),
          )
        end
      end
    end

    context "when we determine content is safe" do
      before { NSFWInferenceStubs.negative(image) }

      it "returns false alongside the evaluation" do
        result = subject.perform(image)

        expect(result[:verdict]).to eq(false)

        available_models.each do |model|
          expect(result.dig(:evaluation, model.to_sym)).to eq(
            NSFWInferenceStubs.negative_result(model),
          )
        end
      end
    end
  end
end


@@ -0,0 +1,30 @@
# frozen_string_literal: true

require "rails_helper"
require_relative "../../../../../support/nsfw_inference_stubs"

describe Jobs::EvaluateContent do
  fab!(:image) { Fabricate(:s3_image_upload) }

  describe "#execute" do
    context "when we conclude content is NSFW" do
      before { NSFWInferenceStubs.positive(image) }

      it "deletes the upload" do
        subject.execute(upload_id: image.id)

        expect { image.reload }.to raise_error(ActiveRecord::RecordNotFound)
      end
    end

    context "when we conclude content is not NSFW" do
      before { NSFWInferenceStubs.negative(image) }

      it "does nothing" do
        subject.execute(upload_id: image.id)

        expect(image.reload).to be_present
      end
    end
  end
end


@@ -0,0 +1,45 @@
class NSFWInferenceStubs
  class << self
    def endpoint
      "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify"
    end

    def upload_url(upload)
      Discourse.store.cdn_url(upload.url)
    end

    def positive_result(model)
      return { nsfw_probability: 90 } if model == "opennsfw2"
      { drawings: 1, hentai: 2, neutral: 0, porn: 90, sexy: 79 }
    end

    def negative_result(model)
      return { nsfw_probability: 3 } if model == "opennsfw2"
      { drawings: 1, hentai: 2, neutral: 0, porn: 3, sexy: 1 }
    end

    def positive(upload)
      WebMock
        .stub_request(:post, endpoint)
        .with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
        .to_return(status: 200, body: JSON.dump(positive_result("nsfw_detector")))

      WebMock
        .stub_request(:post, endpoint)
        .with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
        .to_return(status: 200, body: JSON.dump(positive_result("opennsfw2")))
    end

    def negative(upload)
      WebMock
        .stub_request(:post, endpoint)
        .with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
        .to_return(status: 200, body: JSON.dump(negative_result("nsfw_detector")))

      WebMock
        .stub_request(:post, endpoint)
        .with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
        .to_return(status: 200, body: JSON.dump(negative_result("opennsfw2")))
    end
  end
end