# frozen_string_literal: true

require "rails_helper"
require_relative "../../../support/nsfw_inference_stubs"

describe DiscourseAi::Nsfw::Classification do
  before { SiteSetting.ai_nsfw_inference_service_api_endpoint = "http://test.com" }

  let(:available_models) { SiteSetting.ai_nsfw_models.split("|") }

  fab!(:upload_1) { Fabricate(:s3_image_upload) }
  fab!(:post) { Fabricate(:post, uploads: [upload_1]) }

  describe "#request" do
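    # Asserts that, for every enabled model, the returned results match the expected hash.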
    def assert_correctly_classified(results, expected)
      available_models.each { |model| expect(results[model]).to eq(expected[model]) }
    end

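    # Builds the expected classification: model name => { upload id => stubbed inference result }.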
    def build_expected_classification(target, positive: true)
      available_models.reduce({}) do |memo, model|
        model_expected =
          if positive
            NSFWInferenceStubs.positive_result(model)
          else
            NSFWInferenceStubs.negative_result(model)
          end

        memo[model] = {
          target.id => model_expected.merge(target_classified_type: target.class.name),
        }
        memo
      end
    end

    context "when the target has one upload" do
      it "returns the classification and the model used for it" do
        NSFWInferenceStubs.positive(upload_1)
        expected = build_expected_classification(upload_1)

        classification = subject.request(post)

        assert_correctly_classified(classification, expected)
      end

      context "when the target has multiple uploads" do
        fab!(:upload_2) { Fabricate(:upload) }

        before { post.uploads << upload_2 }

        it "returns a classification for each one" do
          NSFWInferenceStubs.positive(upload_1)
          NSFWInferenceStubs.negative(upload_2)
          expected_classification = build_expected_classification(upload_1)
          expected_classification.deep_merge!(
            build_expected_classification(upload_2, positive: false),
          )

          classification = subject.request(post)

          assert_correctly_classified(classification, expected_classification)
        end

        it "correctly skips unsupported uploads" do
          NSFWInferenceStubs.positive(upload_1)
          NSFWInferenceStubs.unsupported(upload_2)
          expected_classification = build_expected_classification(upload_1)

          classification = subject.request(post)

          assert_correctly_classified(classification, expected_classification)
        end
      end
    end
  end

  describe "#should_flag_based_on?" do
    before { SiteSetting.ai_nsfw_flag_automatically = true }

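    # Per-model verdicts (model name => NSFW boolean) passed to #should_flag_based_on?.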
    let(:positive_verdict) { { "opennsfw2" => true, "nsfw_detector" => true } }

    let(:negative_verdict) { { "opennsfw2" => false } }

    it "returns false when NSFW flagging is disabled" do
      SiteSetting.ai_nsfw_flag_automatically = false

      should_flag = subject.should_flag_based_on?(positive_verdict)

      expect(should_flag).to eq(false)
    end

    it "returns true if the response is NSFW based on our thresholds" do
      should_flag = subject.should_flag_based_on?(positive_verdict)

      expect(should_flag).to eq(true)
    end

    it "returns false if the response is safe based on our thresholds" do
      should_flag = subject.should_flag_based_on?(negative_verdict)

      expect(should_flag).to eq(false)
    end
  end
end