DEV: Cleanup deprecations (#952)

Rafael dos Santos Silva 2024-12-02 14:18:03 -03:00 committed by GitHub
parent 0abd4b1244
commit 3828370679
33 changed files with 7 additions and 1591 deletions

@@ -1,17 +0,0 @@
# frozen_string_literal: true
module Jobs
class EvaluatePostUploads < ::Jobs::Base
def execute(args)
return unless SiteSetting.ai_nsfw_detection_enabled
return if (post_id = args[:post_id]).blank?
post = Post.includes(:uploads).find_by_id(post_id)
return if post.nil? || post.uploads.empty?
return if post.uploads.none? { |u| FileHelper.is_supported_image?(u.url) }
DiscourseAi::PostClassificator.new(DiscourseAi::Nsfw::Classification.new).classify!(post)
end
end
end
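
This job had a single call site, the NSFW entry point removed further down in this commit; for reference, the enqueue looked like this (a sketch assuming a `post` record inside the plugin's environment):
# Fired from the post_created/post_edited callbacks when NSFW detection was enabled.
Jobs.enqueue(:evaluate_post_uploads, post_id: post.id)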

@@ -1,18 +0,0 @@
# frozen_string_literal: true
module ::Jobs
class ToxicityClassifyChatMessage < ::Jobs::Base
def execute(args)
return unless SiteSetting.ai_toxicity_enabled
return if (chat_message_id = args[:chat_message_id]).blank?
chat_message = ::Chat::Message.find_by(id: chat_message_id)
return if chat_message&.message.blank?
DiscourseAi::ChatMessageClassificator.new(
DiscourseAi::Toxicity::ToxicityClassification.new,
).classify!(chat_message)
end
end
end

@@ -1,19 +0,0 @@
# frozen_string_literal: true
module ::Jobs
class ToxicityClassifyPost < ::Jobs::Base
def execute(args)
return unless SiteSetting.ai_toxicity_enabled
post_id = args[:post_id]
return if post_id.blank?
post = Post.find_by(id: post_id, post_type: Post.types[:regular])
return if post&.raw.blank?
DiscourseAi::PostClassificator.new(
DiscourseAi::Toxicity::ToxicityClassification.new,
).classify!(post)
end
end
end

@@ -10,55 +10,6 @@ discourse_ai:
- "disabled"
- "lax"
- "strict"
ai_toxicity_enabled:
default: false
client: true
hidden: true
ai_toxicity_inference_service_api_endpoint:
default: ""
ai_toxicity_inference_service_api_endpoint_srv:
default: ""
hidden: true
ai_toxicity_inference_service_api_key:
default: ""
secret: true
ai_toxicity_inference_service_api_model:
type: enum
default: unbiased
choices:
- unbiased
- multilingual
- original
ai_toxicity_flag_automatically:
default: false
client: false
ai_toxicity_flag_threshold_toxicity:
default: 80
client: false
ai_toxicity_flag_threshold_severe_toxicity:
default: 30
client: false
ai_toxicity_flag_threshold_obscene:
default: 80
client: false
ai_toxicity_flag_threshold_identity_attack:
default: 60
client: false
ai_toxicity_flag_threshold_insult:
default: 60
client: false
ai_toxicity_flag_threshold_threat:
default: 60
client: false
ai_toxicity_flag_threshold_sexual_explicit:
default: 60
client: false
ai_toxicity_groups_bypass:
type: group_list
list_type: compact
default: "3" # 3: @staff
allow_any: false
refresh: true
ai_sentiment_enabled:
default: false
@@ -67,50 +18,6 @@ discourse_ai:
default: ""
json_schema: DiscourseAi::Sentiment::SentimentSiteSettingJsonSchema
ai_nsfw_detection_enabled:
default: false
hidden: true
ai_nsfw_inference_service_api_endpoint:
default: ""
ai_nsfw_inference_service_api_endpoint_srv:
default: ""
hidden: true
ai_nsfw_inference_service_api_key:
default: ""
secret: true
ai_nsfw_flag_automatically: true
ai_nsfw_flag_threshold_general: 60
ai_nsfw_flag_threshold_drawings: 60
ai_nsfw_flag_threshold_hentai: 60
ai_nsfw_flag_threshold_porn: 60
ai_nsfw_flag_threshold_sexy: 70
ai_nsfw_models:
type: list
list_type: compact
default: "opennsfw2"
allow_any: false
choices:
- opennsfw2
- nsfw_detector
ai_openai_gpt35_url:
default: "https://api.openai.com/v1/chat/completions"
hidden: true
ai_openai_gpt35_16k_url:
default: "https://api.openai.com/v1/chat/completions"
hidden: true
ai_openai_gpt4o_url:
default: "https://api.openai.com/v1/chat/completions"
hidden: true
ai_openai_gpt4_url:
default: "https://api.openai.com/v1/chat/completions"
hidden: true
ai_openai_gpt4_32k_url:
default: "https://api.openai.com/v1/chat/completions"
hidden: true
ai_openai_gpt4_turbo_url:
default: "https://api.openai.com/v1/chat/completions"
hidden: true
ai_openai_dall_e_3_url: "https://api.openai.com/v1/images/generations"
ai_openai_embeddings_url: "https://api.openai.com/v1/embeddings"
ai_openai_organization:
@@ -119,12 +26,6 @@ discourse_ai:
ai_openai_api_key:
default: ""
secret: true
ai_anthropic_api_key:
default: ""
hidden: true
ai_cohere_api_key:
default: ""
hidden: true
ai_stability_api_key:
default: ""
secret: true
@@ -139,18 +40,6 @@ discourse_ai:
- "stable-diffusion-xl-1024-v1-0"
- "stable-diffusion-768-v2-1"
- "stable-diffusion-v1-5"
ai_hugging_face_api_url:
default: ""
hidden: true
ai_hugging_face_api_key:
default: ""
hidden: true
ai_hugging_face_token_limit:
default: 4096
hidden: true
ai_hugging_face_model_display_name:
default: ""
hidden: true
ai_hugging_face_tei_endpoint:
default: ""
ai_hugging_face_tei_endpoint_srv:
@@ -168,16 +57,6 @@ discourse_ai:
secret: true
ai_google_custom_search_cx:
default: ""
ai_bedrock_access_key_id:
default: ""
secret: true
hidden: true
ai_bedrock_secret_access_key:
default: ""
hidden: true
ai_bedrock_region:
default: "us-east-1"
hidden: true
ai_cloudflare_workers_account_id:
default: ""
secret: true
@@ -187,30 +66,9 @@ discourse_ai:
ai_gemini_api_key:
default: ""
hidden: false
ai_vllm_endpoint:
default: ""
hidden: true
ai_vllm_endpoint_srv:
default: ""
hidden: true
ai_vllm_api_key:
default: ""
hidden: true
ai_llava_endpoint:
default: ""
hidden: true
ai_llava_endpoint_srv:
default: ""
hidden: true
ai_llava_api_key:
default: ""
hidden: true
ai_strict_token_counting:
default: false
hidden: true
ai_ollama_endpoint:
hidden: true
default: ""
ai_helper_enabled:
default: false
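
All of the keys removed above were ordinary site settings read through SiteSetting in the classes deleted later in this commit. A minimal sketch of two lookup patterns that appear below (values are the removed defaults, shown for illustration only):
# Threshold settings were looked up dynamically per classification label.
SiteSetting.send("ai_toxicity_flag_threshold_toxicity") # => 80 with the removed default
# group_list settings expose an auto-generated *_map reader returning integer group ids;
# ScanQueue used it to skip classification for bypassed groups (default "3", i.e. @staff).
SiteSetting.ai_toxicity_groups_bypass_map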

@@ -1,25 +0,0 @@
# frozen_string_literal: true
module ::DiscourseAi
class ChatMessageClassificator < Classificator
private
def flag!(chat_message, classification, verdicts, accuracies)
reviewable =
ReviewableAiChatMessage.needs_review!(
created_by: Discourse.system_user,
target: chat_message,
reviewable_by_moderator: true,
potential_spam: false,
payload: {
classification: classification,
accuracies: accuracies,
verdicts: verdicts,
},
)
reviewable.update(target_created_by: chat_message.user)
add_score(reviewable)
end
end
end

@@ -1,81 +0,0 @@
# frozen_string_literal: true
module ::DiscourseAi
class Classificator
def initialize(classification_model)
@classification_model = classification_model
end
def classify!(target)
return :cannot_classify unless classification_model.can_classify?(target)
classification_model
.request(target)
.tap do |classification|
store_classification(target, classification)
verdicts = classification_model.get_verdicts(classification)
if classification_model.should_flag_based_on?(verdicts)
accuracies = get_model_accuracies(verdicts.keys)
flag!(target, classification, verdicts, accuracies)
end
end
end
protected
attr_reader :classification_model
def flag!(_target, _classification, _verdicts, _accuracies)
raise NotImplementedError
end
def get_model_accuracies(models)
models
.map do |name|
accuracy =
ModelAccuracy.find_or_create_by(
model: name,
classification_type: classification_model.type,
)
[name, accuracy.calculate_accuracy]
end
.to_h
end
def add_score(reviewable)
reviewable.add_score(
Discourse.system_user,
ReviewableScore.types[:inappropriate],
reason: "flagged_by_#{classification_model.type}",
force_review: true,
)
end
def store_classification(target, classification)
attrs =
classification.map do |model_name, classifications|
{
model_used: model_name,
target_id: target.id,
target_type: target.class.sti_name,
classification_type: classification_model.type,
classification: classifications,
updated_at: DateTime.now,
created_at: DateTime.now,
}
end
ClassificationResult.upsert_all(
attrs,
unique_by: %i[target_id target_type model_used],
update_only: %i[classification],
)
end
def flagger
Discourse.system_user
end
end
end
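
For reference, the two concrete subclasses of this base class (PostClassificator and ChatMessageClassificator, both also removed in this commit) were driven like this elsewhere in the plugin; a sketch assuming `post` and `chat_message` records in the plugin's environment:
# Classifies a post with the toxicity model and flags/hides it when thresholds are exceeded.
DiscourseAi::PostClassificator.new(
DiscourseAi::Toxicity::ToxicityClassification.new,
).classify!(post)
# Same flow for chat, producing a ReviewableAiChatMessage when flagged.
DiscourseAi::ChatMessageClassificator.new(
DiscourseAi::Toxicity::ToxicityClassification.new,
).classify!(chat_message)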

@@ -1,95 +0,0 @@
# frozen_string_literal: true
module DiscourseAi
module Nsfw
class Classification
def type
:nsfw
end
def can_classify?(target)
content_of(target).present?
end
def get_verdicts(classification_data)
classification_data
.map do |model_name, classifications|
verdict =
classifications.values.any? do |data|
send("#{model_name}_verdict?", data.except(:neutral, :target_classified_type))
end
[model_name, verdict]
end
.to_h
end
def should_flag_based_on?(verdicts)
return false if !SiteSetting.ai_nsfw_flag_automatically
verdicts.values.any?
end
def request(target_to_classify)
uploads_to_classify = content_of(target_to_classify)
available_models.reduce({}) do |memo, model|
memo[model] = uploads_to_classify.reduce({}) do |upl_memo, upload|
classification =
evaluate_with_model(model, upload).merge(target_classified_type: upload.class.name)
# 415 denotes that the image is not supported by the model, so we skip it
upl_memo[upload.id] = classification if classification.dig(:status) != 415
upl_memo
end
memo
end
end
private
def evaluate_with_model(model, upload)
upload_url = Discourse.store.cdn_url(upload.url)
upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")
DiscourseAi::Inference::DiscourseClassifier.new(
"#{endpoint}/api/v1/classify",
SiteSetting.ai_nsfw_inference_service_api_key,
model,
).perform!(upload_url)
end
def available_models
SiteSetting.ai_nsfw_models.split("|")
end
def content_of(target_to_classify)
target_to_classify.uploads.to_a.select { |u| FileHelper.is_supported_image?(u.url) }
end
def opennsfw2_verdict?(classification)
classification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general
end
def nsfw_detector_verdict?(classification)
classification.any? do |key, value|
value.to_i >= SiteSetting.send("ai_nsfw_flag_threshold_#{key}")
end
end
def endpoint
if SiteSetting.ai_nsfw_inference_service_api_endpoint_srv.present?
service =
DiscourseAi::Utils::DnsSrv.lookup(
SiteSetting.ai_nsfw_inference_service_api_endpoint_srv,
)
"https://#{service.target}:#{service.port}"
else
SiteSetting.ai_nsfw_inference_service_api_endpoint
end
end
end
end
end
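
The hash built by #request above is keyed by model name and then by upload id; an illustrative example of its shape for a post with one upload (id 42 is hypothetical, scores borrowed from the test stubs further down):
# Shape only; real values come from the NSFW inference service.
{
"opennsfw2" => { 42 => { nsfw_probability: 90, target_classified_type: "Upload" } },
"nsfw_detector" => {
42 => { drawings: 1, hentai: 2, neutral: 0, porn: 90, sexy: 79, target_classified_type: "Upload" },
},
}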

@@ -1,20 +0,0 @@
# frozen_string_literal: true
module DiscourseAi
module Nsfw
class EntryPoint
def inject_into(plugin)
nsfw_detection_cb =
Proc.new do |post|
if SiteSetting.ai_nsfw_detection_enabled &&
DiscourseAi::Nsfw::Classification.new.can_classify?(post)
Jobs.enqueue(:evaluate_post_uploads, post_id: post.id)
end
end
plugin.on(:post_created, &nsfw_detection_cb)
plugin.on(:post_edited, &nsfw_detection_cb)
end
end
end
end

@@ -1,26 +0,0 @@
# frozen_string_literal: true
module ::DiscourseAi
class PostClassificator < Classificator
private
def flag!(post, classification, verdicts, accuracies)
post.hide!(ReviewableScore.types[:inappropriate])
reviewable =
ReviewableAiPost.needs_review!(
created_by: Discourse.system_user,
target: post,
reviewable_by_moderator: true,
potential_spam: false,
payload: {
classification: classification,
accuracies: accuracies,
verdicts: verdicts,
},
)
add_score(reviewable)
end
end
end

@@ -65,6 +65,10 @@ module DiscourseAi
store_classification(target, results)
end
def classifiers
DiscourseAi::Sentiment::SentimentSiteSettingJsonSchema.values
end
private
def prepare_text(target)
@@ -78,10 +82,6 @@ module DiscourseAi
Tokenizer::BertTokenizer.truncate(content, 512)
end
def classifiers
DiscourseAi::Sentiment::SentimentSiteSettingJsonSchema.values
end
def request_with(content, config, base_url = Discourse.base_url)
result =
DiscourseAi::Inference::HuggingFaceTextEmbeddings.classify(content, config, base_url)
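
The two hunks above move classifiers out of the private section of DiscourseAi::Sentiment::PostClassification, which is what lets the spec and stub updates later in this commit call it directly:
# Replaces the removed SentimentClassification#available_classifiers in specs and stubs.
DiscourseAi::Sentiment::PostClassification.new.classifiers
# => the model configs defined by SiteSetting.ai_sentiment_model_configs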

@@ -1,64 +0,0 @@
# frozen_string_literal: true
module DiscourseAi
module Sentiment
class SentimentClassification
def type
:sentiment
end
def available_classifiers
DiscourseAi::Sentiment::SentimentSiteSettingJsonSchema.values
end
def can_classify?(target)
content_of(target).present?
end
def get_verdicts(_)
available_classifiers.reduce({}) do |memo, model|
memo[model.model_name] = false
memo
end
end
def should_flag_based_on?(_verdicts)
# We don't flag based on sentiment classification.
false
end
def request(target_to_classify)
target_content = content_of(target_to_classify)
available_classifiers.reduce({}) do |memo, model|
memo[model.model_name] = request_with(target_content, model)
memo
end
end
def transform_result(result)
hash_result = {}
result.each { |r| hash_result[r[:label]] = r[:score] }
hash_result
end
private
def request_with(content, model_config)
result = ::DiscourseAi::Inference::HuggingFaceTextEmbeddings.classify(content, model_config)
transform_result(result)
end
def content_of(target_to_classify)
content =
if target_to_classify.post_number == 1
"#{target_to_classify.topic.title}\n#{target_to_classify.raw}"
else
target_to_classify.raw
end
Tokenizer::BertTokenizer.truncate(content, 512)
end
end
end
end
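
transform_result above flattens the inference service's array of label/score pairs into a hash; a small illustrative example (label names and scores are hypothetical, the real labels come from the models configured in ai_sentiment_model_configs):
raw = [{ label: "joy", score: 0.9 }, { label: "sadness", score: 0.1 }]
DiscourseAi::Sentiment::SentimentClassification.new.transform_result(raw)
# => { "joy" => 0.9, "sadness" => 0.1 }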

@@ -1,20 +0,0 @@
# frozen_string_literal: true
module DiscourseAi
module Toxicity
class EntryPoint
def inject_into(plugin)
post_analysis_cb = Proc.new { |post| DiscourseAi::Toxicity::ScanQueue.enqueue_post(post) }
plugin.on(:post_created, &post_analysis_cb)
plugin.on(:post_edited, &post_analysis_cb)
chat_message_analysis_cb =
Proc.new { |message| DiscourseAi::Toxicity::ScanQueue.enqueue_chat_message(message) }
plugin.on(:chat_message_created, &chat_message_analysis_cb)
plugin.on(:chat_message_edited, &chat_message_analysis_cb)
end
end
end
end

@@ -1,27 +0,0 @@
# frozen_string_literal: true
module ::DiscourseAi
module Toxicity
class ScanQueue
class << self
def enqueue_post(post)
return if bypass?(post)
Jobs.enqueue(:toxicity_classify_post, post_id: post.id)
end
def enqueue_chat_message(chat_message)
return if bypass?(chat_message)
Jobs.enqueue(:toxicity_classify_chat_message, chat_message_id: chat_message.id)
end
def bypass?(content)
!SiteSetting.ai_toxicity_enabled || group_bypass?(content.user)
end
def group_bypass?(user)
user.groups.pluck(:id).intersection(SiteSetting.ai_toxicity_groups_bypass_map).present?
end
end
end
end
end

@@ -1,88 +0,0 @@
# frozen_string_literal: true
module DiscourseAi
module Toxicity
class ToxicityClassification
CLASSIFICATION_LABELS = %i[
toxicity
severe_toxicity
obscene
identity_attack
insult
threat
sexual_explicit
]
def type
:toxicity
end
def can_classify?(target)
content_of(target).present?
end
def get_verdicts(classification_data)
# We only use one model for this classification.
# Classification_data looks like { model_name => classification }
_model_used, data = classification_data.to_a.first
verdict =
CLASSIFICATION_LABELS.any? do |label|
data[label] >= SiteSetting.send("ai_toxicity_flag_threshold_#{label}")
end
{ available_model => verdict }
end
def should_flag_based_on?(verdicts)
return false if !SiteSetting.ai_toxicity_flag_automatically
verdicts.values.any?
end
def request(target_to_classify)
data =
::DiscourseAi::Inference::DiscourseClassifier.new(
"#{endpoint}/api/v1/classify",
SiteSetting.ai_toxicity_inference_service_api_key,
SiteSetting.ai_toxicity_inference_service_api_model,
).perform!(content_of(target_to_classify))
{ available_model => data }
end
private
def available_model
SiteSetting.ai_toxicity_inference_service_api_model
end
def content_of(target_to_classify)
content =
if target_to_classify.is_a?(Chat::Message)
target_to_classify.message
else
if target_to_classify.post_number == 1
"#{target_to_classify.topic.title}\n#{target_to_classify.raw}"
else
target_to_classify.raw
end
end
Tokenizer::BertTokenizer.truncate(content, 512)
end
def endpoint
if SiteSetting.ai_toxicity_inference_service_api_endpoint_srv.present?
service =
DiscourseAi::Utils::DnsSrv.lookup(
SiteSetting.ai_toxicity_inference_service_api_endpoint_srv,
)
"https://#{service.target}:#{service.port}"
else
SiteSetting.ai_toxicity_inference_service_api_endpoint
end
end
end
end
end
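
For context, get_verdicts above compares each label's score against its ai_toxicity_flag_threshold_* setting; a worked example using the toxic response from the test stubs below and the removed default thresholds:
# Scores from the inference service (same values as ToxicityInferenceStubs.toxic_response).
data = { toxicity: 99, severe_toxicity: 1, obscene: 6, identity_attack: 3, insult: 4, threat: 8, sexual_explicit: 5 }
# Removed defaults: toxicity 80, severe_toxicity 30, obscene 80, identity_attack 60,
# insult 60, threat 60, sexual_explicit 60. Only toxicity (99 >= 80) crosses its threshold,
# so the verdict for the configured model (default "unbiased") is:
# => { "unbiased" => true }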

@@ -68,8 +68,6 @@ after_initialize do
[
DiscourseAi::Embeddings::EntryPoint.new,
DiscourseAi::Nsfw::EntryPoint.new,
DiscourseAi::Toxicity::EntryPoint.new,
DiscourseAi::Sentiment::EntryPoint.new,
DiscourseAi::AiHelper::EntryPoint.new,
DiscourseAi::Summarization::EntryPoint.new,

@@ -1,63 +0,0 @@
# frozen_string_literal: true
describe DiscourseAi::Nsfw::EntryPoint do
fab!(:user) { Fabricate(:user, refresh_auto_groups: true) }
describe "registering event callbacks" do
fab!(:image_upload) { Fabricate(:upload) }
let(:raw_with_upload) { "A public post with an image.\n![](#{image_upload.short_path})" }
before { SiteSetting.ai_nsfw_detection_enabled = true }
context "when creating a post" do
let(:creator) do
PostCreator.new(user, raw: raw_with_upload, title: "this is my new topic title")
end
it "queues a job on create if sentiment analysis is enabled" do
expect { creator.create }.to change(Jobs::EvaluatePostUploads.jobs, :size).by(1)
end
it "does nothing if sentiment analysis is disabled" do
SiteSetting.ai_nsfw_detection_enabled = false
expect { creator.create }.not_to change(Jobs::EvaluatePostUploads.jobs, :size)
end
it "does nothing if the post has no uploads" do
creator_2 =
PostCreator.new(user, raw: "this is a test", title: "this is my new topic title")
expect { creator_2.create }.not_to change(Jobs::EvaluatePostUploads.jobs, :size)
end
end
context "when editing a post" do
fab!(:post) { Fabricate(:post, user: user) }
let(:revisor) { PostRevisor.new(post) }
it "queues a job on update if sentiment analysis is enabled" do
expect { revisor.revise!(user, raw: raw_with_upload) }.to change(
Jobs::EvaluatePostUploads.jobs,
:size,
).by(1)
end
it "does nothing if sentiment analysis is disabled" do
SiteSetting.ai_nsfw_detection_enabled = false
expect { revisor.revise!(user, raw: raw_with_upload) }.not_to change(
Jobs::EvaluatePostUploads.jobs,
:size,
)
end
it "does nothing if the new raw has no uploads" do
expect { revisor.revise!(user, raw: "this is a test") }.not_to change(
Jobs::EvaluatePostUploads.jobs,
:size,
)
end
end
end
end

@@ -1,79 +0,0 @@
# frozen_string_literal: true
require_relative "../../../../../support/nsfw_inference_stubs"
describe Jobs::EvaluatePostUploads do
describe "#execute" do
before do
SiteSetting.ai_nsfw_detection_enabled = true
SiteSetting.ai_nsfw_inference_service_api_endpoint = "http://test.com"
end
fab!(:upload_1) { Fabricate(:s3_image_upload) }
fab!(:post) { Fabricate(:post, uploads: [upload_1]) }
describe "scenarios where we return early without doing anything" do
before { NSFWInferenceStubs.positive(upload_1) }
it "does nothing when ai_toxicity_enabled is disabled" do
SiteSetting.ai_nsfw_detection_enabled = false
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "does nothing if there's no arg called post_id" do
subject.execute({})
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "does nothing if no post match the given id" do
subject.execute({ post_id: nil })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "does nothing if the post has no uploads" do
post_no_uploads = Fabricate(:post)
subject.execute({ post_id: post_no_uploads.id })
expect(ReviewableAiPost.where(target: post_no_uploads).count).to be_zero
end
it "does nothing if the upload is not an image" do
SiteSetting.authorized_extensions = "pdf"
upload_1.update!(original_filename: "test.pdf", url: "test.pdf")
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
end
context "when the post has one upload" do
context "when we conclude content is NSFW" do
before { NSFWInferenceStubs.positive(upload_1) }
it "flags and hides the post" do
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to eq(1)
expect(post.reload.hidden?).to eq(true)
end
end
context "when we conclude content is safe" do
before { NSFWInferenceStubs.negative(upload_1) }
it "does nothing" do
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
end
end
end
end

@@ -1,103 +0,0 @@
# frozen_string_literal: true
require "rails_helper"
require_relative "../../../support/nsfw_inference_stubs"
describe DiscourseAi::Nsfw::Classification do
before { SiteSetting.ai_nsfw_inference_service_api_endpoint = "http://test.com" }
let(:available_models) { SiteSetting.ai_nsfw_models.split("|") }
fab!(:upload_1) { Fabricate(:s3_image_upload) }
fab!(:post) { Fabricate(:post, uploads: [upload_1]) }
describe "#request" do
def assert_correctly_classified(results, expected)
available_models.each { |model| expect(results[model]).to eq(expected[model]) }
end
def build_expected_classification(target, positive: true)
available_models.reduce({}) do |memo, model|
model_expected =
if positive
NSFWInferenceStubs.positive_result(model)
else
NSFWInferenceStubs.negative_result(model)
end
memo[model] = {
target.id => model_expected.merge(target_classified_type: target.class.name),
}
memo
end
end
context "when the target has one upload" do
it "returns the classification and the model used for it" do
NSFWInferenceStubs.positive(upload_1)
expected = build_expected_classification(upload_1)
classification = subject.request(post)
assert_correctly_classified(classification, expected)
end
context "when the target has multiple uploads" do
fab!(:upload_2) { Fabricate(:upload) }
before { post.uploads << upload_2 }
it "returns a classification for each one" do
NSFWInferenceStubs.positive(upload_1)
NSFWInferenceStubs.negative(upload_2)
expected_classification = build_expected_classification(upload_1)
expected_classification.deep_merge!(
build_expected_classification(upload_2, positive: false),
)
classification = subject.request(post)
assert_correctly_classified(classification, expected_classification)
end
it "correctly skips unsupported uploads" do
NSFWInferenceStubs.positive(upload_1)
NSFWInferenceStubs.unsupported(upload_2)
expected_classification = build_expected_classification(upload_1)
classification = subject.request(post)
assert_correctly_classified(classification, expected_classification)
end
end
end
end
describe "#should_flag_based_on?" do
before { SiteSetting.ai_nsfw_flag_automatically = true }
let(:positive_verdict) { { "opennsfw2" => true, "nsfw_detector" => true } }
let(:negative_verdict) { { "opennsfw2" => false } }
it "returns false when NSFW flagging is disabled" do
SiteSetting.ai_nsfw_flag_automatically = false
should_flag = subject.should_flag_based_on?(positive_verdict)
expect(should_flag).to eq(false)
end
it "returns true if the response is NSFW based on our thresholds" do
should_flag = subject.should_flag_based_on?(positive_verdict)
expect(should_flag).to eq(true)
end
it "returns false if the response is safe based on our thresholds" do
should_flag = subject.should_flag_based_on?(negative_verdict)
expect(should_flag).to eq(false)
end
end
end

@@ -43,8 +43,7 @@ describe Jobs::PostSentimentAnalysis do
end
it "successfully classifies the post" do
expected_analysis =
DiscourseAi::Sentiment::SentimentClassification.new.available_classifiers.length
expected_analysis = DiscourseAi::Sentiment::PostClassification.new.classifiers.length
SentimentInferenceStubs.stub_classification(post)
subject.execute({ post_id: post.id })

@@ -1,26 +0,0 @@
# frozen_string_literal: true
require_relative "../../../support/sentiment_inference_stubs"
describe DiscourseAi::Sentiment::SentimentClassification do
fab!(:target) { Fabricate(:post) }
describe "#request" do
before do
SiteSetting.ai_sentiment_model_configs =
"[{\"model_name\":\"SamLowe/roberta-base-go_emotions\",\"endpoint\":\"http://samlowe-emotion.com\",\"api_key\":\"123\"},{\"model_name\":\"j-hartmann/emotion-english-distilroberta-base\",\"endpoint\":\"http://jhartmann-emotion.com\",\"api_key\":\"123\"},{\"model_name\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\",\"endpoint\":\"http://cardiffnlp-sentiment.com\",\"api_key\":\"123\"}]"
end
it "returns the classification and the model used for it" do
SentimentInferenceStubs.stub_classification(target)
result = subject.request(target)
subject.available_classifiers.each do |model_config|
expect(result[model_config.model_name]).to eq(
subject.transform_result(SentimentInferenceStubs.model_response(model_config.model_name)),
)
end
end
end
end

@@ -1,66 +0,0 @@
# frozen_string_literal: true
describe DiscourseAi::Toxicity::EntryPoint do
fab!(:user) { Fabricate(:user, refresh_auto_groups: true) }
describe "registering event callbacks" do
before { SiteSetting.ai_toxicity_enabled = true }
context "when creating a post" do
let(:creator) do
PostCreator.new(
user,
raw: "this is the new content for my topic",
title: "this is my new topic title",
)
end
it "queues a job on post creation" do
SiteSetting.ai_toxicity_enabled = true
expect { creator.create }.to change(Jobs::ToxicityClassifyPost.jobs, :size).by(1)
end
end
context "when editing a post" do
fab!(:post) { Fabricate(:post, user: user) }
let(:revisor) { PostRevisor.new(post) }
it "queues a job on post update" do
expect { revisor.revise!(user, raw: "This is my new test") }.to change(
Jobs::ToxicityClassifyPost.jobs,
:size,
).by(1)
end
end
context "when creating a chat message" do
fab!(:public_chat_channel) { Fabricate(:chat_channel) }
it "queues a job when creating a chat message" do
expect {
Fabricate(
:chat_message,
chat_channel: public_chat_channel,
user: user,
message: "This is my new test",
use_service: true,
)
}.to change(Jobs::ToxicityClassifyChatMessage.jobs, :size).by(1)
end
end
context "when editing a chat message" do
# This fabricator triggers events because it uses the UpdateMessage service.
# Using let makes the test fail.
fab!(:chat_message)
it "queues a job on chat message update" do
expect { update_message!(chat_message, text: "abcdef") }.to change(
Jobs::ToxicityClassifyChatMessage.jobs,
:size,
).by(1)
end
end
end
end

@@ -1,53 +0,0 @@
# frozen_string_literal: true
require_relative "../../../../../support/toxicity_inference_stubs"
describe Jobs::ToxicityClassifyChatMessage do
describe "#execute" do
before do
SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_flag_automatically = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
end
fab!(:chat_message)
describe "scenarios where we return early without doing anything" do
it "does nothing when ai_toxicity_enabled is disabled" do
SiteSetting.ai_toxicity_enabled = false
subject.execute({ chat_message_id: chat_message.id })
expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
end
it "does nothing if there's no arg called post_id" do
subject.execute({})
expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
end
it "does nothing if no post match the given id" do
subject.execute({ chat_message_id: nil })
expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
end
it "does nothing if the post content is blank" do
chat_message.update_columns(message: "")
subject.execute({ chat_message_id: chat_message.id })
expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
end
end
it "flags the message when classified as toxic" do
ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)
subject.execute({ chat_message_id: chat_message.id })
expect(ReviewableAiChatMessage.where(target: chat_message).count).to eq(1)
end
end
end

@@ -1,54 +0,0 @@
# frozen_string_literal: true
require "rails_helper"
require_relative "../../../../../support/toxicity_inference_stubs"
describe Jobs::ToxicityClassifyPost do
describe "#execute" do
before do
SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_flag_automatically = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
end
fab!(:post)
describe "scenarios where we return early without doing anything" do
it "does nothing when ai_toxicity_enabled is disabled" do
SiteSetting.ai_toxicity_enabled = false
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "does nothing if there's no arg called post_id" do
subject.execute({})
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "does nothing if no post match the given id" do
subject.execute({ post_id: nil })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "does nothing if the post content is blank" do
post.update_columns(raw: "")
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
end
it "flags the post when classified as toxic" do
ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
subject.execute({ post_id: post.id })
expect(ReviewableAiPost.where(target: post).count).to eq(1)
end
end
end

@@ -1,68 +0,0 @@
# frozen_string_literal: true
describe DiscourseAi::Toxicity::ScanQueue do
fab!(:group)
before do
SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_groups_bypass = group.id.to_s
end
describe "#enqueue_post" do
fab!(:post)
it "queues a job" do
expect { described_class.enqueue_post(post) }.to change(
Jobs::ToxicityClassifyPost.jobs,
:size,
).by(1)
end
it "does nothing if ai_toxicity_enabled is disabled" do
SiteSetting.ai_toxicity_enabled = false
expect { described_class.enqueue_post(post) }.not_to change(
Jobs::ToxicityClassifyPost.jobs,
:size,
)
end
it "does nothing if the user group is allowlisted" do
group.add(post.user)
expect { described_class.enqueue_post(post) }.not_to change(
Jobs::ToxicityClassifyPost.jobs,
:size,
)
end
end
describe "#enqueue_chat_message" do
fab!(:chat_message)
it "queues a job" do
expect { described_class.enqueue_chat_message(chat_message) }.to change(
Jobs::ToxicityClassifyChatMessage.jobs,
:size,
).by(1)
end
it "does nothing if ai_toxicity_enabled is disabled" do
SiteSetting.ai_toxicity_enabled = false
expect { described_class.enqueue_chat_message(chat_message) }.not_to change(
Jobs::ToxicityClassifyChatMessage.jobs,
:size,
)
end
it "does nothing if the user group is allowlisted" do
group.add(chat_message.user)
expect { described_class.enqueue_chat_message(chat_message) }.not_to change(
Jobs::ToxicityClassifyChatMessage.jobs,
:size,
)
end
end
end

@@ -1,49 +0,0 @@
# frozen_string_literal: true
require_relative "../../../support/toxicity_inference_stubs"
describe DiscourseAi::Toxicity::ToxicityClassification do
fab!(:target) { Fabricate(:post) }
before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
describe "#request" do
it "returns the classification and the model used for it" do
ToxicityInferenceStubs.stub_post_classification(target, toxic: false)
result = subject.request(target)
expect(result[SiteSetting.ai_toxicity_inference_service_api_model]).to eq(
ToxicityInferenceStubs.civilized_response,
)
end
end
describe "#should_flag_based_on?" do
before { SiteSetting.ai_toxicity_flag_automatically = true }
let(:toxic_verdict) { { SiteSetting.ai_toxicity_inference_service_api_model => true } }
it "returns false when toxicity flagging is disabled" do
SiteSetting.ai_toxicity_flag_automatically = false
should_flag = subject.should_flag_based_on?(toxic_verdict)
expect(should_flag).to eq(false)
end
it "returns true if the response is toxic based on our thresholds" do
should_flag = subject.should_flag_based_on?(toxic_verdict)
expect(should_flag).to eq(true)
end
it "returns false if the response is civilized based on our thresholds" do
civilized_verdict = { SiteSetting.ai_toxicity_inference_service_api_model => false }
should_flag = subject.should_flag_based_on?(civilized_verdict)
expect(should_flag).to eq(false)
end
end
end

@@ -1,31 +1,7 @@
# frozen_string_literal: true
require_relative "support/toxicity_inference_stubs"
describe Plugin::Instance do
before do
SiteSetting.discourse_ai_enabled = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
end
describe "on reviewable_transitioned_to event" do
fab!(:post)
fab!(:admin)
it "adjusts model accuracy" do
ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
SiteSetting.ai_toxicity_flag_automatically = true
classification = DiscourseAi::Toxicity::ToxicityClassification.new
classificator = DiscourseAi::PostClassificator.new(classification)
classificator.classify!(post)
reviewable = ReviewableAiPost.find_by(target: post)
reviewable.perform admin, :agree_and_keep
accuracy = ModelAccuracy.find_by(classification_type: classification.type)
expect(accuracy.flags_agreed).to eq(1)
end
end
before { SiteSetting.discourse_ai_enabled = true }
describe "current_user_serializer#ai_helper_prompts" do
fab!(:user)

@@ -1,54 +0,0 @@
# frozen_string_literal: true
require_relative "../support/toxicity_inference_stubs"
describe DiscourseAi::ChatMessageClassificator do
fab!(:chat_message)
let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
let(:classification) { described_class.new(model) }
before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
describe "#classify!" do
before { ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true) }
it "stores the model classification data" do
classification.classify!(chat_message)
result =
ClassificationResult.find_by(target_id: chat_message.id, classification_type: model.type)
classification = result.classification.symbolize_keys
expect(classification).to eq(ToxicityInferenceStubs.toxic_response)
end
it "flags the message when the model decides we should" do
SiteSetting.ai_toxicity_flag_automatically = true
classification.classify!(chat_message)
expect(ReviewableAiChatMessage.where(target: chat_message).count).to eq(1)
end
it "doesn't flags the message if the model decides we shouldn't" do
SiteSetting.ai_toxicity_flag_automatically = false
classification.classify!(chat_message)
expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
end
it "includes the model accuracy in the payload" do
SiteSetting.ai_toxicity_flag_automatically = true
classification.classify!(chat_message)
reviewable = ReviewableAiChatMessage.find_by(target: chat_message)
expect(
reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),
).to be_zero
end
end
end

@@ -1,83 +0,0 @@
# frozen_string_literal: true
require "rails_helper"
require_relative "../support/sentiment_inference_stubs"
describe DiscourseAi::Classificator do
describe "#classify!" do
describe "saving the classification result" do
let(:model) { DiscourseAi::Sentiment::SentimentClassification.new }
let(:classification_raw_result) do
model
.available_classifiers
.reduce({}) do |memo, model_config|
memo[model_config.model_name] = model.transform_result(
SentimentInferenceStubs.model_response(model_config.model_name),
)
memo
end
end
let(:classification) { DiscourseAi::PostClassificator.new(model) }
fab!(:target) { Fabricate(:post) }
before do
SiteSetting.ai_sentiment_model_configs =
"[{\"model_name\":\"SamLowe/roberta-base-go_emotions\",\"endpoint\":\"http://samlowe-emotion.com\",\"api_key\":\"123\"},{\"model_name\":\"j-hartmann/emotion-english-distilroberta-base\",\"endpoint\":\"http://jhartmann-emotion.com\",\"api_key\":\"123\"},{\"model_name\":\"cardiffnlp/twitter-roberta-base-sentiment-latest\",\"endpoint\":\"http://cardiffnlp-sentiment.com\",\"api_key\":\"123\"}]"
SentimentInferenceStubs.stub_classification(target)
end
it "stores one result per model used" do
classification.classify!(target)
stored_results = ClassificationResult.where(target: target)
expect(stored_results.length).to eq(model.available_classifiers.length)
model.available_classifiers.each do |model_config|
result = stored_results.detect { |c| c.model_used == model_config.model_name }
expect(result.classification_type).to eq(model.type.to_s)
expect(result.created_at).to be_present
expect(result.updated_at).to be_present
expected_classification = SentimentInferenceStubs.model_response(model_config.model_name)
transformed_classification = model.transform_result(expected_classification)
expect(result.classification).to eq(transformed_classification)
end
end
it "updates an existing classification result" do
original_creation = 3.days.ago
model.available_classifiers.each do |model_config|
ClassificationResult.create!(
target: target,
model_used: model_config.model_name,
classification_type: model.type,
created_at: original_creation,
updated_at: original_creation,
classification: {
},
)
end
classification.classify!(target)
stored_results = ClassificationResult.where(target: target)
expect(stored_results.length).to eq(model.available_classifiers.length)
model.available_classifiers.each do |model_config|
result = stored_results.detect { |c| c.model_used == model_config.model_name }
expect(result.classification_type).to eq(model.type.to_s)
expect(result.updated_at).to be > original_creation
expect(result.created_at).to eq_time(original_creation)
expect(result.classification).to eq(classification_raw_result[model_config.model_name])
end
end
end
end
end

@@ -1,53 +0,0 @@
# frozen_string_literal: true
require_relative "../support/toxicity_inference_stubs"
describe DiscourseAi::PostClassificator do
fab!(:post)
let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
let(:classification) { described_class.new(model) }
before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
describe "#classify!" do
before { ToxicityInferenceStubs.stub_post_classification(post, toxic: true) }
it "stores the model classification data" do
classification.classify!(post)
result = ClassificationResult.find_by(target: post, classification_type: model.type)
classification = result.classification.symbolize_keys
expect(classification).to eq(ToxicityInferenceStubs.toxic_response)
end
it "flags the message and hides the post when the model decides we should" do
SiteSetting.ai_toxicity_flag_automatically = true
classification.classify!(post)
expect(ReviewableAiPost.where(target: post).count).to eq(1)
expect(post.reload.hidden?).to eq(true)
end
it "doesn't flags the message if the model decides we shouldn't" do
SiteSetting.ai_toxicity_flag_automatically = false
classification.classify!(post)
expect(ReviewableAiPost.where(target: post).count).to be_zero
end
it "includes the model accuracy in the payload" do
SiteSetting.ai_toxicity_flag_automatically = true
classification.classify!(post)
reviewable = ReviewableAiPost.find_by(target: post)
expect(
reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),
).to be_zero
end
end
end

@@ -1,62 +0,0 @@
# frozen_string_literal: true
class NSFWInferenceStubs
class << self
def endpoint
"#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify"
end
def upload_url(upload)
upload_url = Discourse.store.cdn_url(upload.url)
upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")
upload_url
end
def positive_result(model)
return { nsfw_probability: 90 } if model == "opennsfw2"
{ drawings: 1, hentai: 2, neutral: 0, porn: 90, sexy: 79 }
end
def negative_result(model)
return { nsfw_probability: 3 } if model == "opennsfw2"
{ drawings: 1, hentai: 2, neutral: 0, porn: 3, sexy: 1 }
end
def positive(upload)
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
.to_return(status: 200, body: JSON.dump(positive_result("nsfw_detector")))
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
.to_return(status: 200, body: JSON.dump(positive_result("opennsfw2")))
end
def negative(upload)
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
.to_return(status: 200, body: JSON.dump(negative_result("nsfw_detector")))
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
.to_return(status: 200, body: JSON.dump(negative_result("opennsfw2")))
end
def unsupported(upload)
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
.to_return(status: 415, body: JSON.dump({ error: "Unsupported image type", status: 415 }))
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
.to_return(status: 415, body: JSON.dump({ error: "Unsupported image type", status: 415 }))
end
end
end

@@ -57,10 +57,7 @@ class SentimentInferenceStubs
def stub_classification(post)
content = post.post_number == 1 ? "#{post.topic.title}\n#{post.raw}" : post.raw
DiscourseAi::Sentiment::SentimentClassification
.new
.available_classifiers
.each do |model_config|
DiscourseAi::Sentiment::PostClassification.new.classifiers.each do |model_config|
WebMock
.stub_request(:post, model_config.endpoint)
.with(body: JSON.dump(inputs: content, truncate: true))

@@ -1,56 +0,0 @@
# frozen_string_literal: true
class ToxicityInferenceStubs
class << self
def endpoint
"#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify"
end
def model
SiteSetting.ai_toxicity_inference_service_api_model
end
def toxic_response
{
toxicity: 99,
severe_toxicity: 1,
obscene: 6,
identity_attack: 3,
insult: 4,
threat: 8,
sexual_explicit: 5,
}
end
def civilized_response
{
toxicity: 2,
severe_toxicity: 1,
obscene: 6,
identity_attack: 3,
insult: 4,
threat: 8,
sexual_explicit: 5,
}
end
def stub_post_classification(post, toxic: false)
content = post.post_number == 1 ? "#{post.topic.title}\n#{post.raw}" : post.raw
response = toxic ? toxic_response : civilized_response
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: model, content: content))
.to_return(status: 200, body: JSON.dump(response))
end
def stub_chat_message_classification(chat_message, toxic: false)
response = toxic ? toxic_response : civilized_response
WebMock
.stub_request(:post, endpoint)
.with(body: JSON.dump(model: model, content: chat_message.message))
.to_return(status: 200, body: JSON.dump(response))
end
end
end

@@ -1,43 +0,0 @@
# frozen_string_literal: true
require_relative "../../support/toxicity_inference_stubs"
RSpec.describe "Toxicity-flagged chat messages", type: :system, js: true do
fab!(:chat_message)
fab!(:admin)
before do
sign_in(admin)
SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_flag_automatically = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)
DiscourseAi::ChatMessageClassificator.new(
DiscourseAi::Toxicity::ToxicityClassification.new,
).classify!(chat_message)
end
it "displays them in the review queue" do
visit("/review")
expect(page).to have_selector(".reviewable-ai-chat-message .reviewable-actions")
end
context "when the message is hard deleted" do
before { chat_message.destroy! }
it "does not throw an error" do
visit("/review")
expect(page).to have_selector(".reviewable-ai-chat-message .reviewable-actions")
end
it "adds the option to ignore the flag" do
visit("/review")
expect(page).to have_selector(".reviewable-actions .chat-message-ignore")
end
end
end