diff --git a/config/settings.yml b/config/settings.yml
index 7240fa06..f89157e6 100644
--- a/config/settings.yml
+++ b/config/settings.yml
@@ -58,7 +58,7 @@ plugins:
     default: ''
   ai_sentiment_models:
     type: list
-    list_type: simple
+    list_type: compact
     default: "emotion"
     allow_any: false
     choices:
@@ -70,4 +70,17 @@ plugins:
     default: "https://nsfw-testing.demo-by-discourse.com"
   ai_nsfw_inference_service_api_key:
     default: ""
-  ai_nsfw_probability_threshold: 60
+  ai_nsfw_flag_automatically: true
+  ai_nsfw_flag_threshold_general: 60
+  ai_nsfw_flag_threshold_drawings: 60
+  ai_nsfw_flag_threshold_hentai: 60
+  ai_nsfw_flag_threshold_porn: 60
+  ai_nsfw_flag_threshold_sexy: 70
+  ai_nsfw_models:
+    type: list
+    list_type: compact
+    default: "opennsfw2"
+    allow_any: false
+    choices:
+      - opennsfw2
+      - nsfw_detector
diff --git a/lib/modules/nsfw/evaluation.rb b/lib/modules/nsfw/evaluation.rb
index 286734fc..7f060cd9 100644
--- a/lib/modules/nsfw/evaluation.rb
+++ b/lib/modules/nsfw/evaluation.rb
@@ -3,36 +3,48 @@
 module DiscourseAI
   module NSFW
     class Evaluation
-      AVAILABLE_MODELS = %w[opennsfw2 nsfw_detector]
-
       def perform(upload)
         result = { verdict: false, evaluation: {} }
 
-        AVAILABLE_MODELS.each do |model|
-          model_result = evaluate_with_model(model, upload).symbolize_keys!
+        SiteSetting
+          .ai_nsfw_models
+          .split("|")
+          .each do |model|
+            model_result = evaluate_with_model(model, upload).symbolize_keys!
 
-          model_result.values.each do |classification_prob|
-            if classification_prob.to_i >= SiteSetting.ai_nsfw_probability_threshold
-              result[:verdict] = true
-            end
+            result[:evaluation][model.to_sym] = model_result
+
+            result[:verdict] = send("#{model}_verdict?", model_result)
           end
 
-          result[:evaluation][model.to_sym] = model_result
-        end
-
         result
       end
 
       private
 
       def evaluate_with_model(model, upload)
+        upload_url = Discourse.store.cdn_url(upload.url)
+        upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")
+
         DiscourseAI::InferenceManager.perform!(
           "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
           model,
-          Discourse.store.cdn_url(upload.url),
+          upload_url,
           SiteSetting.ai_nsfw_inference_service_api_key,
         )
       end
+
+      def opennsfw2_verdict?(classification)
+        classification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general
+      end
+
+      def nsfw_detector_verdict?(classification)
+        classification.each do |key, value|
+          next if key == :neutral
+          return true if value.to_i >= SiteSetting.send("ai_nsfw_flag_threshold_#{key}")
+        end
+        false
+      end
     end
   end
 end
diff --git a/lib/modules/toxicity/post_classifier.rb b/lib/modules/toxicity/post_classifier.rb
index 1cd3ffdf..02e1994d 100644
--- a/lib/modules/toxicity/post_classifier.rb
+++ b/lib/modules/toxicity/post_classifier.rb
@@ -19,8 +19,7 @@ module ::DiscourseAI
       end
 
       def flag!
-        PostActionCreator.create(flagger, @object, :inappropriate, reason: @reasons.join("/"))
-        @object.publish_change_to_clients! :acted
+        DiscourseAI::FlagManager.new(@object, reasons: @reasons).flag!
       end
     end
   end
diff --git a/lib/shared/flag_manager.rb b/lib/shared/flag_manager.rb
new file mode 100644
index 00000000..5f4be18f
--- /dev/null
+++ b/lib/shared/flag_manager.rb
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+module ::DiscourseAI
+  class FlagManager
+    DEFAULT_FLAGGER = Discourse.system_user
+    DEFAULT_REASON = "discourse-ai"
+
+    def initialize(object, flagger: DEFAULT_FLAGGER, type: :inappropriate, reasons: DEFAULT_REASON)
+      @flagger = flagger
+      @object = object
+      @type = type
+      @reasons = reasons
+    end
+
+    def flag!
+      PostActionCreator.create(@flagger, @object, @type, reason: @reasons)
+      @object.publish_change_to_clients! :acted
+    end
+  end
+end
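
As a rough, standalone illustration of the new per-model verdict helpers (not part of the diff), the sketch below inlines the default thresholds added to settings.yml and runs them against hypothetical classification payloads; the inference service's actual response shape is not shown in this change, so the hash keys here are assumptions.

# Minimal sketch: mirrors opennsfw2_verdict? / nsfw_detector_verdict?, with the
# SiteSetting lookups replaced by the defaults from settings.yml above.
THRESHOLDS = { general: 60, drawings: 60, hentai: 60, porn: 60, sexy: 70 }

# opennsfw2 is assumed to return a single NSFW probability as its first value.
def opennsfw2_verdict?(classification)
  classification.values.first.to_i >= THRESHOLDS[:general]
end

# nsfw_detector is assumed to return one probability per category; :neutral is skipped.
def nsfw_detector_verdict?(classification)
  classification.each do |key, value|
    next if key == :neutral
    return true if value.to_i >= THRESHOLDS.fetch(key, 60)
  end
  false
end

puts opennsfw2_verdict?({ nsfw: 82 })                                                   # => true (82 >= 60)
puts nsfw_detector_verdict?({ drawings: 5, hentai: 2, neutral: 90, porn: 1, sexy: 75 }) # => true (sexy 75 >= 70)
puts nsfw_detector_verdict?({ drawings: 5, hentai: 2, neutral: 95, porn: 1, sexy: 10 }) # => false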