Refactoring of nsfw and flagger

commit a73931c151
parent 6f0c141062
Author: Rafael dos Santos Silva
Date: 2023-02-23 12:13:26 -03:00
4 changed files with 60 additions and 16 deletions

View File

@@ -58,7 +58,7 @@ plugins:
     default: ''
   ai_sentiment_models:
     type: list
-    list_type: simple
+    list_type: compact
     default: "emotion"
     allow_any: false
     choices:
@@ -70,4 +70,17 @@ plugins:
     default: "https://nsfw-testing.demo-by-discourse.com"
   ai_nsfw_inference_service_api_key:
     default: ""
-  ai_nsfw_probability_threshold: 60
+  ai_nsfw_flag_automatically: true
+  ai_nsfw_flag_threshold_general: 60
+  ai_nsfw_flag_threshold_drawings: 60
+  ai_nsfw_flag_threshold_hentai: 60
+  ai_nsfw_flag_threshold_porn: 60
+  ai_nsfw_flag_threshold_sexy: 70
+  ai_nsfw_models:
+    type: list
+    list_type: compact
+    default: "opennsfw2"
+    allow_any: false
+    choices:
+      - opennsfw2
+      - nsfw_detector

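Note on the settings above: Discourse persists list-type site settings such as ai_nsfw_models as a single pipe-delimited string, which is why the evaluation code below splits on "|". A minimal sketch, with an assumed example value:

    # Sketch only: a list-type site setting is stored as one
    # pipe-delimited string and split at the call site.
    SiteSetting.ai_nsfw_models = "opennsfw2|nsfw_detector" # assumed example
    SiteSetting.ai_nsfw_models.split("|") # => ["opennsfw2", "nsfw_detector"]
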
View File

@@ -3,36 +3,48 @@
 module DiscourseAI
   module NSFW
     class Evaluation
-      AVAILABLE_MODELS = %w[opennsfw2 nsfw_detector]
-
       def perform(upload)
         result = { verdict: false, evaluation: {} }
 
-        AVAILABLE_MODELS.each do |model|
-          model_result = evaluate_with_model(model, upload).symbolize_keys!
+        SiteSetting
+          .ai_nsfw_models
+          .split("|")
+          .each do |model|
+            model_result = evaluate_with_model(model, upload).symbolize_keys!
 
-          model_result.values.each do |classification_prob|
-            if classification_prob.to_i >= SiteSetting.ai_nsfw_probability_threshold
-              result[:verdict] = true
-            end
+            result[:verdict] = send("#{model}_verdict?", model_result)
+
+            result[:evaluation][model.to_sym] = model_result
           end
 
-          result[:evaluation][model.to_sym] = model_result
-        end
-
         result
       end
 
       private
 
       def evaluate_with_model(model, upload)
+        upload_url = Discourse.store.cdn_url(upload.url)
+        upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")
+
         DiscourseAI::InferenceManager.perform!(
           "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
           model,
-          Discourse.store.cdn_url(upload.url),
+          upload_url,
           SiteSetting.ai_nsfw_inference_service_api_key,
         )
       end
+
+      def opennsfw2_verdict?(classification)
+        classification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general
+      end
+
+      def nsfw_detector_verdict?(classification)
+        classification.each do |key, value|
+          next if key == :neutral
+          return true if value.to_i >= SiteSetting.send("ai_nsfw_flag_threshold_#{key}")
+        end
+        false
+      end
     end
   end
 end

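A minimal usage sketch of the refactored Evaluation class (the upload lookup and the returned values are illustrative assumptions, not output from this commit):

    # Sketch only: run every model enabled in ai_nsfw_models against an
    # upload; each model's verdict comes from its own "<model>_verdict?"
    # method, dispatched via send.
    upload = Upload.find(1) # assumed example
    result = DiscourseAI::NSFW::Evaluation.new.perform(upload)
    result[:verdict]    # => true or false
    result[:evaluation] # => e.g. { opennsfw2: { nsfw_probability: "83" } }

Note that result[:verdict] is reassigned on every iteration, so when several models are enabled the verdict of the last model in the list wins.
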
View File

@@ -19,8 +19,7 @@ module ::DiscourseAI
     end
 
     def flag!
-      PostActionCreator.create(flagger, @object, :inappropriate, reason: @reasons.join("/"))
-      @object.publish_change_to_clients! :acted
+      DiscourseAI::FlagManager.new(@object, reasons: @reasons).flag!
     end
   end
 end

View File

@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+module ::DiscourseAI
+  class FlagManager
+    DEFAULT_FLAGGER = Discourse.system_user
+    DEFAULT_REASON = "discourse-ai"
+
+    def initialize(object, flagger: DEFAULT_FLAGGER, type: :inappropriate, reasons: DEFAULT_REASON)
+      @flagger = flagger
+      @object = object
+      @type = type
+      @reasons = reasons
+    end
+
+    def flag!
+      PostActionCreator.create(@flagger, @object, :inappropriate, reason: @reasons)
+      @object.publish_change_to_clients! :acted
+    end
+  end
+end
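
A minimal usage sketch of the new FlagManager (the post lookup is an assumed example). Note that flag! hardcodes :inappropriate, so the type: option is stored but not yet used:

    # Sketch only: flag a post as inappropriate on behalf of the system user.
    post = Post.find(123) # assumed example
    DiscourseAI::FlagManager.new(post, reasons: "nsfw").flag!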