mirror of
https://github.com/discourse/discourse-ai.git
synced 2025-03-09 11:48:47 +00:00
Refactoring of nsfw and flagger
This commit is contained in:
parent
6f0c141062
commit
a73931c151
@ -58,7 +58,7 @@ plugins:
|
|||||||
default: ''
|
default: ''
|
||||||
ai_sentiment_models:
|
ai_sentiment_models:
|
||||||
type: list
|
type: list
|
||||||
list_type: simple
|
list_type: compact
|
||||||
default: "emotion"
|
default: "emotion"
|
||||||
allow_any: false
|
allow_any: false
|
||||||
choices:
|
choices:
|
||||||
@ -70,4 +70,17 @@ plugins:
|
|||||||
default: "https://nsfw-testing.demo-by-discourse.com"
|
default: "https://nsfw-testing.demo-by-discourse.com"
|
||||||
ai_nsfw_inference_service_api_key:
|
ai_nsfw_inference_service_api_key:
|
||||||
default: ""
|
default: ""
|
||||||
ai_nsfw_probability_threshold: 60
|
ai_nsfw_flag_automatically: true
|
||||||
|
ai_nsfw_flag_threshold_general: 60
|
||||||
|
ai_nsfw_flag_threshold_drawings: 60
|
||||||
|
ai_nsfw_flag_threshold_hentai: 60
|
||||||
|
ai_nsfw_flag_threshold_porn: 60
|
||||||
|
ai_nsfw_flag_threshold_sexy: 70
|
||||||
|
ai_nsfw_models:
|
||||||
|
type: list
|
||||||
|
list_type: compact
|
||||||
|
default: "opennsfw2"
|
||||||
|
allow_any: false
|
||||||
|
choices:
|
||||||
|
- opennsfw2
|
||||||
|
- nsfw_detector
|
||||||
|
@ -3,36 +3,48 @@
|
|||||||
module DiscourseAI
  module NSFW
    class Evaluation
      # Classifies an image upload with every model enabled in the
      # `ai_nsfw_models` site setting (a "|"-separated list).
      #
      # @param upload [Upload] the image upload to classify
      # @return [Hash] { verdict: Boolean, evaluation: { model_sym => raw model output } }
      def perform(upload)
        result = { verdict: false, evaluation: {} }

        SiteSetting
          .ai_nsfw_models
          .split("|")
          .each do |model|
            model_result = evaluate_with_model(model, upload).symbolize_keys!

            result[:evaluation][model.to_sym] = model_result

            # One positive model is enough to flag the upload. Previously this
            # assigned the verdict unconditionally, so a later model returning
            # false would overwrite an earlier model's true verdict.
            result[:verdict] = true if send("#{model}_verdict?", model_result)
          end

        result
      end

      private

      # Calls the external inference service for a single model and returns
      # its raw classification hash (string keys, as decoded from the API).
      def evaluate_with_model(model, upload)
        upload_url = Discourse.store.cdn_url(upload.url)
        # Relative CDN paths must be expanded so the inference service can fetch them.
        upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")

        DiscourseAI::InferenceManager.perform!(
          "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
          model,
          upload_url,
          SiteSetting.ai_nsfw_inference_service_api_key,
        )
      end

      # opennsfw2 returns a single probability; flag when it reaches the
      # general threshold. (Param was misspelled `clasification`.)
      def opennsfw2_verdict?(classification)
        classification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general
      end

      # nsfw_detector returns one probability per category (drawings, hentai,
      # porn, sexy, neutral); flag when any non-neutral category reaches its
      # own `ai_nsfw_flag_threshold_<category>` setting.
      def nsfw_detector_verdict?(classification)
        classification.any? do |key, value|
          key != :neutral && value.to_i >= SiteSetting.send("ai_nsfw_flag_threshold_#{key}")
        end
      end
    end
  end
end
|
||||||
|
@ -19,8 +19,7 @@ module ::DiscourseAI
|
|||||||
end
|
end
|
||||||
|
|
||||||
def flag!
|
def flag!
|
||||||
PostActionCreator.create(flagger, @object, :inappropriate, reason: @reasons.join("/"))
|
DiscourseAI::FlagManager.new(@object, reasons: @reasons).flag!
|
||||||
@object.publish_change_to_clients! :acted
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
20
lib/shared/flag_manager.rb
Normal file
20
lib/shared/flag_manager.rb
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
# frozen_string_literal: true

module ::DiscourseAI
  class FlagManager
    # NOTE(review): evaluated at class-load time — this caches a single user
    # record for the process lifetime and requires the DB to be ready when the
    # file is loaded. Consider resolving Discourse.system_user lazily.
    DEFAULT_FLAGGER = Discourse.system_user
    DEFAULT_REASON = "discourse-ai"

    # @param object  the flaggable (e.g. a Post)
    # @param flagger [User] who the flag is attributed to
    # @param type [Symbol] the PostAction type to create
    # @param reasons [String, Array<String>] reason text, or parts joined with "/"
    def initialize(object, flagger: DEFAULT_FLAGGER, type: :inappropriate, reasons: DEFAULT_REASON)
      @flagger = flagger
      @object = object
      @type = type
      @reasons = reasons
    end

    def flag!
      # Use the configured flag type — it was hard-coded to :inappropriate,
      # silently ignoring the `type:` option. Also normalize `reasons` so both
      # a String and an Array of parts produce the "/"-joined reason string
      # the previous inline implementation built.
      PostActionCreator.create(@flagger, @object, @type, reason: Array(@reasons).join("/"))
      @object.publish_change_to_clients! :acted
    end
  end
end
|
Loading…
x
Reference in New Issue
Block a user