# frozen_string_literal: true

module DiscourseAi
  module Toxicity
    class ToxicityClassification
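      # Example flow (illustrative; `post` stands in for any classifiable target):
      #   classification = ToxicityClassification.new
      #   scores = classification.request(post)          # { model_name => scores }
      #   verdicts = classification.get_verdicts(scores) # { model_name => bool }
      #   classification.should_flag_based_on?(verdicts) # => true/false

      # Each label below has a matching ai_toxicity_flag_threshold_<label>
      # site setting that get_verdicts compares against.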
      CLASSIFICATION_LABELS = %i[
        toxicity
        severe_toxicity
        obscene
        identity_attack
        insult
        threat
        sexual_explicit
      ]
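
      # Identifies this classification module.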
      def type
        :toxicity
      end
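
      # Only targets with non-empty text content can be classified.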
      def can_classify?(target)
        content_of(target).present?
      end
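
      # Collapses raw scores into a boolean verdict: flag when any label meets
      # its ai_toxicity_flag_threshold_<label> setting. Illustrative input
      # shape (model name and scores are made up):
      #   { "unbiased" => { toxicity: 0.97, severe_toxicity: 0.02, ... } }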
      def get_verdicts(classification_data)
        # We only use one model for this classification.
        # classification_data looks like { model_name => classification }
        _model_used, data = classification_data.to_a.first

        verdict =
          CLASSIFICATION_LABELS.any? do |label|
            data[label] >= SiteSetting.send("ai_toxicity_flag_threshold_#{label}")
          end

        { available_model => verdict }
      end
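
      # Returns true when automatic flagging is enabled and any model returned
      # a positive verdict.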
      def should_flag_based_on?(verdicts)
        return false if !SiteSetting.ai_toxicity_flag_automatically

        verdicts.values.any?
      end
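
      # Performs the HTTP request to the classification endpoint and returns
      # the raw scores keyed by the configured model name.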
      def request(target_to_classify)
        data =
          ::DiscourseAi::Inference::DiscourseClassifier.perform!(
            "#{endpoint}/api/v1/classify",
            SiteSetting.ai_toxicity_inference_service_api_model,
            content_of(target_to_classify),
            SiteSetting.ai_toxicity_inference_service_api_key,
          )

        { available_model => data }
      end

      private
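
      # The configured model name doubles as the key for verdicts and scores.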
      def available_model
        SiteSetting.ai_toxicity_inference_service_api_model
      end
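
      # Chat messages carry their text in #message; posts use #raw. For a
      # topic's first post the title is prepended so it is screened as well.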
      def content_of(target_to_classify)
        return target_to_classify.message if target_to_classify.is_a?(Chat::Message)

        if target_to_classify.post_number == 1
          "#{target_to_classify.topic.title}\n#{target_to_classify.raw}"
        else
          target_to_classify.raw
        end
      end
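
      # Resolves the inference service endpoint, preferring a DNS SRV lookup
      # when one is configured over the static endpoint setting.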
      def endpoint
        if SiteSetting.ai_toxicity_inference_service_api_endpoint_srv.present?
          service =
            DiscourseAi::Utils::DnsSrv.lookup(
              SiteSetting.ai_toxicity_inference_service_api_endpoint_srv,
            )
          "https://#{service.target}:#{service.port}"
        else
          SiteSetting.ai_toxicity_inference_service_api_endpoint
        end
      end
    end
  end
end