DEV: DiscourseAI -> DiscourseAi rename to have consistent folders and files (#9)

parent cbaa40edc5
commit aa2fca6086
@@ -3,7 +3,7 @@
 class ModelAccuracy < ActiveRecord::Base
   def self.adjust_model_accuracy(new_status, reviewable)
     return unless %i[approved rejected].include?(new_status)
-    return unless [ReviewableAIPost, ReviewableAIChatMessage].include?(reviewable.class)
+    return unless [ReviewableAiPost, ReviewableAiChatMessage].include?(reviewable.class)

     verdicts = reviewable.payload.to_h["verdicts"] || {}
@@ -2,7 +2,7 @@

 require_dependency "reviewable"

-class ReviewableAIChatMessage < Reviewable
+class ReviewableAiChatMessage < Reviewable
   def self.action_aliases
     {
       agree_and_keep_hidden: :agree_and_delete,
@@ -2,7 +2,7 @@

 require_dependency "reviewable"

-class ReviewableAIPost < Reviewable
+class ReviewableAiPost < Reviewable
   # Penalties are handled by the modal after the action is performed
   def self.action_aliases
     {
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

-class AIChatChannelSerializer < ApplicationSerializer
+class AiChatChannelSerializer < ApplicationSerializer
   attributes :id, :chatable, :chatable_type, :chatable_url, :title, :slug
 end
@@ -2,12 +2,12 @@

 require_dependency "reviewable_serializer"

-class ReviewableAIChatMessageSerializer < ReviewableSerializer
+class ReviewableAiChatMessageSerializer < ReviewableSerializer
   payload_attributes :accuracies, :message_cooked
   target_attributes :cooked
   attributes :target_id

-  has_one :chat_channel, serializer: AIChatChannelSerializer, root: false, embed: :objects
+  has_one :chat_channel, serializer: AiChatChannelSerializer, root: false, embed: :objects

   def chat_channel
     object.chat_message.chat_channel
@@ -2,6 +2,6 @@

 require_dependency "reviewable_flagged_post_serializer"

-class ReviewableAIPostSerializer < ReviewableFlaggedPostSerializer
+class ReviewableAiPostSerializer < ReviewableFlaggedPostSerializer
   payload_attributes :accuracies
 end
@@ -1,7 +1,7 @@
 import Component from "@glimmer/component";
 import { inject as service } from "@ember/service";

-export default class ReviewableAIChatMessage extends Component {
+export default class ReviewableAiChatMessage extends Component {
   @service store;

   get chatChannel() {
@@ -0,0 +1,3 @@
+import Component from "@glimmer/component";
+
+export default class ReviewableAiPost extends Component {}
@@ -1,3 +0,0 @@
-import Component from "@glimmer/component";
-
-export default class ReviewableAIPost extends Component {}
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+class MigrateDiscourseAiReviewables < ActiveRecord::Migration[7.0]
+  def up
+    DB.exec("UPDATE reviewables SET type='ReviewableAiPost' WHERE type='ReviewableAIPost'")
+    DB.exec(
+      "UPDATE reviewables SET type='ReviewableAiChatMessage' WHERE type='ReviewableAIChatMessage'",
+    )
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration
+  end
+end
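Rails single-table inheritance stores the class name in the reviewables `type` column, so the constant rename would orphan existing rows without the data migration above. A minimal sanity check one might run afterwards, assuming a Rails console on a site with the plugin installed; the snippet below is illustrative and not part of the commit:

# Hypothetical post-migration check: no reviewables should still reference the
# pre-rename STI class names.
leftover = Reviewable.where(type: %w[ReviewableAIPost ReviewableAIChatMessage]).count
puts "reviewables still using old type names: #{leftover}" # expected: 0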
@@ -1,7 +1,8 @@
 # frozen_string_literal: true

-module DiscourseAI
+module DiscourseAi
   class Engine < ::Rails::Engine
-    isolate_namespace DiscourseAI
+    engine_name PLUGIN_NAME
+    isolate_namespace DiscourseAi
   end
 end
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module DiscourseAI
+module DiscourseAi
   module NSFW
     class EntryPoint
       def load_files
@@ -12,7 +12,7 @@ module DiscourseAI
        nsfw_detection_cb =
          Proc.new do |post|
            if SiteSetting.ai_nsfw_detection_enabled &&
-                DiscourseAI::NSFW::NSFWClassification.new.can_classify?(post)
+                DiscourseAi::NSFW::NSFWClassification.new.can_classify?(post)
              Jobs.enqueue(:evaluate_post_uploads, post_id: post.id)
            end
          end
@@ -11,7 +11,7 @@ module Jobs

       return if post.uploads.none? { |u| FileHelper.is_supported_image?(u.url) }

-      DiscourseAI::PostClassificator.new(DiscourseAI::NSFW::NSFWClassification.new).classify!(post)
+      DiscourseAi::PostClassificator.new(DiscourseAi::NSFW::NSFWClassification.new).classify!(post)
     end
   end
 end
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module DiscourseAI
+module DiscourseAi
   module NSFW
     class NSFWClassification
       def type
@@ -52,7 +52,7 @@ module DiscourseAI
        upload_url = Discourse.store.cdn_url(upload.url)
        upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")

-        DiscourseAI::Inference::DiscourseClassifier.perform!(
+        DiscourseAi::Inference::DiscourseClassifier.perform!(
          "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
          model,
          upload_url,
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+
+module DiscourseAi
   module Sentiment
     class EntryPoint
       def load_files
@@ -9,8 +9,8 @@ module ::Jobs
       post = Post.find_by(id: post_id, post_type: Post.types[:regular])
       return if post&.raw.blank?

-      DiscourseAI::PostClassificator.new(
-        DiscourseAI::Sentiment::SentimentClassification.new,
+      DiscourseAi::PostClassificator.new(
+        DiscourseAi::Sentiment::SentimentClassification.new,
       ).classify!(post)
     end
   end
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module DiscourseAI
+module DiscourseAi
   module Sentiment
     class SentimentClassification
       def type
@@ -39,7 +39,7 @@ module DiscourseAI
       private

       def request_with(model, content)
-        ::DiscourseAI::Inference::DiscourseClassifier.perform!(
+        ::DiscourseAi::Inference::DiscourseClassifier.perform!(
           "#{SiteSetting.ai_sentiment_inference_service_api_endpoint}/api/v1/classify",
           model,
           content,
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+
+module DiscourseAi
   module Toxicity
     class EntryPoint
       def load_files
@@ -11,13 +12,13 @@ module DiscourseAI
       end

       def inject_into(plugin)
-        post_analysis_cb = Proc.new { |post| DiscourseAI::Toxicity::ScanQueue.enqueue_post(post) }
+        post_analysis_cb = Proc.new { |post| DiscourseAi::Toxicity::ScanQueue.enqueue_post(post) }

         plugin.on(:post_created, &post_analysis_cb)
         plugin.on(:post_edited, &post_analysis_cb)

         chat_message_analysis_cb =
-          Proc.new { |message| DiscourseAI::Toxicity::ScanQueue.enqueue_chat_message(message) }
+          Proc.new { |message| DiscourseAi::Toxicity::ScanQueue.enqueue_chat_message(message) }

         plugin.on(:chat_message_created, &chat_message_analysis_cb)
         plugin.on(:chat_message_edited, &chat_message_analysis_cb)
@@ -10,8 +10,8 @@ module ::Jobs
       chat_message = ChatMessage.find_by(id: chat_message_id)
       return if chat_message&.message.blank?

-      DiscourseAI::ChatMessageClassificator.new(
-        DiscourseAI::Toxicity::ToxicityClassification.new,
+      DiscourseAi::ChatMessageClassificator.new(
+        DiscourseAi::Toxicity::ToxicityClassification.new,
       ).classify!(chat_message)
     end
   end
@@ -11,8 +11,8 @@ module ::Jobs
       post = Post.find_by(id: post_id, post_type: Post.types[:regular])
       return if post&.raw.blank?

-      DiscourseAI::PostClassificator.new(
-        DiscourseAI::Toxicity::ToxicityClassification.new,
+      DiscourseAi::PostClassificator.new(
+        DiscourseAi::Toxicity::ToxicityClassification.new,
       ).classify!(post)
     end
   end
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   module Toxicity
     class ScanQueue
       class << self
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module DiscourseAI
+module DiscourseAi
   module Toxicity
     class ToxicityClassification
       CLASSIFICATION_LABELS = %i[
@@ -42,7 +42,7 @@ module DiscourseAI

       def request(target_to_classify)
         data =
-          ::DiscourseAI::Inference::DiscourseClassifier.perform!(
+          ::DiscourseAi::Inference::DiscourseClassifier.perform!(
             "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify",
             SiteSetting.ai_toxicity_inference_service_api_model,
             content_of(target_to_classify),
@@ -1,12 +1,12 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   class ChatMessageClassificator < Classificator
     private

     def flag!(chat_message, classification, verdicts, accuracies)
       reviewable =
-        ReviewableAIChatMessage.needs_review!(
+        ReviewableAiChatMessage.needs_review!(
           created_by: Discourse.system_user,
           target: chat_message,
           reviewable_by_moderator: true,
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   class Classificator
     def initialize(classification_model)
       @classification_model = classification_model
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class DiscourseClassifier
       def self.perform!(endpoint, model, content, api_key)
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class DiscourseReranker
       def self.perform!(endpoint, model, content, candidates, api_key)
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class OpenAICompletions
       def self.perform!(model, content, api_key)
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class OpenAIEmbeddings
       def self.perform!(content, model = nil)
@@ -1,6 +1,6 @@
 # frozen_string_literal: true

-module ::DiscourseAI
+module ::DiscourseAi
   class PostClassificator < Classificator
     private

@@ -8,7 +8,7 @@ module ::DiscourseAI
       post.hide!(ReviewableScore.types[:inappropriate])

       reviewable =
-        ReviewableAIPost.needs_review!(
+        ReviewableAiPost.needs_review!(
           created_by: Discourse.system_user,
           target: post,
           reviewable_by_moderator: true,
@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 def classify(content)
-  ::DiscourseAI::Inference::DiscourseClassifier.perform!(
+  ::DiscourseAi::Inference::DiscourseClassifier.perform!(
     "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify",
     SiteSetting.ai_toxicity_inference_service_api_model,
     content,
@@ -37,7 +37,7 @@ task "ai:toxicity:calibration_stats", [:set_size] => [:environment] do |_, args|
   flag_agreed_scores = flag_agreed.map { classify(_1) }
   flag_not_agreed_scores = flag_not_agreed.map { classify(_1) }

-  DiscourseAI::Toxicity::Classifier::CLASSIFICATION_LABELS.each do |label|
+  DiscourseAi::Toxicity::Classifier::CLASSIFICATION_LABELS.each do |label|
     puts "Label: #{label}"

     label_agreed_scores = flag_agreed_scores.map { _1[label] }
plugin.rb
@@ -9,13 +9,13 @@

 enabled_site_setting :discourse_ai_enabled

+module ::DiscourseAi
+  PLUGIN_NAME = "discourse-ai"
+end
+
 require_relative "lib/discourse_ai/engine"

 after_initialize do
-  module ::DiscourseAI
-    PLUGIN_NAME = "discourse-ai"
-  end
-
   require_relative "lib/shared/inference/discourse_classifier"
   require_relative "lib/shared/inference/discourse_reranker"
   require_relative "lib/shared/inference/openai_completions"
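A note on ordering, inferred from this hunk rather than stated in the commit: the `::DiscourseAi` module now has to be defined before `lib/discourse_ai/engine` is required, because the renamed engine calls `engine_name PLUGIN_NAME` at load time. A minimal sketch of the resulting top-of-file shape, abbreviated from the hunks shown here:

# plugin.rb, post-rename shape (abbreviated sketch)
module ::DiscourseAi
  PLUGIN_NAME = "discourse-ai" # must exist before the engine file is loaded
end

require_relative "lib/discourse_ai/engine" # Engine reads PLUGIN_NAME here

after_initialize do
  # feature modules are required and wired up once the app has booted
end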
@@ -30,16 +30,16 @@ after_initialize do
   require_relative "lib/modules/sentiment/entry_point"

   [
-    DiscourseAI::NSFW::EntryPoint.new,
-    DiscourseAI::Toxicity::EntryPoint.new,
-    DiscourseAI::Sentiment::EntryPoint.new,
+    DiscourseAi::NSFW::EntryPoint.new,
+    DiscourseAi::Toxicity::EntryPoint.new,
+    DiscourseAi::Sentiment::EntryPoint.new,
   ].each do |a_module|
     a_module.load_files
     a_module.inject_into(self)
   end

-  register_reviewable_type ReviewableAIChatMessage
-  register_reviewable_type ReviewableAIPost
+  register_reviewable_type ReviewableAiChatMessage
+  register_reviewable_type ReviewableAiPost

   on(:reviewable_transitioned_to) do |new_status, reviewable|
     ModelAccuracy.adjust_model_accuracy(new_status, reviewable)
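Each feature module plugs into plugin.rb through the same two-method interface exercised in the loop above (`load_files` plus `inject_into(plugin)`). A hedged skeleton of a conforming entry point; the `Example` module name is hypothetical and only the interface is taken from this diff:

# Hypothetical skeleton, not part of the commit.
module DiscourseAi
  module Example
    class EntryPoint
      def load_files
        # require_relative the module's classification and job files here
      end

      def inject_into(plugin)
        # register site-event callbacks on the plugin instance here,
        # e.g. plugin.on(:post_created) { |post| ... }
      end
    end
  end
end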
@@ -2,7 +2,7 @@

 require "rails_helper"

-describe DiscourseAI::NSFW::EntryPoint do
+describe DiscourseAi::NSFW::EntryPoint do
   fab!(:user) { Fabricate(:user) }

   describe "registering event callbacks" do
@@ -21,19 +21,19 @@ describe Jobs::EvaluatePostUploads do

       subject.execute({ post_id: post.id })

-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "does nothing if there's no arg called post_id" do
       subject.execute({})

-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "does nothing if no post match the given id" do
       subject.execute({ post_id: nil })

-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "does nothing if the post has no uploads" do
@@ -41,7 +41,7 @@ describe Jobs::EvaluatePostUploads do

       subject.execute({ post_id: post_no_uploads.id })

-      expect(ReviewableFlaggedPost.where(target: post_no_uploads).count).to be_zero
+      expect(ReviewableAiPost.where(target: post_no_uploads).count).to be_zero
     end

     it "does nothing if the upload is not an image" do
@@ -50,7 +50,7 @@ describe Jobs::EvaluatePostUploads do

       subject.execute({ post_id: post.id })

-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
   end

@@ -61,7 +61,7 @@ describe Jobs::EvaluatePostUploads do
     it "flags and hides the post" do
       subject.execute({ post_id: post.id })

-      expect(ReviewableAIPost.where(target: post).count).to eq(1)
+      expect(ReviewableAiPost.where(target: post).count).to eq(1)
       expect(post.reload.hidden?).to eq(true)
     end
   end
@@ -72,7 +72,7 @@ describe Jobs::EvaluatePostUploads do
     it "does nothing" do
       subject.execute({ post_id: post.id })

-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
   end
 end
@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../support/nsfw_inference_stubs"

-describe DiscourseAI::NSFW::NSFWClassification do
+describe DiscourseAi::NSFW::NSFWClassification do
   before { SiteSetting.ai_nsfw_inference_service_api_endpoint = "http://test.com" }

   let(:available_models) { SiteSetting.ai_nsfw_models.split("|") }
@@ -2,7 +2,7 @@

 require "rails_helper"

-describe DiscourseAI::Sentiment::EntryPoint do
+describe DiscourseAi::Sentiment::EntryPoint do
   fab!(:user) { Fabricate(:user) }

   describe "registering event callbacks" do
@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../support/sentiment_inference_stubs"

-describe DiscourseAI::Sentiment::SentimentClassification do
+describe DiscourseAi::Sentiment::SentimentClassification do
   fab!(:target) { Fabricate(:post) }

   describe "#request" do
@@ -2,7 +2,7 @@

 require "rails_helper"

-describe DiscourseAI::Toxicity::EntryPoint do
+describe DiscourseAi::Toxicity::EntryPoint do
   fab!(:user) { Fabricate(:user) }

   describe "registering event callbacks" do
@@ -18,19 +18,19 @@ describe Jobs::ToxicityClassifyChatMessage do

       subject.execute({ chat_message_id: chat_message.id })

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end

     it "does nothing if there's no arg called post_id" do
       subject.execute({})

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end

     it "does nothing if no post match the given id" do
       subject.execute({ chat_message_id: nil })

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end

     it "does nothing if the post content is blank" do
@@ -38,7 +38,7 @@ describe Jobs::ToxicityClassifyChatMessage do

       subject.execute({ chat_message_id: chat_message.id })

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end
   end

@@ -47,7 +47,7 @@ describe Jobs::ToxicityClassifyChatMessage do

       subject.execute({ chat_message_id: chat_message.id })

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to eq(1)
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to eq(1)
     end
   end
 end
@@ -18,19 +18,19 @@ describe Jobs::ToxicityClassifyPost do

       subject.execute({ post_id: post.id })

-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "does nothing if there's no arg called post_id" do
       subject.execute({})

-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "does nothing if no post match the given id" do
       subject.execute({ post_id: nil })

-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "does nothing if the post content is blank" do
@@ -38,7 +38,7 @@ describe Jobs::ToxicityClassifyPost do

       subject.execute({ post_id: post.id })

-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
   end

@@ -47,7 +47,7 @@ describe Jobs::ToxicityClassifyPost do

       subject.execute({ post_id: post.id })

-      expect(ReviewableAIPost.where(target: post).count).to eq(1)
+      expect(ReviewableAiPost.where(target: post).count).to eq(1)
     end
   end
 end
@@ -2,7 +2,7 @@

 require "rails_helper"

-describe DiscourseAI::Toxicity::ScanQueue do
+describe DiscourseAi::Toxicity::ScanQueue do
   fab!(:group) { Fabricate(:group) }

   before do
@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../support/toxicity_inference_stubs"

-describe DiscourseAI::Toxicity::ToxicityClassification do
+describe DiscourseAi::Toxicity::ToxicityClassification do
   fab!(:target) { Fabricate(:post) }

   describe "#request" do
@@ -42,7 +42,7 @@ describe ModelAccuracy do
   end

   it "updates the agreed flag if reviewable was approved and verdict is true" do
-    reviewable = build_reviewable(ReviewableAIPost, true)
+    reviewable = build_reviewable(ReviewableAiPost, true)

     described_class.adjust_model_accuracy(:approved, reviewable)

@@ -51,7 +51,7 @@ describe ModelAccuracy do
   end

   it "updates the disagreed flag if the reviewable was approved and verdict is false" do
-    reviewable = build_reviewable(ReviewableAIPost, false)
+    reviewable = build_reviewable(ReviewableAiPost, false)

     described_class.adjust_model_accuracy(:approved, reviewable)

@@ -60,7 +60,7 @@ describe ModelAccuracy do
   end

   it "updates the disagreed flag if reviewable was rejected and verdict is true" do
-    reviewable = build_reviewable(ReviewableAIPost, true)
+    reviewable = build_reviewable(ReviewableAiPost, true)

     described_class.adjust_model_accuracy(:rejected, reviewable)

@@ -69,7 +69,7 @@ describe ModelAccuracy do
   end

   it "updates the agreed flag if the reviewable was rejected and verdict is false" do
-    reviewable = build_reviewable(ReviewableAIPost, false)
+    reviewable = build_reviewable(ReviewableAiPost, false)

     described_class.adjust_model_accuracy(:rejected, reviewable)

@@ -2,7 +2,7 @@

 require "rails_helper"

-RSpec.describe ReviewableAIChatMessage, type: :model do
+RSpec.describe ReviewableAiChatMessage, type: :model do
   fab!(:moderator) { Fabricate(:moderator) }
   fab!(:user) { Fabricate(:user) }
   fab!(:chat_channel) { Fabricate(:chat_channel) }
@@ -2,7 +2,7 @@

 require "rails_helper"

-describe ReviewableAIPost do
+describe ReviewableAiPost do
   fab!(:target) { Fabricate(:post) }

   describe "#build_actions" do
@@ -13,10 +13,10 @@ describe Plugin::Instance do
   it "adjusts model accuracy" do
     ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
     SiteSetting.ai_toxicity_flag_automatically = true
-    classification = DiscourseAI::Toxicity::ToxicityClassification.new
-    classificator = DiscourseAI::PostClassificator.new(classification)
+    classification = DiscourseAi::Toxicity::ToxicityClassification.new
+    classificator = DiscourseAi::PostClassificator.new(classification)
     classificator.classify!(post)
-    reviewable = ReviewableAIPost.find_by(target: post)
+    reviewable = ReviewableAiPost.find_by(target: post)

     reviewable.perform admin, :agree_and_keep
     accuracy = ModelAccuracy.find_by(classification_type: classification.type)
@@ -3,10 +3,10 @@
 require "rails_helper"
 require_relative "../support/toxicity_inference_stubs"

-describe DiscourseAI::ChatMessageClassificator do
+describe DiscourseAi::ChatMessageClassificator do
   fab!(:chat_message) { Fabricate(:chat_message) }

-  let(:model) { DiscourseAI::Toxicity::ToxicityClassification.new }
+  let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
   let(:classification) { described_class.new(model) }

   describe "#classify!" do
@@ -27,7 +27,7 @@ describe DiscourseAI::ChatMessageClassificator do

       classification.classify!(chat_message)

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to eq(1)
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to eq(1)
     end

     it "doesn't flags the message if the model decides we shouldn't" do
@@ -35,14 +35,14 @@ describe DiscourseAI::ChatMessageClassificator do

       classification.classify!(chat_message)

-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end

     it "includes the model accuracy in the payload" do
       SiteSetting.ai_toxicity_flag_automatically = true
       classification.classify!(chat_message)

-      reviewable = ReviewableAIChatMessage.find_by(target: chat_message)
+      reviewable = ReviewableAiChatMessage.find_by(target: chat_message)

       expect(
         reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),
@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../support/sentiment_inference_stubs"

-describe DiscourseAI::Classificator do
+describe DiscourseAi::Classificator do
   describe "#classify!" do
     describe "saving the classification result" do
       let(:classification_raw_result) do
@@ -15,8 +15,8 @@ describe DiscourseAI::Classificator do
       end
     end

-    let(:model) { DiscourseAI::Sentiment::SentimentClassification.new }
-    let(:classification) { DiscourseAI::PostClassificator.new(model) }
+    let(:model) { DiscourseAi::Sentiment::SentimentClassification.new }
+    let(:classification) { DiscourseAi::PostClassificator.new(model) }
     fab!(:target) { Fabricate(:post) }

     before do
@@ -3,10 +3,10 @@
 require "rails_helper"
 require_relative "../support/toxicity_inference_stubs"

-describe DiscourseAI::PostClassificator do
+describe DiscourseAi::PostClassificator do
   fab!(:post) { Fabricate(:post) }

-  let(:model) { DiscourseAI::Toxicity::ToxicityClassification.new }
+  let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
   let(:classification) { described_class.new(model) }

   describe "#classify!" do
@@ -26,7 +26,7 @@ describe DiscourseAI::PostClassificator do

       classification.classify!(post)

-      expect(ReviewableAIPost.where(target: post).count).to eq(1)
+      expect(ReviewableAiPost.where(target: post).count).to eq(1)
       expect(post.reload.hidden?).to eq(true)
     end

@@ -35,14 +35,14 @@ describe DiscourseAI::PostClassificator do

       classification.classify!(post)

-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end

     it "includes the model accuracy in the payload" do
       SiteSetting.ai_toxicity_flag_automatically = true
       classification.classify!(post)

-      reviewable = ReviewableAIPost.find_by(target: post)
+      reviewable = ReviewableAiPost.find_by(target: post)

       expect(
         reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),
@@ -15,7 +15,7 @@ class SentimentInferenceStubs
   def stub_classification(post)
     content = post.post_number == 1 ? "#{post.topic.title}\n#{post.raw}" : post.raw

-    DiscourseAI::Sentiment::SentimentClassification.new.available_models.each do |model|
+    DiscourseAi::Sentiment::SentimentClassification.new.available_models.each do |model|
       WebMock
         .stub_request(:post, endpoint)
         .with(body: JSON.dump(model: model, content: content))