DEV: DiscourseAI -> DiscourseAi rename to have consistent folders and files (#9)

Roman Rizzi 2023-03-14 16:03:50 -03:00 committed by GitHub
parent cbaa40edc5
commit aa2fca6086
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
51 changed files with 116 additions and 100 deletions

@@ -3,7 +3,7 @@
 class ModelAccuracy < ActiveRecord::Base
   def self.adjust_model_accuracy(new_status, reviewable)
     return unless %i[approved rejected].include?(new_status)
-    return unless [ReviewableAIPost, ReviewableAIChatMessage].include?(reviewable.class)
+    return unless [ReviewableAiPost, ReviewableAiChatMessage].include?(reviewable.class)
     verdicts = reviewable.payload.to_h["verdicts"] || {}

@@ -2,7 +2,7 @@
 require_dependency "reviewable"
-class ReviewableAIChatMessage < Reviewable
+class ReviewableAiChatMessage < Reviewable
   def self.action_aliases
     {
       agree_and_keep_hidden: :agree_and_delete,

@@ -2,7 +2,7 @@
 require_dependency "reviewable"
-class ReviewableAIPost < Reviewable
+class ReviewableAiPost < Reviewable
   # Penalties are handled by the modal after the action is performed
   def self.action_aliases
     {

@@ -1,5 +1,5 @@
 # frozen_string_literal: true
-class AIChatChannelSerializer < ApplicationSerializer
+class AiChatChannelSerializer < ApplicationSerializer
   attributes :id, :chatable, :chatable_type, :chatable_url, :title, :slug
 end

@@ -2,12 +2,12 @@
 require_dependency "reviewable_serializer"
-class ReviewableAIChatMessageSerializer < ReviewableSerializer
+class ReviewableAiChatMessageSerializer < ReviewableSerializer
   payload_attributes :accuracies, :message_cooked
   target_attributes :cooked
   attributes :target_id
-  has_one :chat_channel, serializer: AIChatChannelSerializer, root: false, embed: :objects
+  has_one :chat_channel, serializer: AiChatChannelSerializer, root: false, embed: :objects
   def chat_channel
     object.chat_message.chat_channel

@@ -2,6 +2,6 @@
 require_dependency "reviewable_flagged_post_serializer"
-class ReviewableAIPostSerializer < ReviewableFlaggedPostSerializer
+class ReviewableAiPostSerializer < ReviewableFlaggedPostSerializer
   payload_attributes :accuracies
 end

@@ -1,7 +1,7 @@
 import Component from "@glimmer/component";
 import { inject as service } from "@ember/service";
-export default class ReviewableAIChatMessage extends Component {
+export default class ReviewableAiChatMessage extends Component {
   @service store;
   get chatChannel() {

@@ -0,0 +1,3 @@
+import Component from "@glimmer/component";
+export default class ReviewableAiPost extends Component {}

@@ -1,3 +0,0 @@
-import Component from "@glimmer/component";
-export default class ReviewableAIPost extends Component {}

@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+class MigrateDiscourseAiReviewables < ActiveRecord::Migration[7.0]
+  def up
+    DB.exec("UPDATE reviewables SET type='ReviewableAiPost' WHERE type='ReviewableAIPost'")
+    DB.exec(
+      "UPDATE reviewables SET type='ReviewableAiChatMessage' WHERE type='ReviewableAIChatMessage'",
+    )
+  end
+  def down
+    raise ActiveRecord::IrreversibleMigration
+  end
+end

@@ -1,7 +1,8 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   class Engine < ::Rails::Engine
-    isolate_namespace DiscourseAI
+    engine_name PLUGIN_NAME
+    isolate_namespace DiscourseAi
   end
 end

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   module NSFW
     class EntryPoint
       def load_files
@@ -12,7 +12,7 @@ module DiscourseAI
       nsfw_detection_cb =
         Proc.new do |post|
           if SiteSetting.ai_nsfw_detection_enabled &&
-               DiscourseAI::NSFW::NSFWClassification.new.can_classify?(post)
+               DiscourseAi::NSFW::NSFWClassification.new.can_classify?(post)
             Jobs.enqueue(:evaluate_post_uploads, post_id: post.id)
           end
         end

@@ -11,7 +11,7 @@ module Jobs
       return if post.uploads.none? { |u| FileHelper.is_supported_image?(u.url) }
-      DiscourseAI::PostClassificator.new(DiscourseAI::NSFW::NSFWClassification.new).classify!(post)
+      DiscourseAi::PostClassificator.new(DiscourseAi::NSFW::NSFWClassification.new).classify!(post)
     end
   end
 end

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   module NSFW
     class NSFWClassification
       def type
@@ -52,7 +52,7 @@ module DiscourseAI
         upload_url = Discourse.store.cdn_url(upload.url)
         upload_url = "#{Discourse.base_url_no_prefix}#{upload_url}" if upload_url.starts_with?("/")
-        DiscourseAI::Inference::DiscourseClassifier.perform!(
+        DiscourseAi::Inference::DiscourseClassifier.perform!(
           "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
           model,
           upload_url,

@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   module Sentiment
     class EntryPoint
       def load_files

@@ -9,8 +9,8 @@ module ::Jobs
       post = Post.find_by(id: post_id, post_type: Post.types[:regular])
       return if post&.raw.blank?
-      DiscourseAI::PostClassificator.new(
-        DiscourseAI::Sentiment::SentimentClassification.new,
+      DiscourseAi::PostClassificator.new(
+        DiscourseAi::Sentiment::SentimentClassification.new,
       ).classify!(post)
     end
   end

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   module Sentiment
     class SentimentClassification
       def type
@@ -39,7 +39,7 @@ module DiscourseAI
       private
       def request_with(model, content)
-        ::DiscourseAI::Inference::DiscourseClassifier.perform!(
+        ::DiscourseAi::Inference::DiscourseClassifier.perform!(
           "#{SiteSetting.ai_sentiment_inference_service_api_endpoint}/api/v1/classify",
           model,
           content,

@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   module Toxicity
     class EntryPoint
       def load_files
@@ -11,13 +12,13 @@ module DiscourseAI
       end
       def inject_into(plugin)
-        post_analysis_cb = Proc.new { |post| DiscourseAI::Toxicity::ScanQueue.enqueue_post(post) }
+        post_analysis_cb = Proc.new { |post| DiscourseAi::Toxicity::ScanQueue.enqueue_post(post) }
         plugin.on(:post_created, &post_analysis_cb)
         plugin.on(:post_edited, &post_analysis_cb)
         chat_message_analysis_cb =
-          Proc.new { |message| DiscourseAI::Toxicity::ScanQueue.enqueue_chat_message(message) }
+          Proc.new { |message| DiscourseAi::Toxicity::ScanQueue.enqueue_chat_message(message) }
         plugin.on(:chat_message_created, &chat_message_analysis_cb)
         plugin.on(:chat_message_edited, &chat_message_analysis_cb)

@@ -10,8 +10,8 @@ module ::Jobs
       chat_message = ChatMessage.find_by(id: chat_message_id)
       return if chat_message&.message.blank?
-      DiscourseAI::ChatMessageClassificator.new(
-        DiscourseAI::Toxicity::ToxicityClassification.new,
+      DiscourseAi::ChatMessageClassificator.new(
+        DiscourseAi::Toxicity::ToxicityClassification.new,
       ).classify!(chat_message)
     end
   end

@@ -11,8 +11,8 @@ module ::Jobs
       post = Post.find_by(id: post_id, post_type: Post.types[:regular])
       return if post&.raw.blank?
-      DiscourseAI::PostClassificator.new(
-        DiscourseAI::Toxicity::ToxicityClassification.new,
+      DiscourseAi::PostClassificator.new(
+        DiscourseAi::Toxicity::ToxicityClassification.new,
       ).classify!(post)
     end
   end

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   module Toxicity
     class ScanQueue
       class << self

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module DiscourseAI
+module DiscourseAi
   module Toxicity
     class ToxicityClassification
      CLASSIFICATION_LABELS = %i[
@@ -42,7 +42,7 @@ module DiscourseAI
       def request(target_to_classify)
         data =
-          ::DiscourseAI::Inference::DiscourseClassifier.perform!(
+          ::DiscourseAi::Inference::DiscourseClassifier.perform!(
             "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify",
             SiteSetting.ai_toxicity_inference_service_api_model,
             content_of(target_to_classify),

@@ -1,12 +1,12 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   class ChatMessageClassificator < Classificator
     private
     def flag!(chat_message, classification, verdicts, accuracies)
       reviewable =
-        ReviewableAIChatMessage.needs_review!(
+        ReviewableAiChatMessage.needs_review!(
          created_by: Discourse.system_user,
          target: chat_message,
          reviewable_by_moderator: true,

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   class Classificator
     def initialize(classification_model)
       @classification_model = classification_model

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class DiscourseClassifier
       def self.perform!(endpoint, model, content, api_key)

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class DiscourseReranker
       def self.perform!(endpoint, model, content, candidates, api_key)

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class OpenAICompletions
       def self.perform!(model, content, api_key)

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   module Inference
     class OpenAIEmbeddings
       def self.perform!(content, model = nil)

@@ -1,6 +1,6 @@
 # frozen_string_literal: true
-module ::DiscourseAI
+module ::DiscourseAi
   class PostClassificator < Classificator
     private
@@ -8,7 +8,7 @@ module ::DiscourseAI
       post.hide!(ReviewableScore.types[:inappropriate])
       reviewable =
-        ReviewableAIPost.needs_review!(
+        ReviewableAiPost.needs_review!(
          created_by: Discourse.system_user,
          target: post,
          reviewable_by_moderator: true,

@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 def classify(content)
-  ::DiscourseAI::Inference::DiscourseClassifier.perform!(
+  ::DiscourseAi::Inference::DiscourseClassifier.perform!(
     "#{SiteSetting.ai_toxicity_inference_service_api_endpoint}/api/v1/classify",
     SiteSetting.ai_toxicity_inference_service_api_model,
     content,
@@ -37,7 +37,7 @@ task "ai:toxicity:calibration_stats", [:set_size] => [:environment] do |_, args|
   flag_agreed_scores = flag_agreed.map { classify(_1) }
   flag_not_agreed_scores = flag_not_agreed.map { classify(_1) }
-  DiscourseAI::Toxicity::Classifier::CLASSIFICATION_LABELS.each do |label|
+  DiscourseAi::Toxicity::Classifier::CLASSIFICATION_LABELS.each do |label|
     puts "Label: #{label}"
     label_agreed_scores = flag_agreed_scores.map { _1[label] }

@@ -9,13 +9,13 @@
 enabled_site_setting :discourse_ai_enabled
+module ::DiscourseAi
+  PLUGIN_NAME = "discourse-ai"
+end
+require_relative "lib/discourse_ai/engine"
 after_initialize do
-  module ::DiscourseAI
-    PLUGIN_NAME = "discourse-ai"
-  end
   require_relative "lib/shared/inference/discourse_classifier"
   require_relative "lib/shared/inference/discourse_reranker"
   require_relative "lib/shared/inference/openai_completions"
@@ -30,16 +30,16 @@ after_initialize do
   require_relative "lib/modules/sentiment/entry_point"
   [
-    DiscourseAI::NSFW::EntryPoint.new,
-    DiscourseAI::Toxicity::EntryPoint.new,
-    DiscourseAI::Sentiment::EntryPoint.new,
+    DiscourseAi::NSFW::EntryPoint.new,
+    DiscourseAi::Toxicity::EntryPoint.new,
+    DiscourseAi::Sentiment::EntryPoint.new,
   ].each do |a_module|
     a_module.load_files
     a_module.inject_into(self)
   end
-  register_reviewable_type ReviewableAIChatMessage
-  register_reviewable_type ReviewableAIPost
+  register_reviewable_type ReviewableAiChatMessage
+  register_reviewable_type ReviewableAiPost
   on(:reviewable_transitioned_to) do |new_status, reviewable|
     ModelAccuracy.adjust_model_accuracy(new_status, reviewable)

@@ -2,7 +2,7 @@
 require "rails_helper"
-describe DiscourseAI::NSFW::EntryPoint do
+describe DiscourseAi::NSFW::EntryPoint do
   fab!(:user) { Fabricate(:user) }
   describe "registering event callbacks" do

@@ -21,19 +21,19 @@ describe Jobs::EvaluatePostUploads do
       subject.execute({ post_id: post.id })
-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "does nothing if there's no arg called post_id" do
       subject.execute({})
-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "does nothing if no post match the given id" do
       subject.execute({ post_id: nil })
-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "does nothing if the post has no uploads" do
@@ -41,7 +41,7 @@ describe Jobs::EvaluatePostUploads do
       subject.execute({ post_id: post_no_uploads.id })
-      expect(ReviewableFlaggedPost.where(target: post_no_uploads).count).to be_zero
+      expect(ReviewableAiPost.where(target: post_no_uploads).count).to be_zero
     end
     it "does nothing if the upload is not an image" do
@@ -50,7 +50,7 @@ describe Jobs::EvaluatePostUploads do
       subject.execute({ post_id: post.id })
-      expect(ReviewableFlaggedPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
   end
@@ -61,7 +61,7 @@ describe Jobs::EvaluatePostUploads do
     it "flags and hides the post" do
       subject.execute({ post_id: post.id })
-      expect(ReviewableAIPost.where(target: post).count).to eq(1)
+      expect(ReviewableAiPost.where(target: post).count).to eq(1)
       expect(post.reload.hidden?).to eq(true)
     end
   end
@@ -72,7 +72,7 @@ describe Jobs::EvaluatePostUploads do
     it "does nothing" do
       subject.execute({ post_id: post.id })
-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
   end
 end

@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../support/nsfw_inference_stubs"
-describe DiscourseAI::NSFW::NSFWClassification do
+describe DiscourseAi::NSFW::NSFWClassification do
   before { SiteSetting.ai_nsfw_inference_service_api_endpoint = "http://test.com" }
   let(:available_models) { SiteSetting.ai_nsfw_models.split("|") }

@@ -2,7 +2,7 @@
 require "rails_helper"
-describe DiscourseAI::Sentiment::EntryPoint do
+describe DiscourseAi::Sentiment::EntryPoint do
   fab!(:user) { Fabricate(:user) }
   describe "registering event callbacks" do

@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../support/sentiment_inference_stubs"
-describe DiscourseAI::Sentiment::SentimentClassification do
+describe DiscourseAi::Sentiment::SentimentClassification do
   fab!(:target) { Fabricate(:post) }
   describe "#request" do

@@ -2,7 +2,7 @@
 require "rails_helper"
-describe DiscourseAI::Toxicity::EntryPoint do
+describe DiscourseAi::Toxicity::EntryPoint do
   fab!(:user) { Fabricate(:user) }
   describe "registering event callbacks" do

@@ -18,19 +18,19 @@ describe Jobs::ToxicityClassifyChatMessage do
       subject.execute({ chat_message_id: chat_message.id })
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end
     it "does nothing if there's no arg called post_id" do
       subject.execute({})
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end
     it "does nothing if no post match the given id" do
       subject.execute({ chat_message_id: nil })
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end
     it "does nothing if the post content is blank" do
@@ -38,7 +38,7 @@ describe Jobs::ToxicityClassifyChatMessage do
       subject.execute({ chat_message_id: chat_message.id })
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end
   end
@@ -47,7 +47,7 @@ describe Jobs::ToxicityClassifyChatMessage do
       subject.execute({ chat_message_id: chat_message.id })
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to eq(1)
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to eq(1)
     end
   end
 end

@@ -18,19 +18,19 @@ describe Jobs::ToxicityClassifyPost do
       subject.execute({ post_id: post.id })
-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "does nothing if there's no arg called post_id" do
       subject.execute({})
-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "does nothing if no post match the given id" do
       subject.execute({ post_id: nil })
-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "does nothing if the post content is blank" do
@@ -38,7 +38,7 @@ describe Jobs::ToxicityClassifyPost do
       subject.execute({ post_id: post.id })
-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
   end
@@ -47,7 +47,7 @@ describe Jobs::ToxicityClassifyPost do
       subject.execute({ post_id: post.id })
-      expect(ReviewableAIPost.where(target: post).count).to eq(1)
+      expect(ReviewableAiPost.where(target: post).count).to eq(1)
     end
   end
 end

@@ -2,7 +2,7 @@
 require "rails_helper"
-describe DiscourseAI::Toxicity::ScanQueue do
+describe DiscourseAi::Toxicity::ScanQueue do
   fab!(:group) { Fabricate(:group) }
   before do

@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../../../support/toxicity_inference_stubs"
-describe DiscourseAI::Toxicity::ToxicityClassification do
+describe DiscourseAi::Toxicity::ToxicityClassification do
   fab!(:target) { Fabricate(:post) }
   describe "#request" do

@@ -42,7 +42,7 @@ describe ModelAccuracy do
   end
   it "updates the agreed flag if reviewable was approved and verdict is true" do
-    reviewable = build_reviewable(ReviewableAIPost, true)
+    reviewable = build_reviewable(ReviewableAiPost, true)
     described_class.adjust_model_accuracy(:approved, reviewable)
@@ -51,7 +51,7 @@ describe ModelAccuracy do
   end
   it "updates the disagreed flag if the reviewable was approved and verdict is false" do
-    reviewable = build_reviewable(ReviewableAIPost, false)
+    reviewable = build_reviewable(ReviewableAiPost, false)
     described_class.adjust_model_accuracy(:approved, reviewable)
@@ -60,7 +60,7 @@ describe ModelAccuracy do
   end
   it "updates the disagreed flag if reviewable was rejected and verdict is true" do
-    reviewable = build_reviewable(ReviewableAIPost, true)
+    reviewable = build_reviewable(ReviewableAiPost, true)
     described_class.adjust_model_accuracy(:rejected, reviewable)
@@ -69,7 +69,7 @@ describe ModelAccuracy do
   end
   it "updates the agreed flag if the reviewable was rejected and verdict is false" do
-    reviewable = build_reviewable(ReviewableAIPost, false)
+    reviewable = build_reviewable(ReviewableAiPost, false)
     described_class.adjust_model_accuracy(:rejected, reviewable)

@@ -2,7 +2,7 @@
 require "rails_helper"
-RSpec.describe ReviewableAIChatMessage, type: :model do
+RSpec.describe ReviewableAiChatMessage, type: :model do
   fab!(:moderator) { Fabricate(:moderator) }
   fab!(:user) { Fabricate(:user) }
   fab!(:chat_channel) { Fabricate(:chat_channel) }

@@ -2,7 +2,7 @@
 require "rails_helper"
-describe ReviewableAIPost do
+describe ReviewableAiPost do
   fab!(:target) { Fabricate(:post) }
   describe "#build_actions" do

@@ -13,10 +13,10 @@ describe Plugin::Instance do
   it "adjusts model accuracy" do
     ToxicityInferenceStubs.stub_post_classification(post, toxic: true)
     SiteSetting.ai_toxicity_flag_automatically = true
-    classification = DiscourseAI::Toxicity::ToxicityClassification.new
-    classificator = DiscourseAI::PostClassificator.new(classification)
+    classification = DiscourseAi::Toxicity::ToxicityClassification.new
+    classificator = DiscourseAi::PostClassificator.new(classification)
     classificator.classify!(post)
-    reviewable = ReviewableAIPost.find_by(target: post)
+    reviewable = ReviewableAiPost.find_by(target: post)
     reviewable.perform admin, :agree_and_keep
     accuracy = ModelAccuracy.find_by(classification_type: classification.type)

@@ -3,10 +3,10 @@
 require "rails_helper"
 require_relative "../support/toxicity_inference_stubs"
-describe DiscourseAI::ChatMessageClassificator do
+describe DiscourseAi::ChatMessageClassificator do
   fab!(:chat_message) { Fabricate(:chat_message) }
-  let(:model) { DiscourseAI::Toxicity::ToxicityClassification.new }
+  let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
   let(:classification) { described_class.new(model) }
   describe "#classify!" do
@@ -27,7 +27,7 @@ describe DiscourseAI::ChatMessageClassificator do
       classification.classify!(chat_message)
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to eq(1)
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to eq(1)
     end
     it "doesn't flags the message if the model decides we shouldn't" do
@@ -35,14 +35,14 @@ describe DiscourseAI::ChatMessageClassificator do
       classification.classify!(chat_message)
-      expect(ReviewableAIChatMessage.where(target: chat_message).count).to be_zero
+      expect(ReviewableAiChatMessage.where(target: chat_message).count).to be_zero
     end
     it "includes the model accuracy in the payload" do
       SiteSetting.ai_toxicity_flag_automatically = true
       classification.classify!(chat_message)
-      reviewable = ReviewableAIChatMessage.find_by(target: chat_message)
+      reviewable = ReviewableAiChatMessage.find_by(target: chat_message)
       expect(
         reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),

@@ -3,7 +3,7 @@
 require "rails_helper"
 require_relative "../support/sentiment_inference_stubs"
-describe DiscourseAI::Classificator do
+describe DiscourseAi::Classificator do
   describe "#classify!" do
     describe "saving the classification result" do
       let(:classification_raw_result) do
@@ -15,8 +15,8 @@ describe DiscourseAI::Classificator do
       end
     end
-    let(:model) { DiscourseAI::Sentiment::SentimentClassification.new }
-    let(:classification) { DiscourseAI::PostClassificator.new(model) }
+    let(:model) { DiscourseAi::Sentiment::SentimentClassification.new }
+    let(:classification) { DiscourseAi::PostClassificator.new(model) }
     fab!(:target) { Fabricate(:post) }
     before do

@@ -3,10 +3,10 @@
 require "rails_helper"
 require_relative "../support/toxicity_inference_stubs"
-describe DiscourseAI::PostClassificator do
+describe DiscourseAi::PostClassificator do
   fab!(:post) { Fabricate(:post) }
-  let(:model) { DiscourseAI::Toxicity::ToxicityClassification.new }
+  let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
   let(:classification) { described_class.new(model) }
   describe "#classify!" do
@@ -26,7 +26,7 @@ describe DiscourseAI::PostClassificator do
       classification.classify!(post)
-      expect(ReviewableAIPost.where(target: post).count).to eq(1)
+      expect(ReviewableAiPost.where(target: post).count).to eq(1)
       expect(post.reload.hidden?).to eq(true)
     end
@@ -35,14 +35,14 @@ describe DiscourseAI::PostClassificator do
       classification.classify!(post)
-      expect(ReviewableAIPost.where(target: post).count).to be_zero
+      expect(ReviewableAiPost.where(target: post).count).to be_zero
     end
     it "includes the model accuracy in the payload" do
       SiteSetting.ai_toxicity_flag_automatically = true
       classification.classify!(post)
-      reviewable = ReviewableAIPost.find_by(target: post)
+      reviewable = ReviewableAiPost.find_by(target: post)
       expect(
         reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),

@@ -15,7 +15,7 @@ class SentimentInferenceStubs
   def stub_classification(post)
     content = post.post_number == 1 ? "#{post.topic.title}\n#{post.raw}" : post.raw
-    DiscourseAI::Sentiment::SentimentClassification.new.available_models.each do |model|
+    DiscourseAi::Sentiment::SentimentClassification.new.available_models.each do |model|
      WebMock
        .stub_request(:post, endpoint)
        .with(body: JSON.dump(model: model, content: content))