discourse-ai/spec/shared/post_classificator_spec.rb
Roman Rizzi a838116cd5
FEATURE: Use dedicated reviewables for AI flags. (#4)
This change adds two new reviewable types: ReviewableAIPost and ReviewableAIChatMessage. They have the same actions as their existing counterparts: ReviewableFlaggedPost and ReviewableChatMessage.

We'll display the model used and its accuracy when showing these flags in the review queue and adjust the latter after staff performs an action, tracking a global accuracy per existing model in a separate table.


* FEATURE: Dedicated reviewables for AI flags

* Store and adjust model accuracy

* Display accuracy in reviewable templates
2023-03-07 15:39:28 -03:00

53 lines
1.6 KiB
Ruby

# frozen_string_literal: true
require "rails_helper"
require_relative "../support/toxicity_inference_stubs"
describe DiscourseAI::PostClassificator do
  fab!(:post) { Fabricate(:post) }

  let(:model) { DiscourseAI::Toxicity::ToxicityClassification.new }
  let(:classification) { described_class.new(model) }

  describe "#classify!" do
    # Stub the inference endpoint so every example gets a "toxic" verdict.
    before { ToxicityInferenceStubs.stub_post_classification(post, toxic: true) }

    it "stores the model classification data" do
      classification.classify!(post)

      result = ClassificationResult.find_by(target: post, classification_type: model.type)
      # Named distinctly so we don't shadow the `classification` let helper above.
      stored_classification = result.classification.symbolize_keys

      expect(stored_classification).to eq(ToxicityInferenceStubs.toxic_response)
    end

    it "flags the message and hides the post when the model decides we should" do
      SiteSetting.ai_toxicity_flag_automatically = true

      classification.classify!(post)

      expect(ReviewableAIPost.where(target: post).count).to eq(1)
      expect(post.reload.hidden?).to eq(true)
    end

    it "doesn't flag the message if the model decides we shouldn't" do
      SiteSetting.ai_toxicity_flag_automatically = false

      classification.classify!(post)

      expect(ReviewableAIPost.where(target: post).count).to be_zero
    end

    it "includes the model accuracy in the payload" do
      SiteSetting.ai_toxicity_flag_automatically = true

      classification.classify!(post)

      reviewable = ReviewableAIPost.find_by(target: post)

      # A model with no prior staff feedback starts at zero accuracy.
      expect(
        reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),
      ).to be_zero
    end
  end
end