mirror of https://github.com/discourse/discourse-ai.git
synced 2025-10-31 14:38:37 +00:00

53 lines · 1.6 KiB · Ruby
	
# frozen_string_literal: true

require "rails_helper"
require_relative "../support/toxicity_inference_stubs"

describe DiscourseAi::PostClassificator do
  fab!(:post) { Fabricate(:post) }

  let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
  let(:classification) { described_class.new(model) }

  describe "#classify!" do
    before { ToxicityInferenceStubs.stub_post_classification(post, toxic: true) }

    it "stores the model classification data" do
      classification.classify!(post)
      result = ClassificationResult.find_by(target: post, classification_type: model.type)

      classification = result.classification.symbolize_keys

      expect(classification).to eq(ToxicityInferenceStubs.toxic_response)
    end

    it "flags the message and hides the post when the model decides we should" do
      SiteSetting.ai_toxicity_flag_automatically = true

      classification.classify!(post)

      expect(ReviewableAiPost.where(target: post).count).to eq(1)
      expect(post.reload.hidden?).to eq(true)
    end

    it "doesn't flag the message if the model decides we shouldn't" do
      SiteSetting.ai_toxicity_flag_automatically = false

      classification.classify!(post)

      expect(ReviewableAiPost.where(target: post).count).to be_zero
    end

    it "includes the model accuracy in the payload" do
      SiteSetting.ai_toxicity_flag_automatically = true
      classification.classify!(post)

      reviewable = ReviewableAiPost.find_by(target: post)

      expect(
        reviewable.payload.dig("accuracies", SiteSetting.ai_toxicity_inference_service_api_model),
      ).to be_zero
    end
  end
end