DEV: Hide soon-to-be-deprecated modules settings (#872)

This commit is contained in:
Rafael dos Santos Silva 2024-10-28 14:27:25 -03:00 committed by GitHub
parent 945f04b089
commit 820b506910
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 19 additions and 4 deletions

View File

@@ -6,8 +6,9 @@ discourse_ai:
ai_toxicity_enabled: ai_toxicity_enabled:
default: false default: false
client: true client: true
hidden: true
ai_toxicity_inference_service_api_endpoint: ai_toxicity_inference_service_api_endpoint:
default: "https://disorder-testing.demo-by-discourse.com" default: ""
ai_toxicity_inference_service_api_endpoint_srv: ai_toxicity_inference_service_api_endpoint_srv:
default: "" default: ""
hidden: true hidden: true
@@ -72,9 +73,11 @@ discourse_ai:
- sentiment - sentiment
- emotion - emotion
ai_nsfw_detection_enabled: false ai_nsfw_detection_enabled:
default: false
hidden: true
ai_nsfw_inference_service_api_endpoint: ai_nsfw_inference_service_api_endpoint:
default: "https://nsfw-testing.demo-by-discourse.com" default: ""
ai_nsfw_inference_service_api_endpoint_srv: ai_nsfw_inference_service_api_endpoint_srv:
default: "" default: ""
hidden: true hidden: true

View File

@@ -7,6 +7,7 @@ describe Jobs::ToxicityClassifyChatMessage do
before do before do
SiteSetting.ai_toxicity_enabled = true SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_flag_automatically = true SiteSetting.ai_toxicity_flag_automatically = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
end end
fab!(:chat_message) fab!(:chat_message)

View File

@@ -8,6 +8,7 @@ describe Jobs::ToxicityClassifyPost do
before do before do
SiteSetting.ai_toxicity_enabled = true SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_flag_automatically = true SiteSetting.ai_toxicity_flag_automatically = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
end end
fab!(:post) fab!(:post)

View File

@@ -5,6 +5,8 @@ require_relative "../../../support/toxicity_inference_stubs"
describe DiscourseAi::Toxicity::ToxicityClassification do describe DiscourseAi::Toxicity::ToxicityClassification do
fab!(:target) { Fabricate(:post) } fab!(:target) { Fabricate(:post) }
before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
describe "#request" do describe "#request" do
it "returns the classification and the model used for it" do it "returns the classification and the model used for it" do
ToxicityInferenceStubs.stub_post_classification(target, toxic: false) ToxicityInferenceStubs.stub_post_classification(target, toxic: false)

View File

@@ -3,7 +3,10 @@
require_relative "support/toxicity_inference_stubs" require_relative "support/toxicity_inference_stubs"
describe Plugin::Instance do describe Plugin::Instance do
before { SiteSetting.discourse_ai_enabled = true } before do
SiteSetting.discourse_ai_enabled = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
end
describe "on reviewable_transitioned_to event" do describe "on reviewable_transitioned_to event" do
fab!(:post) fab!(:post)

View File

@@ -8,6 +8,8 @@ describe DiscourseAi::ChatMessageClassificator do
let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new } let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
let(:classification) { described_class.new(model) } let(:classification) { described_class.new(model) }
before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
describe "#classify!" do describe "#classify!" do
before { ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true) } before { ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true) }

View File

@@ -8,6 +8,8 @@ describe DiscourseAi::PostClassificator do
let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new } let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
let(:classification) { described_class.new(model) } let(:classification) { described_class.new(model) }
before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
describe "#classify!" do describe "#classify!" do
before { ToxicityInferenceStubs.stub_post_classification(post, toxic: true) } before { ToxicityInferenceStubs.stub_post_classification(post, toxic: true) }

View File

@@ -10,6 +10,7 @@ RSpec.describe "Toxicity-flagged chat messages", type: :system, js: true do
sign_in(admin) sign_in(admin)
SiteSetting.ai_toxicity_enabled = true SiteSetting.ai_toxicity_enabled = true
SiteSetting.ai_toxicity_flag_automatically = true SiteSetting.ai_toxicity_flag_automatically = true
SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true) ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)