FIX: Prevent LLM enumerator from erroring when spam enabled (#1045)
This PR fixes an issue where the LLM enumerator would error out when `SiteSetting.ai_spam_detection = true` but no `AiModerationSetting.spam` record was present. Typically, we add an `LlmDependencyValidator` for the setting itself; however, since Spam is unique in that it has its model set in `AiModerationSetting` instead of a `SiteSetting`, we add a simple presence check here to prevent erroring out.
This commit is contained in:
parent
47ecf86aa1
commit
b480f13a0f
|
@ -38,7 +38,7 @@ module DiscourseAi
|
||||||
rval[model_id] << { type: :ai_embeddings_semantic_search }
|
rval[model_id] << { type: :ai_embeddings_semantic_search }
|
||||||
end
|
end
|
||||||
|
|
||||||
if SiteSetting.ai_spam_detection_enabled
|
if SiteSetting.ai_spam_detection_enabled && AiModerationSetting.spam.present?
|
||||||
model_id = AiModerationSetting.spam[:llm_model_id]
|
model_id = AiModerationSetting.spam[:llm_model_id]
|
||||||
rval[model_id] << { type: :ai_spam }
|
rval[model_id] << { type: :ai_spam }
|
||||||
end
|
end
|
||||||
|
|
|
@ -0,0 +1,21 @@
|
||||||
|
# frozen_string_literal: true

RSpec.describe DiscourseAi::Configuration::LlmEnumerator do
  # Fabricates a stand-in LLM model record used as the globally configured model.
  fab!(:fake_model)

  describe "#global_usage" do
    before do
      # Point the AI helper feature at the fabricated model so it shows up in usage.
      SiteSetting.ai_helper_model = "custom:#{fake_model.id}"
      SiteSetting.ai_helper_enabled = true
    end

    it "returns a hash of Llm models in use globally" do
      expect(described_class.global_usage).to eq(fake_model.id => [{ type: :ai_helper }])
    end

    # Regression test: spam detection stores its model in AiModerationSetting rather
    # than a SiteSetting, so enabling the flag without that record must not raise.
    it "doesn't error on spam when spam detection is enabled but moderation setting is missing" do
      SiteSetting.ai_spam_detection_enabled = true
      expect { described_class.global_usage }.not_to raise_error
    end
  end
end
|
Loading…
Reference in New Issue