diff --git a/lib/configuration/llm_enumerator.rb b/lib/configuration/llm_enumerator.rb
index 30a8b694..1209001d 100644
--- a/lib/configuration/llm_enumerator.rb
+++ b/lib/configuration/llm_enumerator.rb
@@ -38,7 +38,7 @@ module DiscourseAi
           rval[model_id] << { type: :ai_embeddings_semantic_search }
         end
 
-        if SiteSetting.ai_spam_detection_enabled
+        if SiteSetting.ai_spam_detection_enabled && AiModerationSetting.spam.present?
           model_id = AiModerationSetting.spam[:llm_model_id]
           rval[model_id] << { type: :ai_spam }
         end
diff --git a/spec/configuration/llm_enumerator_spec.rb b/spec/configuration/llm_enumerator_spec.rb
new file mode 100644
index 00000000..771dff35
--- /dev/null
+++ b/spec/configuration/llm_enumerator_spec.rb
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+RSpec.describe DiscourseAi::Configuration::LlmEnumerator do
+  fab!(:fake_model)
+
+  describe "#global_usage" do
+    before do
+      SiteSetting.ai_helper_model = "custom:#{fake_model.id}"
+      SiteSetting.ai_helper_enabled = true
+    end
+
+    it "returns a hash of Llm models in use globally" do
+      expect(described_class.global_usage).to eq(fake_model.id => [{ type: :ai_helper }])
+    end
+
+    it "doesn't error on spam when spam detection is enabled but moderation setting is missing" do
+      SiteSetting.ai_spam_detection_enabled = true
+      expect { described_class.global_usage }.not_to raise_error
+    end
+  end
+end
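
Note (commentary, not part of the patch): a minimal sketch of the failure mode the guard above prevents, assuming AiModerationSetting.spam returns nil when no spam moderation setting has been saved — the scenario the new spec exercises. Indexing nil with [:llm_model_id] raises NoMethodError, so global_usage would blow up on sites that enabled spam detection without finishing its configuration:

  # Hypothetical stand-in: what AiModerationSetting.spam is assumed to return
  # when spam moderation has never been configured.
  spam_setting = nil

  # Pre-fix path: raises NoMethodError (undefined method `[]' for nil).
  # model_id = spam_setting[:llm_model_id]

  # Post-fix path: the blank case is checked first, so the :ai_spam branch is
  # skipped and enumeration continues without raising. `present?` comes from
  # ActiveSupport; outside Rails it must be required explicitly.
  require "active_support/core_ext/object/blank"
  model_id = spam_setting[:llm_model_id] if spam_setting.present?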