FIX: Update spam controller action to consider seeded LLM properly (#1053)

The seeded LLM setting `SiteSetting.ai_spam_detection_model_allowed_seeded_models` returns a _string_ of IDs separated by pipes. Reading it through the generated `_map` variant returns an array of strings. We were previously checking for the ID with a `custom:` prefix identifier, but we should instead be checking the stringified ID.
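
To illustrate the mismatch, a minimal sketch of the before/after check (the pipe-delimited value and the `-3` ID below are assumed examples, not taken from this commit):

    # The list setting stores pipe-delimited IDs as a single string.
    SiteSetting.ai_spam_detection_model_allowed_seeded_models = "-1|-3"

    # The generated `_map` reader splits that string into an array of strings.
    SiteSetting.ai_spam_detection_model_allowed_seeded_models_map # => ["-1", "-3"]

    # Seeded models use negative IDs; the param arrives as a string (illustrative value).
    llm_model_id = "-3"

    # Before: looked for a "custom:"-prefixed identifier, which the list never contains.
    SiteSetting.ai_spam_detection_model_allowed_seeded_models_map.include?("custom:#{llm_model_id}") # => false

    # After: compare against the stringified ID, which is what the list holds.
    SiteSetting.ai_spam_detection_model_allowed_seeded_models_map.include?(llm_model_id.to_s) # => true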
Keegan George 2025-01-09 06:41:25 +09:00 committed by GitHub
parent 404092a68c
commit 24b69bf840
2 changed files with 22 additions and 2 deletions


@@ -15,7 +15,7 @@ module DiscourseAi
           llm_model_id = updated_params[:llm_model_id] = allowed_params[:llm_model_id]
           if llm_model_id.to_i < 0 &&
                !SiteSetting.ai_spam_detection_model_allowed_seeded_models_map.include?(
-                 "custom:#{llm_model_id}",
+                 llm_model_id.to_s,
                )
             return(
               render_json_error(

@@ -37,7 +37,7 @@ RSpec.describe DiscourseAi::Admin::AiSpamController do
        expect(response.status).to eq(422)

-       SiteSetting.ai_spam_detection_model_allowed_seeded_models = seeded_llm.identifier
+       SiteSetting.ai_spam_detection_model_allowed_seeded_models = seeded_llm.id.to_s

        put "/admin/plugins/discourse-ai/ai-spam.json",
            params: {
@@ -49,6 +49,26 @@ RSpec.describe DiscourseAi::Admin::AiSpamController do
        expect(response.status).to eq(200)
      end

+     it "ensures that seeded llm ID is properly passed and allowed" do
+       seeded_llm = Fabricate(:seeded_model)
+       SiteSetting.ai_spam_detection_model_allowed_seeded_models = [
+         llm_model.id,
+         seeded_llm.id,
+       ].join("|")
+
+       put "/admin/plugins/discourse-ai/ai-spam.json",
+           params: {
+             is_enabled: true,
+             llm_model_id: seeded_llm.id,
+             custom_instructions: "custom instructions",
+           }
+
+       expect(SiteSetting.ai_spam_detection_model_allowed_seeded_models).to eq(
+         "#{llm_model.id}|#{seeded_llm.id}",
+       )
+       expect(response.status).to eq(200)
+     end
+
      it "can not enable spam detection without a model selected" do
        put "/admin/plugins/discourse-ai/ai-spam.json",
            params: {