diff --git a/app/models/llm_model.rb b/app/models/llm_model.rb
index 609849f9..ab4c85c9 100644
--- a/app/models/llm_model.rb
+++ b/app/models/llm_model.rb
@@ -7,6 +7,7 @@ class LlmModel < ActiveRecord::Base
   belongs_to :user
 
   validates :url, exclusion: { in: [RESERVED_VLLM_SRV_URL] }
+  before_save :toggle_companion_user_before_save
 
   def self.enable_or_disable_srv_llm!
     srv_model = find_by(url: RESERVED_VLLM_SRV_URL)
@@ -27,6 +28,10 @@ class LlmModel < ActiveRecord::Base
     end
   end
 
+  def toggle_companion_user_before_save
+    toggle_companion_user if enabled_chat_bot_changed? || new_record?
+  end
+
   def toggle_companion_user
     return if name == "fake" && Rails.env.production?
 
diff --git a/lib/completions/endpoints/gemini.rb b/lib/completions/endpoints/gemini.rb
index 39dd320e..698cd252 100644
--- a/lib/completions/endpoints/gemini.rb
+++ b/lib/completions/endpoints/gemini.rb
@@ -23,7 +23,15 @@ module DiscourseAi
         end
 
         def default_options
-          { generationConfig: {} }
+          # Gemini's default safety settings block too much legitimate content, so disable them explicitly
+          categories = %w[HARASSMENT SEXUALLY_EXPLICIT HATE_SPEECH DANGEROUS_CONTENT]
+
+          safety_settings =
+            categories.map do |category|
+              { category: "HARM_CATEGORY_#{category}", threshold: "BLOCK_NONE" }
+            end
+
+          { generationConfig: {}, safetySettings: safety_settings }
         end
 
         def normalize_model_params(model_params)
diff --git a/spec/lib/completions/endpoints/gemini_spec.rb b/spec/lib/completions/endpoints/gemini_spec.rb
index 1fe0f5a6..dfa0d7fc 100644
--- a/spec/lib/completions/endpoints/gemini_spec.rb
+++ b/spec/lib/completions/endpoints/gemini_spec.rb
@@ -177,6 +177,12 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
       expected_prompt = {
         "generationConfig" => {
         },
+        "safetySettings" => [
+          { "category" => "HARM_CATEGORY_HARASSMENT", "threshold" => "BLOCK_NONE" },
+          { "category" => "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold" => "BLOCK_NONE" },
+          { "category" => "HARM_CATEGORY_HATE_SPEECH", "threshold" => "BLOCK_NONE" },
+          { "category" => "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold" => "BLOCK_NONE" },
+        ],
         "contents" => [
           {
             "role" => "user",
diff --git a/spec/system/llms/ai_llm_spec.rb b/spec/system/llms/ai_llm_spec.rb
index 371e9977..2440f71e 100644
--- a/spec/system/llms/ai_llm_spec.rb
+++ b/spec/system/llms/ai_llm_spec.rb
@@ -4,6 +4,8 @@ RSpec.describe "Admin dashboard", type: :system do
   fab!(:admin)
 
   it "correctly sets defaults" do
+    SiteSetting.ai_bot_enabled = true
+
     sign_in(admin)
 
     visit "/admin/plugins/discourse-ai/ai-llms"
@@ -18,6 +20,8 @@
     find(".ai-llm-editor__next").click()
 
     find("input.ai-llm-editor__api-key").fill_in(with: "abcd")
+    PageObjects::Components::DToggleSwitch.new(".ai-llm-editor__enabled-chat-bot").toggle
+
     find(".ai-llm-editor__save").click()
 
     expect(page).to have_current_path("/admin/plugins/discourse-ai/ai-llms")
@@ -35,5 +39,6 @@
     expect(llm.max_prompt_tokens.to_i).to eq(model_preset[:tokens])
     expect(llm.provider).to eq("anthropic")
     expect(llm.display_name).to eq(model_preset[:display_name])
+    expect(llm.user_id).not_to be_nil
   end
 end
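
Two supplementary sketches follow; neither is part of the patch itself.

First, the model callback. The guard in toggle_companion_user_before_save fires on create (new_record?) and whenever enabled_chat_bot flips (enabled_chat_bot_changed?), and skips unrelated saves. A self-contained illustration of that logic; the class below is hypothetical demo code with plain-Ruby stand-ins for ActiveRecord's dirty tracking, not the plugin's LlmModel (whose toggle_companion_user body sits mostly outside this hunk):

    # Hypothetical stand-in for LlmModel: new_record? and
    # enabled_chat_bot_changed? are stubbed flags rather than real
    # ActiveRecord dirty tracking.
    class CallbackGuardDemo
      def initialize(new_record:, enabled_chat_bot_changed:)
        @new_record = new_record
        @enabled_chat_bot_changed = enabled_chat_bot_changed
      end

      def new_record?
        @new_record
      end

      def enabled_chat_bot_changed?
        @enabled_chat_bot_changed
      end

      # Same guard as LlmModel#toggle_companion_user_before_save in the diff.
      def toggle_companion_user_before_save
        toggle_companion_user if enabled_chat_bot_changed? || new_record?
      end

      def toggle_companion_user
        puts "companion user toggled"
      end
    end

    CallbackGuardDemo.new(new_record: true, enabled_chat_bot_changed: false).toggle_companion_user_before_save
    # prints "companion user toggled": creating a record always syncs the bot user

    CallbackGuardDemo.new(new_record: false, enabled_chat_bot_changed: true).toggle_companion_user_before_save
    # prints "companion user toggled": flipping enabled_chat_bot syncs it too

    CallbackGuardDemo.new(new_record: false, enabled_chat_bot_changed: false).toggle_companion_user_before_save
    # prints nothing: unrelated updates skip the toggle

Per the system spec above, the end result of enabling the chat bot is that llm.user_id is populated with the companion user.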
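
Second, the Gemini payload. A standalone reproduction of what the new default_options evaluates to, rendered as JSON for readability (the JSON-printing step is illustrative only; the plugin merges these options into the request elsewhere). BLOCK_NONE is the Gemini API threshold meaning "never block this harm category", which is why the updated spec asserts all four entries:

    require "json"

    # Same mapping as the new default_options in gemini.rb.
    categories = %w[HARASSMENT SEXUALLY_EXPLICIT HATE_SPEECH DANGEROUS_CONTENT]

    safety_settings =
      categories.map { |category| { category: "HARM_CATEGORY_#{category}", threshold: "BLOCK_NONE" } }

    puts JSON.pretty_generate({ generationConfig: {}, safetySettings: safety_settings })
    # Prints the structure the updated gemini_spec.rb expects, e.g.
    #   { "category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE" }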