From 9211b211f55564781b7af55c17dd12c814dde250 Mon Sep 17 00:00:00 2001
From: Sam
Date: Mon, 17 Mar 2025 15:14:09 +1100
Subject: [PATCH] FEATURE: silent triage using ai persona (#1193)

This adds a new mode to persona triage in which nothing is posted on the
topic, letting people perform all triage actions using tools.

It also introduces new APIs to create chat messages from tools, which can
be useful in certain moderation scenarios.

Co-authored-by: Natalie Tay

* remove TODO code

---------

Co-authored-by: Natalie Tay
---
 config/eval-llms.yml                       | 15 ++++
 config/locales/client.en.yml               |  3 +
 discourse_automation/llm_persona_triage.rb |  7 +-
 lib/ai_bot/playground.rb                   | 19 +++-
 lib/ai_bot/tool_runner.rb                  | 56 ++++++++++++
 lib/automation/llm_persona_triage.rb       |  3 +-
 lib/completions/prompt.rb                  | 43 +++++++++
 .../llm_persona_triage_spec.rb             | 88 +++++++++++++++++++
 8 files changed, 229 insertions(+), 5 deletions(-)

diff --git a/config/eval-llms.yml b/config/eval-llms.yml
index 7b25730c..dd90f5b9 100644
--- a/config/eval-llms.yml
+++ b/config/eval-llms.yml
@@ -49,6 +49,21 @@ llms:
     max_prompt_tokens: 200000
     vision_enabled: true

+  claude-3.7-sonnet-thinking:
+    display_name: Claude 3.7 Sonnet
+    name: claude-3-7-sonnet-latest
+    tokenizer: DiscourseAi::Tokenizer::AnthropicTokenizer
+    api_key_env: ANTHROPIC_API_KEY
+    provider: anthropic
+    url: https://api.anthropic.com/v1/messages
+    max_prompt_tokens: 200000
+    vision_enabled: true
+    provider_params:
+      disable_top_p: true
+      disable_temperature: true
+      enable_reasoning: true
+      reasoning_tokens: 1024
+
   gemini-2.0-flash:
     display_name: Gemini 2.0 Flash
     name: gemini-2-0-flash
diff --git a/config/locales/client.en.yml b/config/locales/client.en.yml
index 15792461..0eb81da4 100644
--- a/config/locales/client.en.yml
+++ b/config/locales/client.en.yml
@@ -107,6 +107,9 @@ en:
             whisper:
               label: "Reply as Whisper"
               description: "Whether the persona's response should be a whisper"
+            silent_mode:
+              label: "Silent Mode"
+              description: "In silent mode the persona will receive the content but will not post anything on the forum - useful when performing triage using tools"
         llm_triage:
           fields:
             system_prompt:
diff --git a/discourse_automation/llm_persona_triage.rb b/discourse_automation/llm_persona_triage.rb
index b78e418c..cbe6c1ae 100644
--- a/discourse_automation/llm_persona_triage.rb
+++ b/discourse_automation/llm_persona_triage.rb
@@ -14,13 +14,15 @@ if defined?(DiscourseAutomation)
             content: DiscourseAi::Automation.available_persona_choices,
           }
     field :whisper, component: :boolean
+    field :silent_mode, component: :boolean

     script do |context, fields|
       post = context["post"]
       next if post&.user&.bot?

-      persona_id = fields["persona"]["value"]
-      whisper = fields["whisper"]["value"]
+      persona_id = fields.dig("persona", "value")
+      whisper = !!fields.dig("whisper", "value")
+      silent_mode = !!fields.dig("silent_mode", "value")

       begin
         RateLimiter.new(
@@ -42,6 +44,7 @@ if defined?(DiscourseAutomation)
           persona_id: persona_id,
           whisper: whisper,
           automation: self.automation,
+          silent_mode: silent_mode,
         )
       rescue => e
         Discourse.warn_exception(
diff --git a/lib/ai_bot/playground.rb b/lib/ai_bot/playground.rb
index 1f9a53d3..59533efd 100644
--- a/lib/ai_bot/playground.rb
+++ b/lib/ai_bot/playground.rb
@@ -188,7 +188,8 @@ module DiscourseAi
         whisper: nil,
         add_user_to_pm: false,
         stream_reply: false,
-        auto_set_title: false
+        auto_set_title: false,
+        silent_mode: false
       )
         ai_persona = AiPersona.find_by(id: persona_id)
         raise Discourse::InvalidParameters.new(:persona_id) if !ai_persona
@@ -207,7 +208,15 @@ module DiscourseAi
           add_user_to_pm: add_user_to_pm,
           stream_reply: stream_reply,
           auto_set_title: auto_set_title,
+          silent_mode: silent_mode,
         )
+      rescue => e
+        if Rails.env.test?
+          p e
+          puts e.backtrace[0..10]
+        else
+          raise e
+        end
       end

       def initialize(bot)
@@ -475,6 +484,7 @@ module DiscourseAi
         add_user_to_pm: true,
         stream_reply: nil,
         auto_set_title: true,
+        silent_mode: false,
         &blk
       )
         # this is a multithreading issue
@@ -482,6 +492,11 @@ module DiscourseAi
         # post custom prompt is needed and it may not
         # be properly loaded, ensure it is loaded
         PostCustomPrompt.none

+        if silent_mode
+          auto_set_title = false
+          stream_reply = false
+        end
+
         reply = +""
         post_streamer = nil
@@ -590,7 +605,7 @@ module DiscourseAi
           end
         end

-        return if reply.blank?
+        return if reply.blank? || silent_mode

         if stream_reply
           post_streamer.finish
diff --git a/lib/ai_bot/tool_runner.rb b/lib/ai_bot/tool_runner.rb
index 14af53e2..8e350749 100644
--- a/lib/ai_bot/tool_runner.rb
+++ b/lib/ai_bot/tool_runner.rb
@@ -89,6 +89,13 @@ module DiscourseAi
             },
           };
         },
+        createChatMessage: function(params) {
+          const result = _discourse_create_chat_message(params);
+          if (result.error) {
+            throw new Error(result.error);
+          }
+          return result;
+        },
       };

       const context = #{JSON.generate(@context)};
@@ -345,6 +352,55 @@ module DiscourseAi
           end,
         )

+        mini_racer_context.attach(
+          "_discourse_create_chat_message",
+          ->(params) do
+            in_attached_function do
+              params = params.symbolize_keys
+              channel_name = params[:channel_name]
+              username = params[:username]
+              message = params[:message]
+
+              # Validate parameters
+              return { error: "Missing required parameter: channel_name" } if channel_name.blank?
+              return { error: "Missing required parameter: username" } if username.blank?
+              return { error: "Missing required parameter: message" } if message.blank?
+
+              # Find the user
+              user = User.find_by(username: username)
+              return { error: "User not found: #{username}" } if user.nil?
+
+              # Find the channel
+              channel = Chat::Channel.find_by(name: channel_name)
+              if channel.nil?
+                # Try finding by slug if not found by name
+                channel = Chat::Channel.find_by(slug: channel_name.parameterize)
+              end
+              return { error: "Channel not found: #{channel_name}" } if channel.nil?
+
+              begin
+                guardian = Guardian.new(user)
+                message =
+                  ChatSDK::Message.create(
+                    raw: message,
+                    channel_id: channel.id,
+                    guardian: guardian,
+                    enforce_membership: !channel.direct_message_channel?,
+                  )
+
+                {
+                  success: true,
+                  message_id: message.id,
+                  message: message.message,
+                  created_at: message.created_at.iso8601,
+                }
+              rescue => e
+                { error: "Failed to create chat message: #{e.message}" }
+              end
+            end
+          end,
+        )
+
         mini_racer_context.attach(
           "_discourse_search",
           ->(params) do
diff --git a/lib/automation/llm_persona_triage.rb b/lib/automation/llm_persona_triage.rb
index f1357644..abf7fa6d 100644
--- a/lib/automation/llm_persona_triage.rb
+++ b/lib/automation/llm_persona_triage.rb
@@ -2,11 +2,12 @@
 module DiscourseAi
   module Automation
     module LlmPersonaTriage
-      def self.handle(post:, persona_id:, whisper: false, automation: nil)
+      def self.handle(post:, persona_id:, whisper: false, silent_mode: false, automation: nil)
         DiscourseAi::AiBot::Playground.reply_to_post(
           post: post,
           persona_id: persona_id,
           whisper: whisper,
+          silent_mode: silent_mode,
         )
       rescue => e
         Discourse.warn_exception(
diff --git a/lib/completions/prompt.rb b/lib/completions/prompt.rb
index b4f836b1..79e71c06 100644
--- a/lib/completions/prompt.rb
+++ b/lib/completions/prompt.rb
@@ -41,6 +41,49 @@ module DiscourseAi
         @tool_choice = tool_choice
       end

+      # this new api tries to create symmetry between responses and prompts
+      # this means anything we get back from the model via endpoint can be easily appended
+      def push_model_response(response)
+        response = [response] if !response.is_a? Array
+
+        thinking, thinking_signature, redacted_thinking_signature = nil
+
+        response.each do |message|
+          if message.is_a?(Thinking)
+            # we can safely skip partials here
+            next if message.partial?
+            if message.redacted
+              redacted_thinking_signature = message.signature
+            else
+              thinking = message.message
+              thinking_signature = message.signature
+            end
+          elsif message.is_a?(ToolCall)
+            next if message.partial?
+            # this part of the API is a bit surprising
+            # needing to wrap the parameters in an arguments key is not ideal
+            push(
+              type: :tool_call,
+              content: { arguments: message.parameters }.to_json,
+              id: message.id,
+              name: message.name,
+            )
+          elsif message.is_a?(String)
+            push(type: :model, content: message)
+          else
+            raise ArgumentError, "response must be an array of strings, ToolCalls, or Thinkings"
+          end
+        end
+
+        # anthropic rules are that we attach thinking to the last message of the response
+        # it is odd, I wonder if long term we should just keep thinking as a separate object
+        if thinking || redacted_thinking_signature
+          messages.last[:thinking] = thinking
+          messages.last[:thinking_signature] = thinking_signature
+          messages.last[:redacted_thinking_signature] = redacted_thinking_signature
+        end
+      end
+
       def push(
         type:,
         content:,
diff --git a/spec/lib/discourse_automation/llm_persona_triage_spec.rb b/spec/lib/discourse_automation/llm_persona_triage_spec.rb
index 2ca5d188..da8b92df 100644
--- a/spec/lib/discourse_automation/llm_persona_triage_spec.rb
+++ b/spec/lib/discourse_automation/llm_persona_triage_spec.rb
@@ -239,4 +239,92 @@ describe DiscourseAi::Automation::LlmPersonaTriage do
     # should not inject persona into allowed users
     expect(topic.topic_allowed_users.pluck(:user_id).sort).to eq(original_user_ids.sort)
   end
+
+  describe "LLM Persona Triage with Chat Message Creation" do
+    fab!(:user)
+    fab!(:bot_user) { Fabricate(:user) }
+    fab!(:chat_channel) { Fabricate(:category_channel) }
+
+    fab!(:custom_tool) do
+      AiTool.create!(
+        name: "Chat Notifier",
+        tool_name: "chat_notifier",
+        description: "Creates a chat notification in a channel",
+        parameters: [
+          { name: "channel_id", type: "integer", description: "Chat channel ID" },
+          { name: "message", type: "string", description: "Message to post" },
+        ],
+        script: <<~JS,
+          function invoke(params) {
+            // Create a chat message using the Chat API
+            const result = discourse.createChatMessage({
+              channel_name: '#{chat_channel.name}',
+              username: '#{user.username}',
+              message: params.message
+            });
+
+            chain.setCustomRaw("We are done, stopping the chain");
+
+            return {
+              success: true,
+              message_id: result.message_id,
+              url: result.url,
+              message: params.message
+            };
+          }
+        JS
+        summary: "Notify in chat channel",
+        created_by: Discourse.system_user,
+      )
+    end
+
+    before do
+      SiteSetting.chat_enabled = true
+
+      ai_persona.update!(tools: ["custom-#{custom_tool.id}"])
+
+      # Set up automation fields
+      automation.fields.create!(
+        component: "choices",
+        name: "persona",
+        metadata: {
+          value: ai_persona.id,
+        },
+        target: "script",
+      )
+
+      automation.fields.create!(
+        component: "boolean",
+        name: "silent_mode",
+        metadata: {
+          value: true,
+        },
+        target: "script",
+      )
+    end
+
+    it "can silently analyze a post and create a chat notification" do
+      post = Fabricate(:post, raw: "Please help with my billing issue")
+
+      # Tool response from LLM
+      tool_call =
+        DiscourseAi::Completions::ToolCall.new(
+          name: "chat_notifier",
+          parameters: {
+            "message" => "Hello world!",
+          },
+          id: "tool_call_1",
+        )
+
+      DiscourseAi::Completions::Llm.with_prepared_responses([tool_call]) do
+        automation.running_in_background!
+        automation.trigger!({ "post" => post })
+      end
+
+      expect(post.topic.reload.posts.count).to eq(1)
+
+      expect(chat_channel.chat_messages.count).to eq(1)
+      expect(chat_channel.chat_messages.last.message).to eq("Hello world!")
+    end
+  end
 end
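
Usage note: the sketch below shows how a custom tool script might call the new discourse.createChatMessage API added in lib/ai_bot/tool_runner.rb, mirroring the spec above. The channel name and username are illustrative placeholders; only the call shape (channel_name, username, message), its return fields, the thrown-error behaviour, and chain.setCustomRaw come from this patch. With the silent_mode automation field enabled, a tool like this becomes the persona's only visible output, since nothing is posted back to the topic.

function invoke(params) {
  // Post a chat message on behalf of a user. The runner looks the channel up
  // by name first, then by slug, and reports an error if the channel or the
  // user cannot be found; the JS wrapper turns that error into an exception.
  const result = discourse.createChatMessage({
    channel_name: "staff-triage",   // placeholder channel name
    username: "triage-bot",         // placeholder user the message is created as
    message: params.message,        // text supplied by the LLM tool call
  });

  // Reaching this point means the message was created; result carries
  // success, message_id, message and created_at.
  chain.setCustomRaw("Triage handled in chat, stopping the chain");

  return { success: true, message_id: result.message_id };
}

Because errors surface as thrown exceptions rather than silent nils, a failed channel or user lookup shows up directly in the tool run, which is what the validation branches in _discourse_create_chat_message are there for.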