FEATURE: silent triage using ai persona (#1193)

This allows for a new mode in persona triage where nothing is posted on topics.

This allows people to perform all triage actions using tools.

Additionally introduces new APIs to create chat messages from tools which can be useful in certain moderation scenarios

Co-authored-by: Natalie Tay <natalie.tay@gmail.com>

* remove TODO code

---------

Co-authored-by: Natalie Tay <natalie.tay@gmail.com>
This commit is contained in:
Sam 2025-03-17 15:14:09 +11:00 committed by GitHub
parent 24e6aa52bb
commit 9211b211f5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 229 additions and 5 deletions

View File

@ -49,6 +49,21 @@ llms:
max_prompt_tokens: 200000 max_prompt_tokens: 200000
vision_enabled: true vision_enabled: true
claude-3.7-sonnet-thinking:
display_name: Claude 3.7 Sonnet
name: claude-3-7-sonnet-latest
tokenizer: DiscourseAi::Tokenizer::AnthropicTokenizer
api_key_env: ANTHROPIC_API_KEY
provider: anthropic
url: https://api.anthropic.com/v1/messages
max_prompt_tokens: 200000
vision_enabled: true
provider_params:
disable_top_p: true
disable_temperature: true
enable_reasoning: true
reasoning_tokens: 1024
gemini-2.0-flash: gemini-2.0-flash:
display_name: Gemini 2.0 Flash display_name: Gemini 2.0 Flash
name: gemini-2-0-flash name: gemini-2-0-flash

View File

@ -107,6 +107,9 @@ en:
whisper: whisper:
label: "Reply as Whisper" label: "Reply as Whisper"
description: "Whether the persona's response should be a whisper" description: "Whether the persona's response should be a whisper"
silent_mode:
label: "Silent Mode"
description: "In silent mode the persona will receive the content but will not post anything on the forum - useful when performing triage using tools"
llm_triage: llm_triage:
fields: fields:
system_prompt: system_prompt:

View File

@ -14,13 +14,15 @@ if defined?(DiscourseAutomation)
content: DiscourseAi::Automation.available_persona_choices, content: DiscourseAi::Automation.available_persona_choices,
} }
field :whisper, component: :boolean field :whisper, component: :boolean
field :silent_mode, component: :boolean
script do |context, fields| script do |context, fields|
post = context["post"] post = context["post"]
next if post&.user&.bot? next if post&.user&.bot?
persona_id = fields["persona"]["value"] persona_id = fields.dig("persona", "value")
whisper = fields["whisper"]["value"] whisper = !!fields.dig("whisper", "value")
silent_mode = !!fields.dig("silent_mode", "value")
begin begin
RateLimiter.new( RateLimiter.new(
@ -42,6 +44,7 @@ if defined?(DiscourseAutomation)
persona_id: persona_id, persona_id: persona_id,
whisper: whisper, whisper: whisper,
automation: self.automation, automation: self.automation,
silent_mode: silent_mode,
) )
rescue => e rescue => e
Discourse.warn_exception( Discourse.warn_exception(

View File

@ -188,7 +188,8 @@ module DiscourseAi
whisper: nil, whisper: nil,
add_user_to_pm: false, add_user_to_pm: false,
stream_reply: false, stream_reply: false,
auto_set_title: false auto_set_title: false,
silent_mode: false
) )
ai_persona = AiPersona.find_by(id: persona_id) ai_persona = AiPersona.find_by(id: persona_id)
raise Discourse::InvalidParameters.new(:persona_id) if !ai_persona raise Discourse::InvalidParameters.new(:persona_id) if !ai_persona
@ -207,7 +208,15 @@ module DiscourseAi
add_user_to_pm: add_user_to_pm, add_user_to_pm: add_user_to_pm,
stream_reply: stream_reply, stream_reply: stream_reply,
auto_set_title: auto_set_title, auto_set_title: auto_set_title,
silent_mode: silent_mode,
) )
rescue => e
if Rails.env.test?
p e
puts e.backtrace[0..10]
else
raise e
end
end end
def initialize(bot) def initialize(bot)
@ -475,6 +484,7 @@ module DiscourseAi
add_user_to_pm: true, add_user_to_pm: true,
stream_reply: nil, stream_reply: nil,
auto_set_title: true, auto_set_title: true,
silent_mode: false,
&blk &blk
) )
# this is a multithreading issue # this is a multithreading issue
@ -482,6 +492,11 @@ module DiscourseAi
# be properly loaded, ensure it is loaded # be properly loaded, ensure it is loaded
PostCustomPrompt.none PostCustomPrompt.none
if silent_mode
auto_set_title = false
stream_reply = false
end
reply = +"" reply = +""
post_streamer = nil post_streamer = nil
@ -590,7 +605,7 @@ module DiscourseAi
end end
end end
return if reply.blank? return if reply.blank? || silent_mode
if stream_reply if stream_reply
post_streamer.finish post_streamer.finish

View File

@ -89,6 +89,13 @@ module DiscourseAi
}, },
}; };
}, },
createChatMessage: function(params) {
const result = _discourse_create_chat_message(params);
if (result.error) {
throw new Error(result.error);
}
return result;
},
}; };
const context = #{JSON.generate(@context)}; const context = #{JSON.generate(@context)};
@ -345,6 +352,55 @@ module DiscourseAi
end, end,
) )
mini_racer_context.attach(
"_discourse_create_chat_message",
->(params) do
in_attached_function do
params = params.symbolize_keys
channel_name = params[:channel_name]
username = params[:username]
message = params[:message]
# Validate parameters
return { error: "Missing required parameter: channel_name" } if channel_name.blank?
return { error: "Missing required parameter: username" } if username.blank?
return { error: "Missing required parameter: message" } if message.blank?
# Find the user
user = User.find_by(username: username)
return { error: "User not found: #{username}" } if user.nil?
# Find the channel
channel = Chat::Channel.find_by(name: channel_name)
if channel.nil?
# Try finding by slug if not found by name
channel = Chat::Channel.find_by(slug: channel_name.parameterize)
end
return { error: "Channel not found: #{channel_name}" } if channel.nil?
begin
guardian = Guardian.new(user)
message =
ChatSDK::Message.create(
raw: message,
channel_id: channel.id,
guardian: guardian,
enforce_membership: !channel.direct_message_channel?,
)
{
success: true,
message_id: message.id,
message: message.message,
created_at: message.created_at.iso8601,
}
rescue => e
{ error: "Failed to create chat message: #{e.message}" }
end
end
end,
)
mini_racer_context.attach( mini_racer_context.attach(
"_discourse_search", "_discourse_search",
->(params) do ->(params) do

View File

@ -2,11 +2,12 @@
module DiscourseAi module DiscourseAi
module Automation module Automation
module LlmPersonaTriage module LlmPersonaTriage
def self.handle(post:, persona_id:, whisper: false, automation: nil) def self.handle(post:, persona_id:, whisper: false, silent_mode: false, automation: nil)
DiscourseAi::AiBot::Playground.reply_to_post( DiscourseAi::AiBot::Playground.reply_to_post(
post: post, post: post,
persona_id: persona_id, persona_id: persona_id,
whisper: whisper, whisper: whisper,
silent_mode: silent_mode,
) )
rescue => e rescue => e
Discourse.warn_exception( Discourse.warn_exception(

View File

@ -41,6 +41,49 @@ module DiscourseAi
@tool_choice = tool_choice @tool_choice = tool_choice
end end
# this new api tries to create symmetry between responses and prompts
# this means anything we get back from the model via endpoint can be easily appended
def push_model_response(response)
response = [response] if !response.is_a? Array
thinking, thinking_signature, redacted_thinking_signature = nil
response.each do |message|
if message.is_a?(Thinking)
# we can safely skip partials here
next if message.partial?
if message.redacted
redacted_thinking_signature = message.signature
else
thinking = message.message
thinking_signature = message.signature
end
elsif message.is_a?(ToolCall)
next if message.partial?
# this part of the API is a bit surprising:
# having to serialize the arguments ourselves is not ideal
push(
type: :tool_call,
content: { arguments: message.parameters }.to_json,
id: message.id,
name: message.name,
)
elsif message.is_a?(String)
push(type: :model, content: message)
else
raise ArgumentError, "response must be an array of strings, ToolCalls, or Thinkings"
end
end
# Anthropic's rules require attaching thinking to the last message of the response
# this is odd; long term we may want to keep thinking as a separate object
if thinking || redacted_thinking_signature
messages.last[:thinking] = thinking
messages.last[:thinking_signature] = thinking_signature
messages.last[:redacted_thinking_signature] = redacted_thinking_signature
end
end
def push( def push(
type:, type:,
content:, content:,

View File

@ -239,4 +239,92 @@ describe DiscourseAi::Automation::LlmPersonaTriage do
# should not inject persona into allowed users # should not inject persona into allowed users
expect(topic.topic_allowed_users.pluck(:user_id).sort).to eq(original_user_ids.sort) expect(topic.topic_allowed_users.pluck(:user_id).sort).to eq(original_user_ids.sort)
end end
describe "LLM Persona Triage with Chat Message Creation" do
fab!(:user)
fab!(:bot_user) { Fabricate(:user) }
fab!(:chat_channel) { Fabricate(:category_channel) }
fab!(:custom_tool) do
AiTool.create!(
name: "Chat Notifier",
tool_name: "chat_notifier",
description: "Creates a chat notification in a channel",
parameters: [
{ name: "channel_id", type: "integer", description: "Chat channel ID" },
{ name: "message", type: "string", description: "Message to post" },
],
script: <<~JS,
function invoke(params) {
// Create a chat message using the Chat API
const result = discourse.createChatMessage({
channel_name: '#{chat_channel.name}',
username: '#{user.username}',
message: params.message
});
chain.setCustomRaw("We are done, stopping chaing");
return {
success: true,
message_id: result.message_id,
url: result.url,
message: params.message
};
}
JS
summary: "Notify in chat channel",
created_by: Discourse.system_user,
)
end
before do
SiteSetting.chat_enabled = true
ai_persona.update!(tools: ["custom-#{custom_tool.id}"])
# Set up automation fields
automation.fields.create!(
component: "choices",
name: "persona",
metadata: {
value: ai_persona.id,
},
target: "script",
)
automation.fields.create!(
component: "boolean",
name: "silent_mode",
metadata: {
value: true,
},
target: "script",
)
end
it "can silently analyze a post and create a chat notification" do
post = Fabricate(:post, raw: "Please help with my billing issue")
# Tool response from LLM
tool_call =
DiscourseAi::Completions::ToolCall.new(
name: "chat_notifier",
parameters: {
"message" => "Hello world!",
},
id: "tool_call_1",
)
DiscourseAi::Completions::Llm.with_prepared_responses([tool_call]) do
automation.running_in_background!
automation.trigger!({ "post" => post })
end
expect(post.topic.reload.posts.count).to eq(1)
expect(chat_channel.chat_messages.count).to eq(1)
expect(chat_channel.chat_messages.last.message).to eq("Hello world!")
end
end
end end