FEATURE: Add persona-based replies and whisper support to LLM triage (#1170)

This PR enhances the LLM triage automation with several important improvements:

- Add ability to use AI personas for automated replies instead of canned replies
- Add support for whisper responses
- Refactor LLM persona reply functionality into a reusable method
- Add new settings to configure response behavior in automations
- Improve error handling and logging
- Fix handling of personal messages in the triage flow
- Add comprehensive test coverage for new features
- Make personas configurable with more flexible requirements

This allows for more dynamic and context-aware responses in automated workflows, with better control over visibility and attribution.
Committed by: Sam, 2025-03-06 17:18:15 +11:00 (merged via GitHub)
Parent commit: 453bb4821f
Commit: 01893bb6ed
Signature: no known key found for this signature in database (GPG Key ID: B5690EEEBB952194)
8 changed files with 192 additions and 45 deletions

View File

@ -146,6 +146,12 @@ en:
include_personal_messages:
label: "Include personal messages"
description: "Also scan and triage personal messages"
whisper:
label: "Reply as Whisper"
description: "Whether the AI's response should be a whisper"
reply_persona:
label: "Reply Persona"
description: "AI Persona to use for replies (must have default LLM), will be prioritized over canned reply"
model:
label: "Model"
description: "Language model used for triage"

View File

@ -9,22 +9,27 @@ if defined?(DiscourseAutomation)
triggerables %i[post_created_edited]
field :system_prompt, component: :message, required: false
field :search_for_text, component: :text, required: true
field :max_post_tokens, component: :text
field :stop_sequences, component: :text_list, required: false
# TODO move to triggerables
field :include_personal_messages, component: :boolean
# Inputs
field :model,
component: :choices,
required: true,
extra: {
content: DiscourseAi::Automation.available_models,
}
field :system_prompt, component: :message, required: false
field :search_for_text, component: :text, required: true
field :max_post_tokens, component: :text
field :stop_sequences, component: :text_list, required: false
field :temperature, component: :text
# Actions
field :category, component: :category
field :tags, component: :tags
field :hide_topic, component: :boolean
field :flag_post, component: :boolean
field :include_personal_messages, component: :boolean
field :temperature, component: :text
field :flag_type,
component: :choices,
required: false,
@ -32,21 +37,40 @@ if defined?(DiscourseAutomation)
content: DiscourseAi::Automation.flag_types,
},
default: "review"
field :canned_reply, component: :message
field :canned_reply_user, component: :user
field :canned_reply, component: :message
field :reply_persona,
component: :choices,
extra: {
content:
DiscourseAi::Automation.available_persona_choices(
require_user: false,
require_default_llm: true,
),
}
field :whisper, component: :boolean
script do |context, fields|
post = context["post"]
next if post&.user&.bot?
if post.topic.private_message?
include_personal_messages = fields.dig("include_personal_messages", "value")
next if !include_personal_messages
end
canned_reply = fields.dig("canned_reply", "value")
canned_reply_user = fields.dig("canned_reply_user", "value")
reply_persona_id = fields.dig("reply_persona", "value")
whisper = fields.dig("whisper", "value")
# nothing to do if we already replied
next if post.user.username == canned_reply_user
next if post.raw.strip == canned_reply.to_s.strip
system_prompt = fields["system_prompt"]["value"]
search_for_text = fields["search_for_text"]["value"]
model = fields["model"]["value"]
system_prompt = fields.dig("system_prompt", "value")
search_for_text = fields.dig("search_for_text", "value")
model = fields.dig("model", "value")
category_id = fields.dig("category", "value")
tags = fields.dig("tags", "value")
@ -65,11 +89,6 @@ if defined?(DiscourseAutomation)
stop_sequences = fields.dig("stop_sequences", "value")
if post.topic.private_message?
include_personal_messages = fields.dig("include_personal_messages", "value")
next if !include_personal_messages
end
begin
RateLimiter.new(
Discourse.system_user,
@ -94,6 +113,8 @@ if defined?(DiscourseAutomation)
tags: tags,
canned_reply: canned_reply,
canned_reply_user: canned_reply_user,
reply_persona_id: reply_persona_id,
whisper: whisper,
hide_topic: hide_topic,
flag_post: flag_post,
flag_type: flag_type.to_s.to_sym,
@ -101,9 +122,13 @@ if defined?(DiscourseAutomation)
stop_sequences: stop_sequences,
automation: self.automation,
temperature: temperature,
action: context["action"],
)
rescue => e
Discourse.warn_exception(e, message: "llm_triage: skipped triage on post #{post.id}")
Discourse.warn_exception(
e,
message: "llm_triage: skipped triage on post #{post.id} #{post.url}",
)
end
end
end

View File

@ -162,6 +162,20 @@ module DiscourseAi
end
end
# Posts a reply to `post` on behalf of an AI persona.
#
# @param post [Post] the post being replied to
# @param user [User, nil] optional user to attribute the reply to;
#   falls back to the persona's own user when nil
# @param persona_id [Integer, nil] id of the AiPersona to reply with
# @param whisper [Boolean, nil] whether the reply is posted as a whisper
# @raise [Discourse::InvalidParameters] if the persona cannot be found,
#   or if no attributable user is available
def self.reply_to_post(post:, user: nil, persona_id: nil, whisper: nil)
  persona_record = AiPersona.find_by(id: persona_id)
  raise Discourse::InvalidParameters.new(:persona_id) unless persona_record

  # Prefer an explicitly supplied user; otherwise attribute the reply to
  # the persona's own bot user. Either way we must have *someone* to post as.
  responder = user || persona_record.user
  raise Discourse::InvalidParameters.new(:user) if responder.nil?

  persona_instance = persona_record.class_instance.new
  bot = DiscourseAi::AiBot::Bot.as(responder, persona: persona_instance)
  playground = DiscourseAi::AiBot::Playground.new(bot)
  playground.reply_to(post, whisper: whisper, context_style: :topic)
end
def initialize(bot)
@bot = bot
end

View File

@ -38,18 +38,17 @@ module DiscourseAi
values
end
def self.available_persona_choices
AiPersona
.joins(:user)
.where.not(user_id: nil)
.where.not(default_llm: nil)
.map do |persona|
{
id: persona.id,
translated_name: persona.name,
description: "#{persona.name} (#{persona.user.username})",
}
end
# Returns the AI personas that can be offered as choices in an automation
# "choices" field, shaped for the automation UI:
#   { id:, translated_name:, description: }
#
# @param require_user [Boolean] when true, restrict to personas that have
#   an associated user account
# @param require_default_llm [Boolean] when true, restrict to personas
#   with a configured default LLM
# @return [Array<Hash>] one entry per matching persona
#
# NOTE(review): `joins(:user)` is an INNER JOIN, so personas without a
# user are excluded even when require_user: false — confirm this is
# intended (the description string below also dereferences
# persona.user.username, which would fail for a user-less persona).
def self.available_persona_choices(require_user: true, require_default_llm: true)
  relation = AiPersona.joins(:user)
  relation = relation.where.not(user_id: nil) if require_user
  relation = relation.where.not(default_llm: nil) if require_default_llm
  relation.map do |persona|
    {
      id: persona.id,
      translated_name: persona.name,
      description: "#{persona.name} (#{persona.user.username})",
    }
  end
end
end
end

View File

@ -3,21 +3,16 @@ module DiscourseAi
module Automation
module LlmPersonaTriage
def self.handle(post:, persona_id:, whisper: false, automation: nil)
ai_persona = AiPersona.find_by(id: persona_id)
return if ai_persona.nil?
persona_class = ai_persona.class_instance
persona = persona_class.new
bot_user = ai_persona.user
return if bot_user.nil?
bot = DiscourseAi::AiBot::Bot.as(bot_user, persona: persona)
playground = DiscourseAi::AiBot::Playground.new(bot)
playground.reply_to(post, whisper: whisper, context_style: :topic)
DiscourseAi::AiBot::Playground.reply_to_post(
post: post,
persona_id: persona_id,
whisper: whisper,
)
rescue => e
Rails.logger.error("Error in LlmPersonaTriage: #{e.message}\n#{e.backtrace.join("\n")}")
Discourse.warn_exception(
e,
message: "Error responding to: #{post&.url} in LlmPersonaTriage.handle",
)
raise e if Rails.env.test?
nil
end

View File

@ -18,13 +18,21 @@ module DiscourseAi
automation: nil,
max_post_tokens: nil,
stop_sequences: nil,
temperature: nil
temperature: nil,
whisper: nil,
reply_persona_id: nil,
action: nil
)
if category_id.blank? && tags.blank? && canned_reply.blank? && hide_topic.blank? &&
flag_post.blank?
flag_post.blank? && reply_persona_id.blank?
raise ArgumentError, "llm_triage: no action specified!"
end
if action == :edit && category_id.blank? && tags.blank? && flag_post.blank? &&
hide_topic.blank?
return
end
llm = DiscourseAi::Completions::Llm.proxy(model)
s_prompt = system_prompt.to_s.sub("%%POST%%", "") # Backwards-compat. We no longer sub this.
@ -54,14 +62,32 @@ module DiscourseAi
if result.present? && result.downcase.include?(search_for_text.downcase)
user = User.find_by_username(canned_reply_user) if canned_reply_user.present?
original_user = user
user = user || Discourse.system_user
if canned_reply.present?
if reply_persona_id.present? && action != :edit
begin
DiscourseAi::AiBot::Playground.reply_to_post(
post: post,
persona_id: reply_persona_id,
whisper: whisper,
user: original_user,
)
rescue StandardError => e
Discourse.warn_exception(
e,
message: "Error responding to: #{post&.url} in LlmTriage.handle",
)
raise e if Rails.env.test?
end
elsif canned_reply.present? && action != :edit
post_type = whisper ? Post.types[:whisper] : Post.types[:regular]
PostCreator.create!(
user,
topic_id: post.topic_id,
raw: canned_reply,
reply_to_post_number: post.post_number,
skip_validations: true,
post_type: post_type,
)
end

View File

@ -219,6 +219,8 @@ module DiscourseAi
@processor ||=
DiscourseAi::Completions::AnthropicMessageProcessor.new(
streaming_mode: @streaming_mode,
partial_tool_calls: partial_tool_calls,
output_thinking: output_thinking,
)
else
@processor ||=

View File

@ -123,4 +123,84 @@ describe DiscourseAi::Automation::LlmTriage do
last_post = post.topic.reload.posts.order(:post_number).last
expect(last_post.raw).to eq post.raw
end
# When a reply_persona is configured (and canned_reply is cleared), the
# triage reply should be authored by the persona's bot user through the
# AI bot pipeline, and honour the whisper setting.
it "can respond using an AI persona when configured" do
  bot_user = Fabricate(:user, username: "ai_assistant")
  ai_persona =
    Fabricate(
      :ai_persona,
      name: "Help Bot",
      description: "AI assistant for forum help",
      system_prompt: "You are a helpful forum assistant",
      default_llm: llm_model,
      user_id: bot_user.id,
    )
  # Configure the automation to use the persona instead of canned reply
  add_automation_field("canned_reply", nil, type: "message") # Clear canned reply
  add_automation_field("reply_persona", ai_persona.id, type: "choices")
  add_automation_field("whisper", true, type: "boolean")
  post = Fabricate(:post, raw: "I need help with a problem")
  ai_response = "I'll help you with your problem!"
  # Set up the test to provide both the triage and the persona responses
  # (first response "bad" matches search_for_text and triggers the reply)
  DiscourseAi::Completions::Llm.with_prepared_responses(["bad", ai_response]) do
    automation.running_in_background!
    automation.trigger!({ "post" => post })
  end
  # Verify the response was created
  topic = post.topic.reload
  last_post = topic.posts.order(:post_number).last
  # Verify the AI persona's user created the post
  expect(last_post.user_id).to eq(bot_user.id)
  # Verify the content matches the AI response
  expect(last_post.raw).to eq(ai_response)
  # Verify it's a whisper post (since we set whisper: true)
  expect(last_post.post_type).to eq(Post.types[:whisper])
end
# When the triggering action is :edit, no reply should be created even
# though both a canned reply and a reply persona are configured — edits
# must not spawn duplicate responses.
it "does not create replies when the action is edit" do
  # Set up bot user and persona
  bot_user = Fabricate(:user, username: "helper_bot")
  ai_persona =
    Fabricate(
      :ai_persona,
      name: "Edit Helper",
      description: "AI assistant for editing",
      system_prompt: "You help with editing",
      default_llm: llm_model,
      user_id: bot_user.id,
    )
  # Configure the automation with both reply methods
  add_automation_field("canned_reply", "This is a canned reply", type: "message")
  add_automation_field("reply_persona", ai_persona.id, type: "choices")
  # Create a post and capture its topic
  post = Fabricate(:post, raw: "This needs to be evaluated")
  topic = post.topic
  # Get initial post count
  initial_post_count = topic.posts.count
  # Run automation with action: :edit and a matching response
  DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
    automation.running_in_background!
    automation.trigger!({ "post" => post, "action" => :edit })
  end
  # Topic should be updated (if configured) but no new posts
  topic.reload
  expect(topic.posts.count).to eq(initial_post_count)
  # Verify no replies were created
  last_post = topic.posts.order(:post_number).last
  expect(last_post.id).to eq(post.id)
end
end