FEATURE: friendlier reply behavior in bot PMs (#535)
- Stop replying as the bot when a human replies to another human
- Reply as the correct persona when a reply is made directly to a persona
- Fix a paper cut where suppressing notifications was not actually suppressing them
parent f62703760f
commit cc0369dd39
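For orientation before the hunks: a condensed, illustrative sketch of the reply rules this commit adds. The Post/User stand-ins and the bot_should_reply? wrapper are invented for the sketch and are not part of the plugin code; the real logic lives in the playground.rb hunks below.

# Illustrative sketch only, condensed from the playground.rb hunks below.
# Post and User are tiny stand-ins so the rule can be run outside Discourse.
User = Struct.new(:username_lower, :bot) do
  def bot?
    bot
  end
end
Post = Struct.new(:mentions, :reply_to_post_number, :reply_to_post_user)

def bot_should_reply?(post, mentionables)
  mentions = post.mentions.map(&:downcase)

  # a direct reply to a persona's post counts as mentioning that persona
  if post.reply_to_post_number && post.reply_to_post_user
    mentions << post.reply_to_post_user.username_lower
  end

  # a reply aimed at another human stays unanswered, even if a bot is mentioned
  return false if post.reply_to_post_number && !post.reply_to_post_user&.bot?

  mentionables.any? { |m| mentions.include?(m[:username]) }
end

human = User.new("sam", false)
persona = User.new("gpt4_bot", true)
mentionables = [{ username: "gpt4_bot" }]

puts bot_should_reply?(Post.new(["gpt4_bot"], 1, human), mentionables)   # => false (replying to a human)
puts bot_should_reply?(Post.new([], 2, persona), mentionables)           # => true  (replying to the persona)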
@@ -40,6 +40,12 @@ module DiscourseAi
         if mentionables.present?
           mentions = post.mentions.map(&:downcase)
+
+          # in case we are replying to a post by a bot
+          if post.reply_to_post_number && post.reply_to_post.user
+            mentions << post.reply_to_post.user.username_lower
+          end
+
           mentioned = mentionables.find { |mentionable| mentions.include?(mentionable[:username]) }
 
           # direct PM to mentionable
@@ -51,6 +57,11 @@ module DiscourseAi
           bot_user ||= User.find_by(id: mentioned[:user_id]) if mentioned
         end
+
+        if bot_user && post.reply_to_post_number && !post.reply_to_post.user&.bot?
+          # replying to a non-bot user
+          return
+        end
 
         if bot_user
           persona_id = mentioned&.dig(:id) || post.topic.custom_fields["ai_persona_id"]
           persona = nil
@@ -258,9 +258,8 @@ Follow the provided writing composition instructions carefully and precisely ste
       parsed
         .css("span.mention")
         .each do |mention|
-          mention.replace(
-            "<a href='/u/#{mention.text.sub("@", "")}' class='mention'>#{mention.text}</a>",
-          )
+          no_at_username = mention.text.sub("@", "")
+          mention.replace("<a href='/u/#{no_at_username}' class='mention'>#{no_at_username}</a>")
         end
 
       parsed.to_html
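Not part of the diff: a small standalone Nokogiri sketch of what the rewritten loop above does to a cooked mention. The sample input fragment and the puts output are illustrative, assuming a single mention span.

# Standalone sketch of the mention rewrite above; the input fragment is made up.
require "nokogiri"

parsed = Nokogiri::HTML.fragment('<p><span class="mention">@sam</span> is a person</p>')

parsed
  .css("span.mention")
  .each do |mention|
    no_at_username = mention.text.sub("@", "")
    mention.replace("<a href='/u/#{no_at_username}' class='mention'>#{no_at_username}</a>")
  end

puts parsed.to_html
# roughly: <p><a href="/u/sam" class="mention">sam</a> is a person</p>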
@@ -31,6 +31,17 @@ module DiscourseAi
           "#{claude_prov}:claude-instant-1",
           max_tokens: 100_000,
         )
+        foldable_models << Models::Anthropic.new(
+          "#{claude_prov}:claude-3-haiku",
+          max_tokens: 200_000,
+        )
+        foldable_models << Models::Anthropic.new(
+          "#{claude_prov}:claude-3-sonnet",
+          max_tokens: 200_000,
+        )
+
+        # no opus yet for AWS bedrock
+        foldable_models << Models::Anthropic.new("anthropic:claude-3-opus", max_tokens: 200_000)
 
         mixtral_prov = "hugging_face"
         if DiscourseAi::Completions::Endpoints::Vllm.correctly_configured?(
@@ -177,6 +177,33 @@ RSpec.describe DiscourseAi::AiBot::Playground do
 
         last_post.topic.reload
         expect(last_post.topic.allowed_users.pluck(:user_id)).to include(persona.user_id)
+
+        # does not reply if replying directly to a user
+        # nothing is mocked, so this would result in HTTP error
+        # if we were going to reply
+        create_post(
+          raw: "Please ignore this bot, I am replying to a user",
+          topic: post.topic,
+          user: admin,
+          reply_to_post_number: post.post_number,
+        )
+
+        # replies as correct persona if replying direct to persona
+        DiscourseAi::Completions::Llm.with_prepared_responses(
+          ["Another reply"],
+          llm: "open_ai:gpt-3.5-turbo-16k",
+        ) do
+          create_post(
+            raw: "Please ignore this bot, I am replying to a user",
+            topic: post.topic,
+            user: admin,
+            reply_to_post_number: last_post.post_number,
+          )
+        end
+
+        last_post = post.topic.posts.order(:post_number).last
+        expect(last_post.raw).to eq("Another reply")
+        expect(last_post.user_id).to eq(persona.user_id)
       end
     end
@@ -117,7 +117,7 @@ module DiscourseAi
 
       # note, magic surprise &amp; is correct HTML 5 representation
       expected = <<~HTML
-        <p><a href="/u/sam" class="mention">@sam</a> is a person<br>
+        <p><a href="/u/sam" class="mention">sam</a> is a person<br>
         <a href="/test?silent=true">test1</a> is an internal link<br>
         <a href="/test?1=2&amp;silent=true">test2</a> is an internal link<br>
         <a href="https://example.com" rel="noopener nofollow ugc">test3</a> is an external link<br>