FIX: Made bot more robust (#226)

* FIX: Made bot more robust

This is a collection of small fixes

- Display "Searching for: ..." while searching instead of showing "found 0 results".
- Only allow 5 commands in a chain - 6 feels like too much
- On the 5th command stop informing the engine about functions, so it is forced to complete
- Add another 30 tokens of buffer and explain why
- Typo in command prompt


Co-authored-by: Alan Guo Xiang Tan <gxtan1990@gmail.com>
This commit is contained in:
Sam 2023-09-14 16:46:56 +10:00 committed by GitHub
parent d35c8d5eca
commit 9e94457154
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 49 additions and 24 deletions

View File

@ -113,6 +113,7 @@ en:
description: "AI Bot with Google access that can research information for you" description: "AI Bot with Google access that can research information for you"
default_pm_prefix: "[Untitled AI bot PM]" default_pm_prefix: "[Untitled AI bot PM]"
topic_not_found: "Summary unavailable, topic not found!" topic_not_found: "Summary unavailable, topic not found!"
searching: "Searching for: '%{query}'"
command_summary: command_summary:
categories: "List categories" categories: "List categories"
search: "Search" search: "Search"

View File

@ -52,7 +52,7 @@ module DiscourseAi
attr_reader :bot_user attr_reader :bot_user
BOT_NOT_FOUND = Class.new(StandardError) BOT_NOT_FOUND = Class.new(StandardError)
MAX_COMPLETIONS = 6 MAX_COMPLETIONS = 5
def self.as(bot_user) def self.as(bot_user)
available_bots = [DiscourseAi::AiBot::OpenAiBot, DiscourseAi::AiBot::AnthropicBot] available_bots = [DiscourseAi::AiBot::OpenAiBot, DiscourseAi::AiBot::AnthropicBot]
@ -83,14 +83,6 @@ module DiscourseAi
post.topic.save_custom_fields post.topic.save_custom_fields
end end
def max_commands_per_reply=(val)
@max_commands_per_reply = val
end
def max_commands_per_reply
@max_commands_per_reply || 5
end
def reply_to( def reply_to(
post, post,
total_completions: 0, total_completions: 0,
@ -100,11 +92,14 @@ module DiscourseAi
) )
return if total_completions > MAX_COMPLETIONS return if total_completions > MAX_COMPLETIONS
@persona = DiscourseAi::AiBot::Personas::General.new # do not allow commands when we are at the end of chain (total completions == MAX_COMPLETIONS)
allow_commands = (total_completions < MAX_COMPLETIONS)
@persona = DiscourseAi::AiBot::Personas::General.new(allow_commands: allow_commands)
if persona_name = post.topic.custom_fields["ai_persona"] if persona_name = post.topic.custom_fields["ai_persona"]
persona_class = persona_class =
DiscourseAi::AiBot::Personas.all.find { |current| current.name == persona_name } DiscourseAi::AiBot::Personas.all.find { |current| current.name == persona_name }
@persona = persona_class.new if persona_class @persona = persona_class.new(allow_commands: allow_commands) if persona_class
end end
prompt = prompt =

View File

@ -117,7 +117,7 @@ module DiscourseAi::AiBot::Commands
@last_query = search_string @last_query = search_string
show_progress(localized_description) show_progress(I18n.t("discourse_ai.ai_bot.searching", query: search_string))
results = results =
Search.execute( Search.execute(
@ -145,7 +145,14 @@ module DiscourseAi::AiBot::Commands
search = Search.new(search_string, guardian: Guardian.new) search = Search.new(search_string, guardian: Guardian.new)
results = nil
begin
results = semantic_search.search_for_topics(search.term) results = semantic_search.search_for_topics(search.term)
rescue => e
Discourse.warn_exception(e, message: "Semantic search failed")
end
if results
results = search.apply_filters(results) results = search.apply_filters(results)
results.each do |post| results.each do |post|
@ -157,6 +164,7 @@ module DiscourseAi::AiBot::Commands
break if posts.length >= MAX_RESULTS break if posts.length >= MAX_RESULTS
end end
end end
end
@last_num_results = posts.length @last_num_results = posts.length
# this is the general pattern from core # this is the general pattern from core

View File

@ -16,8 +16,9 @@ module DiscourseAi
# note this is about 100 tokens over, OpenAI have a more optimal representation # note this is about 100 tokens over, OpenAI have a more optimal representation
@function_size ||= tokenize(available_functions.to_json).length @function_size ||= tokenize(available_functions.to_json).length
# provide a buffer of 50 tokens in case our counting is off # provide a buffer of 80 tokens - our function counting is not
buffer = @function_size + reply_params[:max_tokens] + 50 # 100% accurate so this is a trial and error number
buffer = @function_size + reply_params[:max_tokens] + 80
if bot_user.id == DiscourseAi::AiBot::EntryPoint::GPT4_ID if bot_user.id == DiscourseAi::AiBot::EntryPoint::GPT4_ID
8192 - buffer 8192 - buffer
@ -90,7 +91,7 @@ module DiscourseAi
fn = partial.dig(:choices, 0, :delta, :function_call) fn = partial.dig(:choices, 0, :delta, :function_call)
if fn if fn
functions.add_function(fn[:name]) if fn[:name].present? functions.add_function(fn[:name]) if fn[:name].present?
functions.add_argument_fragment(fn[:arguments]) if fn[:arguments].present? functions.add_argument_fragment(fn[:arguments]) if !fn[:arguments].nil?
end end
end end

View File

@ -22,11 +22,17 @@ module DiscourseAi
I18n.t("discourse_ai.ai_bot.personas.#{to_s.demodulize.underscore}.description") I18n.t("discourse_ai.ai_bot.personas.#{to_s.demodulize.underscore}.description")
end end
def initialize(allow_commands: true)
@allow_commands = allow_commands
end
def commands def commands
[] []
end end
def render_commands(render_function_instructions:) def render_commands(render_function_instructions:)
return +"" if !@allow_commands
result = +"" result = +""
if render_function_instructions if render_function_instructions
result << "\n" result << "\n"
@ -55,12 +61,15 @@ module DiscourseAi
end end
def available_commands def available_commands
return [] if !@allow_commands
return @available_commands if @available_commands return @available_commands if @available_commands
@available_commands = all_available_commands.filter { |cmd| commands.include?(cmd) } @available_commands = all_available_commands.filter { |cmd| commands.include?(cmd) }
end end
def available_functions def available_functions
return [] if !@allow_commands
# note if defined? can be a problem in test # note if defined? can be a problem in test
# this can never be nil so it is safe # this can never be nil so it is safe
return @available_functions if @available_functions return @available_functions if @available_functions

View File

@ -84,7 +84,7 @@ module ::DiscourseAi
- When you run a command/function you will gain access to real information in a subsequant call! - When you run a command/function you will gain access to real information in a subsequant call!
- NEVER EVER pretend to know stuff, you ALWAYS lean on functions to discover the truth! - NEVER EVER pretend to know stuff, you ALWAYS lean on functions to discover the truth!
- You have direct access to data on this forum using !functions - You have direct access to data on this forum using !functions
- You are not a lier, liers are bad bots, you are a good bot! - You are not a liar, liars are bad bots, you are a good bot!
- You always prefer to say "I don't know" as opposed to inventing a lie! - You always prefer to say "I don't know" as opposed to inventing a lie!
{ {

View File

@ -42,7 +42,6 @@ RSpec.describe DiscourseAi::AiBot::Bot do
describe "#reply_to" do describe "#reply_to" do
it "can respond to a search command" do it "can respond to a search command" do
bot.system_prompt_style!(:simple) bot.system_prompt_style!(:simple)
bot.max_commands_per_reply = 2
expected_response = { expected_response = {
function_call: { function_call: {

View File

@ -34,6 +34,18 @@ module DiscourseAi::AiBot::Personas
topic topic
end end
it "can disable commands via constructor" do
persona = TestPersona.new(allow_commands: false)
rendered =
persona.render_system_prompt(topic: topic_with_users, render_function_instructions: true)
expect(rendered).not_to include("!tags")
expect(rendered).not_to include("!search")
expect(persona.available_functions).to be_empty
end
it "renders the system prompt" do it "renders the system prompt" do
freeze_time freeze_time