discourse-ai/lib/modules/ai_bot/anthropic_bot.rb
Sam d85b503ed4
FIX: guide GPT 3.5 better (#77)
* FIX: guide GPT 3.5 better

This limits search results to 10 because we were blowing the whole token
budget on search results. Additionally, it includes a quick exchange at
the start of a session to try to guide GPT 3.5 into following instructions
(a rough sketch of the idea follows these commit notes).

Sadly, GPT 3.5 drifts off very quickly, but this does improve things a bit.

It also attempts to correct some issues with Anthropic, though it is still
surprisingly hard to ground.

* add status:public; this is a bit of a hack, but it ensures that we can
search with any filter provided (also covered in the sketch below)

* fix specs
2023-05-23 23:08:17 +10:00
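
The 10-result cap, the status:public prefix, and the seed exchange all live in
other files touched by this commit, not in anthropic_bot.rb below. The sketch
that follows is only an illustration of those three ideas; every name in it
(MAX_SEARCH_RESULTS, truncate_search_results, public_search_query,
seed_exchange) is hypothetical and not taken from the PR.

  MAX_SEARCH_RESULTS = 10

  # Surface only the first few hits so search results cannot eat the token
  # budget needed by the rest of the prompt.
  def truncate_search_results(results)
    results.take(MAX_SEARCH_RESULTS)
  end

  # Prepending status:public is a hack, but it means the search still returns
  # public results no matter which filters the model supplied.
  def public_search_query(query)
    "status:public #{query}"
  end

  # A short scripted exchange at the start of a session nudges GPT 3.5
  # (and, to a lesser extent, Claude) to keep following the bot's instructions
  # instead of drifting off.
  def seed_exchange
    [
      { role: "user", content: "Please follow the instructions you were given." },
      { role: "assistant", content: "Understood. I will follow them for the rest of this conversation." },
    ]
  end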


# frozen_string_literal: true

module DiscourseAi
  module AiBot
    class AnthropicBot < Bot
      def self.can_reply_as?(bot_user)
        bot_user.id == DiscourseAi::AiBot::EntryPoint::CLAUDE_V1_ID
      end

      # Claude takes a single flat prompt string, so join the prompt messages
      # built by the base class.
      def bot_prompt_with_topic_context(post)
        super(post).join("\n\n")
      end

      def prompt_limit
        7500 # https://console.anthropic.com/docs/prompt-design#what-is-a-prompt
      end

      # Each streamed partial carries the cumulative completion text, so track
      # how much has already been emitted and return only the new suffix.
      # Everything up to and including the leading "Assistant: " marker is
      # dropped once, the first time it appears.
      def get_delta(partial, context)
        context[:pos] ||= 0
        full = partial[:completion]
        delta = full[context[:pos]..-1]
        context[:pos] = full.length

        if !context[:processed]
          delta = ""
          index = full.index("Assistant: ")
          if index
            delta = full[index + 11..-1] # 11 == "Assistant: ".length
            context[:processed] = true
          end
        end

        delta
      end
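
      # Illustrative behaviour of the delta extraction above (example values
      # assumed, not taken from the source):
      #
      #   ctx = {}
      #   get_delta({ completion: "Assistant: Hel" }, ctx)
      #   # => "Hel"      (everything up to "Assistant: " is skipped once)
      #   get_delta({ completion: "Assistant: Hello there" }, ctx)
      #   # => "lo there" (only the newly streamed suffix is returned)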

      private

      # Map each post onto the Human:/Assistant: turn format Claude expects.
      def build_message(poster_username, content, system: false)
        role = poster_username == bot_user.username ? "Assistant" : "Human"
        "#{role}: #{content}"
      end
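
      # For illustration (usernames assumed, not from the source), with
      # bot_user.username == "claude_bot":
      #   build_message("alice", "What is Ruby?")    # => "Human: What is Ruby?"
      #   build_message("claude_bot", "A language.") # => "Assistant: A language."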

      def model_for
        "claude-v1"
      end

      # Ask Claude for a short completion to use as the updated topic title.
      def get_updated_title(prompt)
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.7,
          max_tokens: 40,
        ).dig(:completion)
      end

      def submit_prompt(prompt, prefer_low_cost: false, &blk)
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.4,
          max_tokens: 3000,
          &blk
        )
      end

      def tokenize(text)
        DiscourseAi::Tokenizer::AnthropicTokenizer.tokenize(text)
      end
    end
  end
end