# frozen_string_literal: true
module DiscourseAi
  module AiBot
    # Bot implementation backed by Anthropic's Claude completion API.
    # Formats prompts in Claude's "Human:/Assistant:" dialogue style and
    # streams deltas back through the shared Bot pipeline.
    class AnthropicBot < Bot
      def self.can_reply_as?(bot_user)
        bot_user.id == DiscourseAi::AiBot::EntryPoint::CLAUDE_V2_ID
      end

      def bot_prompt_with_topic_context(post, allow_commands:)
        # Claude expects a flat text prompt ending with an open "Assistant:" turn.
        messages = super(post, allow_commands: allow_commands)
        messages.join("\n\n") + "\n\nAssistant:"
      end

      def prompt_limit(allow_commands: true)
        # no side channel for commands, so we can ignore allow commands
        50_000 # https://console.anthropic.com/docs/prompt-design#what-is-a-prompt
      end

      def title_prompt(post)
        parts = super(post)
        parts.join("\n\n") + "\n\nAssistant:"
      end

      # Extracts the streamed text delta from a partial API response.
      # Claude prefixes its very first completion chunk with a single space;
      # trim it exactly once, tracking that via context[:processed_first].
      def get_delta(partial, context)
        delta = partial[:completion]

        unless context[:processed_first]
          if delta&.start_with?(" ")
            delta = delta.delete_prefix(" ")
            context[:processed_first] = true
          end
        end

        delta
      end

      def tokenizer
        DiscourseAi::Tokenizer::AnthropicTokenizer
      end

      private

      # Renders one conversation turn. System/function content is passed
      # through verbatim; everything else gets a "Human:"/"Assistant:" prefix
      # depending on who authored it.
      def build_message(poster_username, content, system: false, function: nil)
        return content if system || function

        speaker = poster_username == bot_user.username ? "Assistant" : "Human"
        "#{speaker}: #{content}"
      end

      def model_for
        "claude-2"
      end

      def get_updated_title(prompt)
        response =
          DiscourseAi::Inference::AnthropicCompletions.perform!(
            prompt,
            model_for,
            temperature: 0.4,
            max_tokens: 40,
          )
        response.dig(:completion)
      end

      def submit_prompt(prompt, post: nil, prefer_low_cost: false, &blk)
        # Stop before the model hallucinates the next human turn or runs
        # past the end of a function-call block.
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.4,
          max_tokens: 3000,
          post: post,
          stop_sequences: ["\n\nHuman:", "</function_calls>"],
          &blk
        )
      end
    end
  end
end