mirror of
https://github.com/discourse/discourse-ai.git
synced 2025-03-09 11:48:47 +00:00
OpenAI supports function calling, which has a very specific shape that other LLMs have not quite adopted. This simulates a command framework using system prompts on LLMs that are not OpenAI's. Features include: - Smart system prompt to steer the LLM - Parameter validation (we ensure all the params are specified correctly). This is being tested on Anthropic at the moment, and initial results are promising.
63 lines
1.4 KiB
Ruby
# frozen_string_literal: true
module DiscourseAi
  module AiBot
    # Bot implementation backed by Anthropic's Claude completion API.
    # Anthropic's text-completion endpoint expects a flat transcript in the
    # "Human: ... / Assistant: ..." format, so the prompt-building hooks
    # below render messages as plain role-prefixed strings.
    class AnthropicBot < Bot
      # This bot only answers as the Claude v2 bot user.
      def self.can_reply_as?(bot_user)
        DiscourseAi::AiBot::EntryPoint::CLAUDE_V2_ID == bot_user.id
      end

      # Claude requires the prompt to end with an open "Assistant:" turn
      # that the model then completes.
      def bot_prompt_with_topic_context(post)
        sections = super(post)
        "#{sections.join("\n\n")}\n\nAssistant:"
      end

      # Character budget for the assembled prompt.
      # https://console.anthropic.com/docs/prompt-design#what-is-a-prompt
      def prompt_limit
        50_000
      end

      # Title generation follows the same open "Assistant:" turn convention.
      def title_prompt(post)
        sections = super(post)
        "#{sections.join("\n\n")}\n\nAssistant:"
      end

      # Streamed partials carry the newly generated text under :completion.
      def get_delta(partial, context)
        partial[:completion]
      end

      private

      # Renders one transcript line as "Role: content". The system/function
      # keywords are accepted for interface parity with other bots but are
      # not used by the Anthropic format.
      def build_message(poster_username, content, system: false, function: nil)
        speaker = poster_username == bot_user.username ? "Assistant" : "Human"
        "#{speaker}: #{content}"
      end

      def model_for
        "claude-2"
      end

      # Low max_tokens / higher temperature: short, varied title suggestions.
      def get_updated_title(prompt)
        response =
          DiscourseAi::Inference::AnthropicCompletions.perform!(
            prompt,
            model_for,
            temperature: 0.7,
            max_tokens: 40,
          )
        response.dig(:completion)
      end

      # Main completion call; streams chunks to blk when one is given.
      def submit_prompt(prompt, prefer_low_cost: false, &blk)
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.4,
          max_tokens: 3000,
          &blk
        )
      end

      def tokenize(text)
        DiscourseAi::Tokenizer::AnthropicTokenizer.tokenize(text)
      end
    end
  end
end