Mirror of https://github.com/discourse/discourse-ai.git, synced 2025-03-07 01:39:54 +00:00
This fixes 2 big issues:

1. No matter how hard you try, grounding the Anthropic title prompt is just too hard. This works around it by only looking at the last sentence the model returns and treating it as the title (see the sketch below).
2. Non-English locales would be stuck with a "generic" title; this ensures every bot message gets a title, using a custom field to track it.

Also, slightly tunes some Anthropic prompts.
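The last-sentence workaround itself lives outside this file, where the title completion is consumed. A minimal sketch of the idea, assuming it runs in the bot with access to post and topic; the cleanup chain and names are illustrative only:

# Sketch only: Claude tends to prefix the title with commentary
# ("Sure, here is a title: ..."), so keep just the last sentence/line.
raw_title = get_updated_title(title_prompt(post)).to_s.strip
topic.title =
  raw_title
    .split(/\n|(?<=[.!?])\s+/) # break into lines / sentences
    .map(&:strip)
    .reject(&:empty?)
    .last
    .to_s
    .delete_prefix('"')
    .delete_suffix('"')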
63 lines · 1.4 KiB · Ruby
# frozen_string_literal: true

module DiscourseAi
  module AiBot
    class AnthropicBot < Bot
      def self.can_reply_as?(bot_user)
        bot_user.id == DiscourseAi::AiBot::EntryPoint::CLAUDE_V2_ID
      end

      def bot_prompt_with_topic_context(post)
        # Anthropic's completion API expects the prompt to end with "\n\nAssistant:"
        # so the model knows it is its turn to reply.
        super(post).join("\n\n") + "\n\nAssistant:"
      end

      def prompt_limit
        50_000 # https://console.anthropic.com/docs/prompt-design#what-is-a-prompt
      end

      def title_prompt(post)
        super(post).join("\n\n") + "\n\nAssistant:"
      end

      def get_delta(partial, context)
        # Streamed chunks expose the generated text under :completion.
        partial[:completion]
      end

      private

      def build_message(poster_username, content, system: false, function: nil)
        # system: and function: are accepted for interface parity but unused here;
        # Claude's completion format only has Human and Assistant turns.
        role = poster_username == bot_user.username ? "Assistant" : "Human"

        "#{role}: #{content}"
      end

      def model_for
        "claude-2"
      end

      def get_updated_title(prompt)
        # Short, low-temperature completion used to title the topic.
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.4,
          max_tokens: 40,
        ).dig(:completion)
      end

      def submit_prompt(prompt, prefer_low_cost: false, &blk)
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.4,
          max_tokens: 3000,
          &blk
        )
      end

      def tokenize(text)
        DiscourseAi::Tokenizer::AnthropicTokenizer.tokenize(text)
      end
    end
  end
end
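For reference, the prompt shape these helpers produce, with made-up usernames and content (build_message is private, so this is purely illustrative):

# Illustrative only: what bot_prompt_with_topic_context ultimately sends
# to Claude for a small topic.
messages = [
  build_message("sam", "How do I rename a topic?"),
  build_message(bot_user.username, "Click the pencil icon next to the title."),
  build_message("sam", "Can moderators do it for me?"),
]

prompt = messages.join("\n\n") + "\n\nAssistant:"
# => "Human: How do I rename a topic?
#
#     Assistant: Click the pencil icon next to the title.
#
#     Human: Can moderators do it for me?
#
#     Assistant:"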