mirror of
https://github.com/discourse/discourse-ai.git
synced 2025-03-09 11:48:47 +00:00
Using RAG fragments can lead to considerably big system messages, which becomes problematic when models have a smaller context window. Before this change, we only looked at the rest of the conversation to make sure we don't surpass the limit, which could lead to two unwanted scenarios when having large system messages: either all other messages are excluded due to size, or the system message already exceeds the limit on its own. As a result, I'm putting a hard limit of 60% of available tokens. We don't want to aggressively truncate because if RAG fragments are included, the system message contains a lot of context to improve the model response, but we also want to make room for the recent messages in the conversation.
54 lines
1.6 KiB
Ruby
54 lines
1.6 KiB
Ruby
# frozen_string_literal: true
# Minimal concrete dialect used only by the specs below: it exposes the
# protected trim_messages helper publicly and lets each example pin its
# own token budget via max_prompt_tokens.
class TestDialect < DiscourseAi::Completions::Dialects::Dialect
  # Token budget consulted by trim_messages; settable per example.
  attr_accessor :max_prompt_tokens

  # Public wrapper around the protected trim_messages implementation.
  def trim(messages) = trim_messages(messages)

  # Tokenizer used when counting message tokens during trimming.
  def tokenizer = DiscourseAi::Tokenizer::OpenAiTokenizer
end
RSpec.describe DiscourseAi::Completions::Dialects::Dialect do
  describe "#trim_messages" do
    # Counted as exactly five tokens by the OpenAI tokenizer.
    let(:five_token_msg) { "This represents five tokens." }

    # Builds a TestDialect around +prompt+ with the given token budget.
    def dialect_with_budget(prompt, budget)
      TestDialect.new(prompt, "test").tap { |dialect| dialect.max_prompt_tokens = budget }
    end

    it "should trim tool messages if tool_calls are trimmed" do
      prompt = DiscourseAi::Completions::Prompt.new(five_token_msg)
      [
        { type: :user, content: five_token_msg },
        { type: :tool_call, content: five_token_msg, id: 1 },
        { type: :tool, content: five_token_msg, id: 1 },
        { type: :user, content: five_token_msg },
      ].each { |message| prompt.push(**message) }

      # Budget fits the user messages and the tool_call message only, so the
      # tool_call gets trimmed — and its paired :tool reply must go with it.
      dialect = dialect_with_budget(prompt, 15)

      expect(dialect.trim(prompt.messages)).to eq(
        [{ type: :system, content: five_token_msg }, { type: :user, content: five_token_msg }],
      )
    end

    it "limits the system message to 60% of available tokens" do
      prompt = DiscourseAi::Completions::Prompt.new("I'm a system message consisting of 10 tokens")
      prompt.push(type: :user, content: five_token_msg)

      dialect = dialect_with_budget(prompt, 15)

      # With a 15-token budget, the system message is capped at 60% of it and
      # therefore truncated, leaving room for the user message.
      expect(dialect.trim(prompt.messages)).to eq(
        [
          { type: :system, content: "I'm a system message consisting of 10" },
          { type: :user, content: five_token_msg },
        ],
      )
    end
  end
end
|