discourse-ai/spec/lib/modules/summarization/fold_content_spec.rb
Roman Rizzi ec97996905
FIX/REFACTOR: FoldContent revamp (#866)
* FIX/REFACTOR: FoldContent revamp

We hit a snag with our hot topic gist strategy: the regex we used to split the content didn't work, so we couldn't send the original post separately. Sending it separately was important for letting the model focus on what's new in the topic.

The algorithm doesn't give us full control over how prompts are written, and figuring out how to format the content isn't straightforward. This forces us into complicated workarounds, like the regex splitting above.

To tackle this, I'm suggesting we simplify the approach a bit. Let's focus on summarizing as much as we can upfront, then gradually add new content until there's nothing left to summarize.

Also, the "extend" step is mostly needed for models with small context windows, which shouldn't come up 99% of the time given the content volume we're dealing with.
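A minimal sketch of that folding loop, assuming hypothetical `fits_in_context_window?` and `summarize_in_one_call` helpers (these names stand in for the real FoldContent internals, which live in the strategy class):

# Rough sketch only; `fits_in_context_window?` and `summarize_in_one_call`
# are hypothetical helpers, not the actual FoldContent API.
def fold(contents, llm, user)
  summary = nil
  remaining = contents.dup

  until remaining.empty?
    # Take at least one item, then keep adding while the batch plus the
    # running summary still fits in the model's context window.
    batch = [remaining.shift]
    while remaining.any? && fits_in_context_window?(summary, batch + [remaining.first])
      batch << remaining.shift
    end

    # One completion call summarizes the previous summary together with
    # the new batch, extending it until nothing is left to process.
    summary = summarize_in_one_call(llm, user, summary, batch)
  end

  summary
end

The spec below exercises exactly these two paths: one completion when everything fits, two when the summary has to be extended.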

* Fix fold docs

* Use #shift instead of #pop to get the first elem, not the last
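For context on that last fix, `Array#pop` takes from the tail while `Array#shift` takes from the head, and folding must consume content in chronological order:

posts = ["first post", "reply 1", "reply 2"]
posts.shift # => "first post" (head: oldest content, what folding needs)
posts.pop   # => "reply 2"    (tail: newest content, what the bug grabbed)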
2024-10-25 11:51:17 -03:00


# frozen_string_literal: true

RSpec.describe DiscourseAi::Summarization::FoldContent do
  subject(:summarizer) { DiscourseAi::Summarization.topic_summary(topic) }

  describe "#summarize" do
    let!(:llm_model) { assign_fake_provider_to(:ai_summarization_model) }

    fab!(:topic) { Fabricate(:topic, highest_post_number: 2) }
    fab!(:post_1) { Fabricate(:post, topic: topic, post_number: 1, raw: "This is a text") }

    before do
      SiteSetting.ai_summarization_enabled = true

      # Make sure each content fits in a single chunk.
      # 700 is the number of tokens reserved for the prompt.
      model_tokens =
        700 +
          DiscourseAi::Tokenizer::OpenAiTokenizer.size(
            "(1 #{post_1.user.username_lower} said: This is a text ",
          ) + 3

      llm_model.update!(max_prompt_tokens: model_tokens)
    end

    let(:single_summary) { "single" }
    let(:concatenated_summary) { "this is a concatenated summary" }
    let(:user) { User.new }

    context "when the content to summarize fits in a single call" do
      it "does one call to summarize content" do
        result =
          DiscourseAi::Completions::Llm.with_prepared_responses([single_summary]) do |spy|
            summarizer.summarize(user).tap { expect(spy.completions).to eq(1) }
          end

        expect(result.summarized_text).to eq(single_summary)
      end
    end

    context "when the content to summarize doesn't fit in a single call" do
      fab!(:post_2) { Fabricate(:post, topic: topic, post_number: 2, raw: "This is a text") }

      it "keeps extending the summary until there is nothing else to process" do
        result =
          DiscourseAi::Completions::Llm.with_prepared_responses(
            [single_summary, concatenated_summary],
          ) { |spy| summarizer.summarize(user).tap { expect(spy.completions).to eq(2) } }

        expect(result.summarized_text).to eq(concatenated_summary)
      end
    end
  end
end