REFACTOR: Cohesive narrative for single-chunk summaries. (#103)

Single-chunk and multi-chunk summaries currently end up using different prompts to produce the final summary, which leads to inconsistent formats. This change detects when the content to summarize fits in a single chunk and uses a slightly different prompt for that case, producing more consistent summaries.

This PR also moves the chunk-splitting step to the `FoldContent` strategy as preparation for implementing streamed summaries.
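
Concretely, the strategy now splits the content first and only folds when more than one chunk comes out. A rough sketch of the new flow (see the `FoldContent` diff below for the real code):

    chunks = split_into_chunks(content[:contents])

    if chunks.length == 1
      # Single chunk: one tailored "cohesive narrative" prompt, no concatenation step.
      completion_model.summarize_single(chunks.first[:summary], opts)
    else
      summaries = completion_model.summarize_in_chunks(chunks, opts)
      completion_model.concatenate_summaries(summaries)
    end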
Roman Rizzi 2023-07-13 17:05:41 -03:00 committed by GitHub
parent 48d880d3c8
commit 5f0c617880
9 changed files with 131 additions and 76 deletions

View File

@@ -36,13 +36,17 @@ module DiscourseAi
         instructions = build_base_prompt(opts)

         text_to_summarize = contents.map { |c| format_content_item(c) }.join
-        truncated_content = tokenizer.truncate(text_to_summarize, max_tokens - reserved_tokens)
+        truncated_content = tokenizer.truncate(text_to_summarize, available_tokens)

         instructions += "<input>#{truncated_content}</input>\nAssistant:\n"

         completion(instructions)
       end

+      def summarize_single(chunk_text, opts)
+        summarize_chunk(chunk_text, opts.merge(single_chunk: true))
+      end
+
       private

       def summarize_chunk(chunk_text, opts)
@@ -50,8 +54,15 @@ module DiscourseAi
       end

       def build_base_prompt(opts)
+        initial_instruction =
+          if opts[:single_chunk]
+            "Summarize the following forum discussion inside the given <input> tag, creating a cohesive narrative."
+          else
+            "Summarize the following forum discussion inside the given <input> tag."
+          end
+
         base_prompt = <<~TEXT
-          Human: Summarize the following forum discussion inside the given <input> tag.
+          Human: #{initial_instruction}
           Include only the summary inside <ai> tags.
         TEXT
@@ -63,7 +74,7 @@ module DiscourseAi
           :content_title
         ]

-        base_prompt += "Don't use more than 400 words.\n"
+        base_prompt += "Don't use more than 400 words.\n" unless opts[:single_chunk]
       end

       def completion(prompt)
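
For the single-chunk case, the assembled Anthropic prompt ends up looking roughly like this (post content hypothetical; opts such as content_title may append extra instructions):

    Human: Summarize the following forum discussion inside the given <input> tag, creating a cohesive narrative.
    Include only the summary inside <ai> tags.
    <input>(1 user1 said: Hello world </input>
    Assistant: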

View File

@@ -21,32 +21,11 @@ module DiscourseAi
         raise NotImplemented
       end

-      def summarize_in_chunks(contents, opts)
-        chunks = []
-
-        section = { ids: [], summary: "" }
-
-        contents.each do |item|
-          new_content = format_content_item(item)
-
-          if tokenizer.can_expand_tokens?(
-               section[:summary],
-               new_content,
-               max_tokens - reserved_tokens,
-             )
-            section[:summary] += new_content
-            section[:ids] << item[:id]
-          else
-            chunks << section
-            section = { ids: [item[:id]], summary: new_content }
-          end
-        end
-
-        chunks << section if section[:summary].present?
-
-        chunks.each { |chunk| chunk[:summary] = summarize_chunk(chunk[:summary], opts) }
-
-        chunks
+      def summarize_in_chunks(chunks, opts)
+        chunks.map do |chunk|
+          chunk[:summary] = summarize_chunk(chunk[:summary], opts)
+          chunk
+        end
       end
@@ -57,13 +36,7 @@ module DiscourseAi
         raise NotImplemented
       end

-      attr_reader :model
-
-      protected
-
-      attr_reader :max_tokens
-
-      def summarize_chunk(_chunk_text, _opts)
+      def summarize_single(chunk_text, opts)
         raise NotImplemented
       end
@@ -71,11 +44,29 @@ module DiscourseAi
         "(#{item[:id]} #{item[:poster]} said: #{item[:text]} "
       end

+      def available_tokens
+        max_tokens - reserved_tokens
+      end
+
+      attr_reader :model, :max_tokens
+
+      protected

       def reserved_tokens
         # Reserve tokens for the response and the base prompt
         # ~500 words
         700
       end

+      def summarize_chunk(_chunk_text, _opts)
+        raise NotImplemented
+      end
+
       def tokenizer
         raise NotImplemented
       end
+
+      delegate :can_expand_tokens?, to: :tokenizer
     end
   end
 end
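
With these helpers in the base class, a concrete model only has to supply `max_tokens` and a `tokenizer`; `available_tokens` then sets aside 700 tokens (~500 words) for the base prompt and the response. A minimal hypothetical subclass to illustrate the budget arithmetic (class name, constructor, and namespace assumed, not from this commit):

    class TinyModel < DiscourseAi::Summarization::Models::Base
      def initialize
        @max_tokens = 4096 # hypothetical budget
      end

      def tokenizer
        DiscourseAi::Tokenizer::BertTokenizer
      end
    end

    TinyModel.new.available_tokens # => 3396 (4096 - 700 reserved)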

View File

@@ -29,11 +29,15 @@ module DiscourseAi
       def summarize_with_truncation(contents, opts)
         text_to_summarize = contents.map { |c| format_content_item(c) }.join
         truncated_content =
-          ::DiscourseAi::Tokenizer::BertTokenizer.truncate(text_to_summarize, max_tokens)
+          ::DiscourseAi::Tokenizer::BertTokenizer.truncate(text_to_summarize, available_tokens)

         completion(truncated_content)
       end

+      def summarize_single(chunk_text, _opts)
+        completion(chunk_text)
+      end
+
       private

       def summarize_chunk(chunk_text, _opts)

View File

@@ -37,7 +37,7 @@ module DiscourseAi
         messages = [{ role: "system", content: build_base_prompt(opts) }]

         text_to_summarize = contents.map { |c| format_content_item(c) }.join
-        truncated_content = tokenizer.truncate(text_to_summarize, max_tokens - reserved_tokens)
+        truncated_content = tokenizer.truncate(text_to_summarize, available_tokens)

         messages << {
           role: "user",
@@ -47,13 +47,24 @@ module DiscourseAi
         completion(messages)
       end

+      def summarize_single(chunk_text, opts)
+        summarize_chunk(chunk_text, opts.merge(single_chunk: true))
+      end
+
       private

       def summarize_chunk(chunk_text, opts)
+        summary_instruction =
+          if opts[:single_chunk]
+            "Summarize the following forum discussion, creating a cohesive narrative:"
+          else
+            "Summarize the following in 400 words:"
+          end
+
         completion(
           [
             { role: "system", content: build_base_prompt(opts) },
-            { role: "user", content: "Summarize the following in 400 words:\n#{chunk_text}" },
+            { role: "user", content: "#{summary_instruction}\n#{chunk_text}" },
           ],
         )
       end
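
For reference, a single-chunk request now sends a message pair shaped like this (system prompt abbreviated; post content hypothetical):

    [
      { role: "system", content: "You are a summarization bot. ..." },
      {
        role: "user",
        content: "Summarize the following forum discussion, creating a cohesive narrative:\n(1 user1 said: Hello world ",
      },
    ]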

View File

@@ -18,11 +18,45 @@ module DiscourseAi
       def summarize(content)
         opts = content.except(:contents)

-        summaries = completion_model.summarize_in_chunks(content[:contents], opts)
+        chunks = split_into_chunks(content[:contents])

-        return { summary: summaries.first[:summary], chunks: [] } if summaries.length == 1
-
-        { summary: completion_model.concatenate_summaries(summaries), chunks: summaries }
+        if chunks.length == 1
+          { summary: completion_model.summarize_single(chunks.first[:summary], opts), chunks: [] }
+        else
+          summaries = completion_model.summarize_in_chunks(chunks, opts)
+
+          { summary: completion_model.concatenate_summaries(summaries), chunks: summaries }
+        end
       end

       private

+      def split_into_chunks(contents)
+        section = { ids: [], summary: "" }
+
+        chunks =
+          contents.reduce([]) do |sections, item|
+            new_content = completion_model.format_content_item(item)
+
+            if completion_model.can_expand_tokens?(
+                 section[:summary],
+                 new_content,
+                 completion_model.available_tokens,
+               )
+              section[:summary] += new_content
+              section[:ids] << item[:id]
+            else
+              sections << section
+              section = { ids: [item[:id]], summary: new_content }
+            end
+
+            sections
+          end
+
+        chunks << section if section[:summary].present?
+
+        chunks
+      end
     end
   end
 end
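
To illustrate the split: two posts that fit the token budget together fold into one section and take the summarize_single path; if the budget is too small for both, they split and go through summarize_in_chunks plus concatenate_summaries. A rough example (contents hypothetical):

    contents = [
      { id: 1, poster: "user1", text: "Hello world" },
      { id: 2, poster: "user2", text: "Hello back" },
    ]

    # Large available_tokens:
    # => [{ ids: [1, 2], summary: "(1 user1 said: Hello world (2 user2 said: Hello back " }]
    # Small available_tokens:
    # => [{ ids: [1], summary: "(1 user1 said: Hello world " },
    #     { ids: [2], summary: "(2 user2 said: Hello back " }]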

View File

@@ -16,6 +16,10 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
     }
   end

+  def as_chunk(item)
+    { ids: [item[:id]], summary: "(#{item[:id]} #{item[:poster]} said: #{item[:text]} " }
+  end
+
   def expected_messages(contents, opts)
     base_prompt = <<~TEXT
       Human: Summarize the following forum discussion inside the given <input> tag.
@@ -43,8 +47,8 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
         "<ai>This is summary 1</ai>",
       )

-      summarized_chunks =
-        model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
+      chunks = content[:contents].map { |c| as_chunk(c) }
+      summarized_chunks = model.summarize_in_chunks(chunks, opts).map { |c| c[:summary] }

      expect(summarized_chunks).to contain_exactly("This is summary 1")
    end
@@ -66,8 +70,8 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
        )
      end

-      summarized_chunks =
-        model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
+      chunks = content[:contents].map { |c| as_chunk(c) }
+      summarized_chunks = model.summarize_in_chunks(chunks, opts).map { |c| c[:summary] }

      expect(summarized_chunks).to contain_exactly("This is summary 1", "This is summary 2")
    end
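
The as_chunk helper mirrors what split_into_chunks now hands to the model, so the specs can feed pre-built chunks directly. For a hypothetical item it produces:

    as_chunk(id: 1, poster: "asdf", text: "This is a text")
    # => { ids: [1], summary: "(1 asdf said: This is a text " }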

View File

@@ -32,6 +32,10 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
       end
     end
   end

+  def as_chunk(item)
+    { ids: [item[:id]], summary: "(#{item[:id]} #{item[:poster]} said: #{item[:text]} " }
+  end
+
   describe "#summarize_in_chunks" do
     context "when the content fits in a single chunk" do
       it "performs a request to summarize" do
@@ -39,8 +43,8 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
        stub_request(expected_messages(content[:contents], opts), "This is summary 1")

-        summarized_chunks =
-          model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
+        chunks = content[:contents].map { |c| as_chunk(c) }
+        summarized_chunks = model.summarize_in_chunks(chunks, opts).map { |c| c[:summary] }

        expect(summarized_chunks).to contain_exactly("This is summary 1")
      end
@@ -59,8 +63,8 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
          stub_request(expected_messages([item], opts), "This is summary #{idx + 1}")
        end

-        summarized_chunks =
-          model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
+        chunks = content[:contents].map { |c| as_chunk(c) }
+        summarized_chunks = model.summarize_in_chunks(chunks, opts).map { |c| c[:summary] }

        expect(summarized_chunks).to contain_exactly("This is summary 1", "This is summary 2")
      end

View File

@@ -16,6 +16,10 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
     }
   end

+  def as_chunk(item)
+    { ids: [item[:id]], summary: "(#{item[:id]} #{item[:poster]} said: #{item[:text]} " }
+  end
+
   def expected_messages(contents, opts)
     base_prompt = <<~TEXT
       You are a summarization bot.
@@ -46,8 +50,8 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
        "This is summary 1",
      )

-      summarized_chunks =
-        model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
+      chunks = content[:contents].map { |c| as_chunk(c) }
+      summarized_chunks = model.summarize_in_chunks(chunks, opts).map { |c| c[:summary] }

      expect(summarized_chunks).to contain_exactly("This is summary 1")
    end
@@ -69,8 +73,8 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
        )
      end

-      summarized_chunks =
-        model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
+      chunks = content[:contents].map { |c| as_chunk(c) }
+      summarized_chunks = model.summarize_in_chunks(chunks, opts).map { |c| c[:summary] }

      expect(summarized_chunks).to contain_exactly("This is summary 1", "This is summary 2")
    end

View File

@@ -4,38 +4,26 @@ class DummyCompletionModel
   SINGLE_SUMMARY = "this is a single summary"
   CONCATENATED_SUMMARIES = "this is a concatenated summary"

-  def initialize(prompt_length)
-    @max_length = prompt_length
+  def initialize(max_tokens)
     @summarization_calls = 0
+    @available_tokens = max_tokens
   end

-  attr_reader :max_length, :summarization_calls
+  attr_reader :max_length, :summarization_calls, :available_tokens
+
+  delegate :can_expand_tokens?, to: :tokenizer

-  def summarize_in_chunks(contents, opts)
-    chunks = []
-
-    section = { ids: [], summary: "" }
-
-    contents.each do |item|
-      new_content = "(#{item[:id]} #{item[:poster]} said: #{item[:text]} "
-
-      if tokenizer.can_expand_tokens?(section[:summary], new_content, max_length)
-        section[:summary] += new_content
-        section[:ids] << item[:id]
-      else
-        chunks << section
-        section = { id: [item[:id]], summary: new_content }
-      end
-    end
-
-    chunks << section if section[:summary].present?
-
-    chunks.each do |chunk|
+  def summarize_single(single_chunk, opts)
+    @summarization_calls += 1
+
+    SINGLE_SUMMARY
+  end
+
+  def summarize_in_chunks(chunks, opts)
+    chunks.map do |chunk|
       chunk[:summary] = SINGLE_SUMMARY
       @summarization_calls += 1
+      chunk
     end
-
-    chunks
   end

   def concatenate_summaries(summaries)
@@ -48,6 +36,10 @@ class DummyCompletionModel
     SINGLE_SUMMARY
   end

+  def format_content_item(item)
+    "(#{item[:id]} #{item[:poster]} said: #{item[:text]} "
+  end
+
   def tokenizer
     DiscourseAi::Tokenizer::BertTokenizer
   end
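
A strategy spec can then exercise both paths with this dummy; roughly (constructor argument and strategy namespace assumed, not taken from the actual spec):

    model = DummyCompletionModel.new(100)
    strategy = DiscourseAi::Summarization::Strategies::FoldContent.new(model)

    result = strategy.summarize(content)
    # content fitting one chunk => result[:summary] == DummyCompletionModel::SINGLE_SUMMARY
    # content spanning chunks   => result[:summary] == DummyCompletionModel::CONCATENATED_SUMMARIES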