DEV: Split content based on LlmModel's max_output_tokens (#1456)

In discourse/discourse-translator#249 we introduced splitting content (post.raw) prior to sending it for translation, since we were using a sync API.

Now that we're streaming (thanks to #1424), we'll chunk based on LlmModel.max_output_tokens.
This commit is contained in:
Natalie Tay 2025-06-23 21:11:20 +08:00 committed by GitHub
parent 740be26625
commit 683bb5725b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 86 additions and 89 deletions

View File

@ -49,7 +49,7 @@ module Jobs
# do nothing, there are too many sporadic lookup failures # do nothing, there are too many sporadic lookup failures
rescue => e rescue => e
DiscourseAi::Translation::VerboseLogger.log( DiscourseAi::Translation::VerboseLogger.log(
"Failed to translate post #{post.id} to #{locale}: #{e.message}", "Failed to translate post #{post.id} to #{locale}: #{e.message}\n\n#{e.backtrace[0..3].join("\n")}",
) )
end end
end end

View File

@ -47,7 +47,7 @@ module Jobs
# do nothing, there are too many sporadic lookup failures # do nothing, there are too many sporadic lookup failures
rescue => e rescue => e
DiscourseAi::Translation::VerboseLogger.log( DiscourseAi::Translation::VerboseLogger.log(
"Failed to translate topic #{topic.id} to #{locale}: #{e.message}", "Failed to translate topic #{topic.id} to #{locale}: #{e.message}\n\n#{e.backtrace[0..3].join("\n")}",
) )
end end
end end

View File

@ -40,7 +40,7 @@ module Jobs
# do nothing, there are too many sporadic lookup failures # do nothing, there are too many sporadic lookup failures
rescue => e rescue => e
DiscourseAi::Translation::VerboseLogger.log( DiscourseAi::Translation::VerboseLogger.log(
"Failed to translate category #{category.id} to #{locale}: #{e.message}", "Failed to translate category #{category.id} to #{locale}: #{e.message}\n\n#{e.backtrace[0..3].join("\n")}",
) )
ensure ensure
remaining_limit -= 1 remaining_limit -= 1

View File

@ -65,7 +65,7 @@ module Jobs
# do nothing, there are too many sporadic lookup failures # do nothing, there are too many sporadic lookup failures
rescue => e rescue => e
DiscourseAi::Translation::VerboseLogger.log( DiscourseAi::Translation::VerboseLogger.log(
"Failed to translate post #{post.id} to #{locale}: #{e.message}", "Failed to translate post #{post.id} to #{locale}: #{e.message}\n\n#{e.backtrace[0..3].join("\n")}",
) )
end end
end end

View File

@ -62,7 +62,7 @@ module Jobs
# do nothing, there are too many sporadic lookup failures # do nothing, there are too many sporadic lookup failures
rescue => e rescue => e
DiscourseAi::Translation::VerboseLogger.log( DiscourseAi::Translation::VerboseLogger.log(
"Failed to translate topic #{topic.id} to #{locale}: #{e.message}", "Failed to translate topic #{topic.id} to #{locale}: #{e.message}\n\n#{e.backtrace[0..3].join("\n")}",
) )
end end
end end

View File

@ -15,26 +15,34 @@ module DiscourseAi
if (ai_persona = AiPersona.find_by(id: persona_setting)).blank? if (ai_persona = AiPersona.find_by(id: persona_setting)).blank?
return nil return nil
end end
translation_user = ai_persona.user || Discourse.system_user
persona_klass = ai_persona.class_instance persona_klass = ai_persona.class_instance
persona = persona_klass.new persona = persona_klass.new
llm_model = LlmModel.find_by(id: preferred_llm_model(persona_klass)) model = LlmModel.find_by(id: preferred_llm_model(persona_klass))
return nil if llm_model.blank? return nil if model.blank?
bot = bot = DiscourseAi::Personas::Bot.as(translation_user, persona:, model:)
DiscourseAi::Personas::Bot.as(
ai_persona.user || Discourse.system_user,
persona: persona,
model: llm_model,
)
ContentSplitter
.split(content: @text, chunk_size: model.max_output_tokens)
.map { |text| get_translation(text:, bot:, translation_user:) }
.join("")
end
private
def formatted_content(content)
{ content:, target_locale: @target_locale }.to_json
end
def get_translation(text:, bot:, translation_user:)
context = context =
DiscourseAi::Personas::BotContext.new( DiscourseAi::Personas::BotContext.new(
user: ai_persona.user || Discourse.system_user, user: translation_user,
skip_tool_details: true, skip_tool_details: true,
feature_name: "translation", feature_name: "translation",
messages: [{ type: :user, content: formatted_content }], messages: [{ type: :user, content: formatted_content(text) }],
topic: @topic, topic: @topic,
post: @post, post: @post,
) )
@ -47,12 +55,6 @@ module DiscourseAi
structured_output&.read_buffered_property(:translation) structured_output&.read_buffered_property(:translation)
end end
def formatted_content
{ content: @text, target_locale: @target_locale }.to_json
end
private
def persona_setting def persona_setting
raise NotImplementedError raise NotImplementedError
end end

View File

@ -3,7 +3,7 @@
module DiscourseAi module DiscourseAi
module Translation module Translation
class ContentSplitter class ContentSplitter
CHUNK_SIZE = 3000 DEFAULT_CHUNK_SIZE = 8192
BBCODE_PATTERNS = [ BBCODE_PATTERNS = [
%r{\[table.*?\].*?\[/table\]}m, %r{\[table.*?\].*?\[/table\]}m,
@ -23,16 +23,17 @@ module DiscourseAi
/\s+/, # any whitespace /\s+/, # any whitespace
].freeze ].freeze
def self.split(content) def self.split(content:, chunk_size: DEFAULT_CHUNK_SIZE)
return [] if content.nil? return [] if content.nil?
return [""] if content.empty? return [""] if content.empty?
return [content] if content.length <= CHUNK_SIZE chunk_size ||= DEFAULT_CHUNK_SIZE
return [content] if content.length <= chunk_size
chunks = [] chunks = []
remaining = content.dup remaining = content.dup
while remaining.present? while remaining.present?
chunk = extract_mixed_chunk(remaining) chunk = extract_mixed_chunk(remaining, size: chunk_size)
break if chunk.empty? break if chunk.empty?
chunks << chunk chunks << chunk
remaining = remaining[chunk.length..-1] remaining = remaining[chunk.length..-1]
@ -43,9 +44,8 @@ module DiscourseAi
private private
def self.extract_mixed_chunk(text, size: CHUNK_SIZE) def self.extract_mixed_chunk(text, size:)
return text if text.length <= size return text if text.length <= size
flexible_size = size * 1.5
# try each splitting strategy in order # try each splitting strategy in order
split_point = split_point =
@ -54,7 +54,7 @@ module DiscourseAi
-> { find_nearest_bbcode_end_index(text, size) }, -> { find_nearest_bbcode_end_index(text, size) },
-> { find_text_boundary(text, size) }, -> { find_text_boundary(text, size) },
-> { size }, -> { size },
].lazy.map(&:call).compact.find { |pos| pos <= flexible_size } ].lazy.map(&:call).compact.find { |pos| pos <= size }
text[0...split_point] text[0...split_point]
end end
@ -64,13 +64,15 @@ module DiscourseAi
begin begin
doc = Nokogiri::HTML5.fragment(text) doc = Nokogiri::HTML5.fragment(text)
current_length = 0 max_length_within_target = 0
doc.children.each do |node| doc.children.each do |node|
html = node.to_html html = node.to_html
end_pos = current_length + html.length end_pos = max_length_within_target + html.length
return end_pos if end_pos > target_pos if (max_length_within_target > 0 && end_pos > target_pos)
current_length = end_pos return max_length_within_target
end
max_length_within_target = end_pos
end end
nil nil
rescue Nokogiri::SyntaxError rescue Nokogiri::SyntaxError
@ -79,13 +81,15 @@ module DiscourseAi
end end
def self.find_nearest_bbcode_end_index(text, target_pos) def self.find_nearest_bbcode_end_index(text, target_pos)
max_length_within_target = 0
BBCODE_PATTERNS.each do |pattern| BBCODE_PATTERNS.each do |pattern|
text.scan(pattern) do |_| text.scan(pattern) do |_|
match = $~ match = $~
tag_start = match.begin(0)
tag_end = match.end(0) tag_end = match.end(0)
if (max_length_within_target > 0 && tag_end > target_pos)
return tag_end if tag_start <= target_pos && tag_end > target_pos return max_length_within_target
end
max_length_within_target = tag_end
end end
end end

View File

@ -11,11 +11,7 @@ module DiscourseAi
return if post.raw.length > SiteSetting.ai_translation_max_post_length return if post.raw.length > SiteSetting.ai_translation_max_post_length
target_locale = target_locale.to_s.sub("-", "_") target_locale = target_locale.to_s.sub("-", "_")
translated_raw = translated_raw = PostRawTranslator.new(text: post.raw, target_locale:, post:).translate
ContentSplitter
.split(post.raw)
.map { |text| PostRawTranslator.new(text:, target_locale:, post:).translate }
.join("")
localization = localization =
PostLocalization.find_or_initialize_by(post_id: post.id, locale: target_locale) PostLocalization.find_or_initialize_by(post_id: post.id, locale: target_locale)

View File

@ -1,7 +1,5 @@
# frozen_string_literal: true # frozen_string_literal: true
require "rails_helper"
describe DiscourseAi::Translation::BaseTranslator do describe DiscourseAi::Translation::BaseTranslator do
let!(:persona) do let!(:persona) do
AiPersona.find( AiPersona.find(
@ -28,7 +26,7 @@ describe DiscourseAi::Translation::BaseTranslator do
DiscourseAi::Translation::PostRawTranslator.new(text:, target_locale:, post:) DiscourseAi::Translation::PostRawTranslator.new(text:, target_locale:, post:)
allow(DiscourseAi::Completions::Prompt).to receive(:new).with( allow(DiscourseAi::Completions::Prompt).to receive(:new).with(
persona.system_prompt, persona.system_prompt,
messages: array_including({ type: :user, content: post_translator.formatted_content }), messages: array_including({ type: :user, content: a_string_including(text) }),
post_id: post.id, post_id: post.id,
topic_id: post.topic_id, topic_id: post.topic_id,
).and_call_original ).and_call_original

View File

@ -1,95 +1,92 @@
# frozen_string_literal: true # frozen_string_literal: true
describe DiscourseAi::Translation::ContentSplitter do describe DiscourseAi::Translation::ContentSplitter do
let(:original_limit) { 4000 }
after { described_class.const_set(:CHUNK_SIZE, original_limit) }
def set_limit(value)
described_class.const_set(:CHUNK_SIZE, value)
end
it "returns empty array for empty input" do it "returns empty array for empty input" do
expect(described_class.split("")).to eq([""]) expect(described_class.split(content: "")).to eq([""])
end end
it "handles content with only spaces" do it "handles content with only spaces" do
expect(described_class.split(" ")).to eq([" "]) expect(described_class.split(content: " ")).to eq([" "])
expect(described_class.split(" ")).to eq([" "]) expect(described_class.split(content: " ")).to eq([" "])
end end
it "handles nil input" do it "handles nil input" do
expect(described_class.split(nil)).to eq([]) expect(described_class.split(content: nil)).to eq([])
end end
it "doesn't split content under limit" do it "doesn't split content under limit" do
text = "hello world" content = "hello world"
expect(described_class.split(text)).to eq([text]) expect(described_class.split(content:, chunk_size: 20)).to eq([content])
end
it "splits to max chunk size if unsplittable" do
content = "a" * 100
expect(described_class.split(content:, chunk_size: 10)).to eq(["a" * 10] * 10)
end end
it "preserves HTML tags" do it "preserves HTML tags" do
set_limit(10) content = "<p>hello</p><p>meow</p>"
text = "<p>hello</p><p>meow</p>" expect(described_class.split(content:, chunk_size: 15)).to eq(%w[<p>hello</p> <p>meow</p>])
expect(described_class.split(text)).to eq(%w[<p>hello</p> <p>meow</p>])
set_limit(35) content = "<div>hello</div> <div>jurassic</div> <p>world</p>"
text = "<div>hello</div> <div>jurassic</div> <p>world</p>" expect(described_class.split(content:, chunk_size: 40)).to eq(
expect(described_class.split(text)).to eq( ["<div>hello</div> <div>jurassic</div> ", "<p>world</p>"],
["<div>hello</div> <div>jurassic</div>", " <p>world</p>"],
) )
end end
it "preserves BBCode tags" do it "preserves BBCode tags" do
set_limit(20) content = "[quote]hello[/quote][details]world[/details]"
text = "[quote]hello[/quote][details]world[/details]" expect(described_class.split(content:, chunk_size: 25)).to eq(
expect(described_class.split(text)).to eq(["[quote]hello[/quote]", "[details]world[/details]"]) ["[quote]hello[/quote]", "[details]world[/details]"],
)
end end
it "doesn't split in middle of words" do it "doesn't split in middle of words" do
set_limit(10) content = "my kitty best in the world"
text = "my kitty best in the world" expect(described_class.split(content:, chunk_size: 10)).to eq(
expect(described_class.split(text)).to eq(["my kitty ", "best in ", "the world"]) ["my kitty ", "best in ", "the world"],
)
end end
it "handles nested tags properly" do it "handles nested tags properly" do
set_limit(25) content = "<div>hello<p>cat</p>world</div><p>meow</p>"
text = "<div>hello<p>cat</p>world</div><p>meow</p>" expect(described_class.split(content:, chunk_size: 35)).to eq(
expect(described_class.split(text)).to eq(%w[<div>hello<p>cat</p>world</div> <p>meow</p>]) %w[<div>hello<p>cat</p>world</div> <p>meow</p>],
)
end end
it "handles mixed HTML and BBCode" do it "handles mixed HTML and BBCode" do
set_limit(15) content = "<div>hello</div>[quote]world[/quote]<p>beautiful</p>"
text = "<div>hello</div>[quote]world[/quote]<p>beautiful</p>" expect(described_class.split(content:, chunk_size: 20)).to eq(
expect(described_class.split(text)).to eq(
["<div>hello</div>", "[quote]world[/quote]", "<p>beautiful</p>"], ["<div>hello</div>", "[quote]world[/quote]", "<p>beautiful</p>"],
) )
end end
it "preserves newlines in sensible places" do it "preserves newlines in sensible places" do
set_limit(10) content = "hello\nbeautiful\nworld\n"
text = "hello\nbeautiful\nworld\n" expect(described_class.split(content:, chunk_size: 10)).to eq(
expect(described_class.split(text)).to eq(["hello\n", "beautiful\n", "world\n"]) ["hello\n", "beautiful\n", "world\n"],
)
end end
it "handles email content properly" do it "handles email content properly" do
set_limit(20) content = "From: test@test.com\nTo: other@test.com\nSubject: Hello\n\nContent here"
text = "From: test@test.com\nTo: other@test.com\nSubject: Hello\n\nContent here" expect(described_class.split(content:, chunk_size: 20)).to eq(
expect(described_class.split(text)).to eq(
["From: test@test.com\n", "To: other@test.com\n", "Subject: Hello\n\n", "Content here"], ["From: test@test.com\n", "To: other@test.com\n", "Subject: Hello\n\n", "Content here"],
) )
end end
it "keeps code blocks intact" do it "keeps code blocks intact" do
set_limit(30) content = "Text\n```\ncode block\nhere\n```\nmore text"
text = "Text\n```\ncode block\nhere\n```\nmore text" expect(described_class.split(content:, chunk_size: 30)).to eq(
expect(described_class.split(text)).to eq(["Text\n```\ncode block\nhere\n```\n", "more text"]) ["Text\n```\ncode block\nhere\n```\n", "more text"],
)
end end
context "with multiple details tags" do context "with multiple details tags" do
it "splits correctly between details tags" do it "splits correctly between details tags" do
set_limit(30) content = "<details>first content</details><details>second content</details>"
text = "<details>first content</details><details>second content</details>" expect(described_class.split(content:, chunk_size: 35)).to eq(
expect(described_class.split(text)).to eq(
["<details>first content</details>", "<details>second content</details>"], ["<details>first content</details>", "<details>second content</details>"],
) )
end end