Mirror of https://github.com/discourse/discourse-ai.git, synced 2025-07-02 12:32:14 +00:00.
In discourse/discourse-translator#249 we introduced splitting content (post.raw) prior to sending it for translation, as we were using a sync API. Now that we're streaming thanks to #1424, we'll chunk based on the LlmModel.max_output_tokens.
29 lines · 942 B · Ruby
# frozen_string_literal: true
module DiscourseAi
  module Translation
    # Translates a post's raw markdown into a target locale and stores the
    # result as a PostLocalization record.
    class PostLocalizer
      # Localizes +post+ into +target_locale+ (defaults to the current
      # I18n.locale).
      #
      # Returns the saved PostLocalization, or nil when localization is
      # skipped: blank post/locale/raw, the post already being in the target
      # locale, or the raw text exceeding the
      # ai_translation_max_post_length site setting.
      def self.localize(post, target_locale = I18n.locale)
        return if post.blank? || target_locale.blank?
        return if post.locale == target_locale.to_s || post.raw.blank?
        return if post.raw.length > SiteSetting.ai_translation_max_post_length

        # Normalize e.g. "en-GB" to "en_GB" to match the stored locale format.
        locale = target_locale.to_s.sub("-", "_")

        translated = PostRawTranslator.new(text: post.raw, target_locale: locale, post: post).translate

        record = PostLocalization.find_or_initialize_by(post_id: post.id, locale: locale)
        record.raw = translated
        record.cooked = PrettyText.cook(translated)
        record.post_version = post.version
        record.localizer_user_id = Discourse.system_user.id
        record.save!
        record
      end
    end
  end
end