# frozen_string_literal: true

module DiscourseAi
  module Embeddings
    module Strategies
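      # Builds the text fed to an embeddings model by concatenating a
      # target's content and truncating it to the model's input limit.
      #
      # Usage sketch (the tokenizer and max_length values depend on the
      # embeddings model configured for the site; 512 is illustrative):
      #   Truncation.new.prepare_text_from(topic, tokenizer, 512)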
      class Truncation
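        # Numeric identifier for this strategy.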
        def id
          1
        end
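
        # Version of this strategy; presumably bumped when the prepared text
        # changes shape, so stored embeddings can be regenerated.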
        def version
          1
        end
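
        # Dispatches on the target type: topics and posts are assembled from
        # several sources before truncation, while RAG document fragments are
        # already split small and only need truncating.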
        def prepare_text_from(target, tokenizer, max_length)
          case target
          when Topic
            topic_truncation(target, tokenizer, max_length)
          when Post
            post_truncation(target, tokenizer, max_length)
          when RagDocumentFragment
            tokenizer.truncate(target.fragment, max_length)
          else
            raise ArgumentError, "Invalid target type"
          end
        end

        private
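
        # Collects topic metadata (title, category name, tag names) into a
        # plain-text preamble, each piece separated by a blank line.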
        def topic_information(topic)
          info = +""

          if topic&.title.present?
            info << topic.title
            info << "\n\n"
          end
          if topic&.category&.name.present?
            info << topic.category.name
            info << "\n\n"
          end
          if SiteSetting.tagging_enabled && topic&.tags.present?
            info << topic.tags.pluck(:name).join(", ")
            info << "\n\n"
          end

          info
        end
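
        # Concatenates the topic preamble, any cached embed content, and the
        # plain text of each post in order, stopping once the token budget is
        # reached, then truncates the result to max_length tokens.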
        def topic_truncation(topic, tokenizer, max_length)
          text = +topic_information(topic)

          if topic&.topic_embed&.embed_content_cache.present?
            text << Nokogiri::HTML5.fragment(topic.topic_embed.embed_content_cache).text
            text << "\n\n"
          end

          topic.posts.find_each do |post|
            text << Nokogiri::HTML5.fragment(post.cooked).text
            break if tokenizer.size(text) >= max_length # maybe keep a partial counter to speed this up?
            text << "\n\n"
          end

          tokenizer.truncate(text, max_length)
        end
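
        # Like topic_truncation, but for a single post: the first post of an
        # embedded topic uses the cached embed content; any other post uses its
        # own cooked HTML converted to plain text.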
        def post_truncation(post, tokenizer, max_length)
          text = +topic_information(post.topic)

          if post.is_first_post? && post.topic&.topic_embed&.embed_content_cache.present?
            text << Nokogiri::HTML5.fragment(post.topic.topic_embed.embed_content_cache).text
          else
            text << Nokogiri::HTML5.fragment(post.cooked).text
          end

          tokenizer.truncate(text, max_length)
        end
      end
    end
  end
end