# frozen_string_literal: true

module DiscourseAi
  module Embeddings
    module VectorRepresentations
      # Vector representation for the multilingual-e5-large embedding model
      # (1024-dimension vectors, 512-token context).
      class MultilingualE5Large < Base
        def vector_from(text)
          if DiscourseAi::Inference::HuggingFaceTextEmbeddings.configured?
            # Truncate with room left for the model's special tokens.
            truncated_text = tokenizer.truncate(text, max_sequence_length - 2)
            DiscourseAi::Inference::HuggingFaceTextEmbeddings.perform!(truncated_text).first
          elsif discourse_embeddings_endpoint.present?
            # E5 models expect a "query: " / "passage: " prefix on input text.
            DiscourseAi::Inference::DiscourseClassifier.perform!(
              "#{discourse_embeddings_endpoint}/api/v1/classify",
              name,
              "query: #{text}",
              SiteSetting.ai_embeddings_discourse_service_api_key,
            )
          else
            raise "No inference endpoint configured"
          end
        end

        def id
          3
        end

        def version
          1
        end

        def name
          "multilingual-e5-large"
        end

        def dimensions
          1024
        end

        def max_sequence_length
          512
        end

        # pgvector cosine distance operator.
        def pg_function
          "<=>"
        end

        # pgvector operator class used for the index.
        def pg_index_type
          "vector_cosine_ops"
        end

        def tokenizer
          DiscourseAi::Tokenizer::MultilingualE5LargeTokenizer
        end
      end
    end
  end
end