mirror of
https://github.com/discourse/discourse-ai.git
synced 2025-03-08 02:12:14 +00:00
In a previous refactor, we moved the responsibility of querying and storing embeddings into the `Schema` class. Now, it's time for embedding generation. The motivation behind these changes is to isolate vector characteristics in simple objects to later replace them with a DB-backed version, similar to what we did with LLM configs.
57 lines
1.2 KiB
Ruby
# frozen_string_literal: true
|
|
|
|
module DiscourseAi
  module Embeddings
    module VectorRepresentations
      # Vector representation backed by the all-mpnet-base-v2 sentence-transformer
      # model, served via the Discourse embeddings classification service.
      class AllMpnetBaseV2 < Base
        # Model identifier; also used to key the shared inference client.
        def self.name
          "all-mpnet-base-v2"
        end

        # Configured when at least one service endpoint setting (SRV record or
        # plain URL) is present.
        def self.correctly_configured?
          SiteSetting.ai_embeddings_discourse_service_api_endpoint_srv.present? ||
            SiteSetting.ai_embeddings_discourse_service_api_endpoint.present?
        end

        # Site settings this representation depends on (surfaced to admins when
        # configuration is incomplete).
        def self.dependant_setting_names
          %w[
            ai_embeddings_discourse_service_api_key
            ai_embeddings_discourse_service_api_endpoint_srv
            ai_embeddings_discourse_service_api_endpoint
          ]
        end

        # Dimensionality of the embedding vectors produced by this model.
        def dimensions
          768
        end

        # Maximum input length, in tokens, accepted by the model.
        def max_sequence_length
          384
        end

        # Numeric identifier for this representation — presumably persisted
        # alongside stored embeddings; verify against the Schema class.
        def id
          1
        end

        def version
          1
        end

        # pgvector distance operator ("<#>" is negative inner product).
        def pg_function
          "<#>"
        end

        def tokenizer
          DiscourseAi::Tokenizer::AllMpnetBaseV2Tokenizer
        end

        # Shared classifier client, keyed by the model name above.
        def inference_client
          DiscourseAi::Inference::DiscourseClassifier.instance(self.class.name)
        end
      end
    end
  end
end
|