From b515b4f66d7d348fd04ba2b161f25e9762454e17 Mon Sep 17 00:00:00 2001 From: Keegan George Date: Fri, 8 Mar 2024 08:02:50 -0800 Subject: [PATCH] FEATURE: AI Quick Semantic Search (#501) This PR adds AI semantic search to the search popup available on every page. It depends on several new and optional settings, like per-post embeddings and a reranker model, so this is an experimental endeavour. --------- Co-authored-by: Rafael Silva --- .../embeddings/embeddings_controller.rb | 28 +++++++ .../ai-quick-search-loader.gjs | 15 ++++ .../ai-quick-semantic-search.gjs | 76 +++++++++++++++++++ .../ai-quick-search-info.gjs | 31 ++++++++ .../discourse/services/quick-search.js | 7 ++ .../embeddings/common/semantic-search.scss | 9 +++ config/locales/client.en.yml | 2 + config/locales/server.en.yml | 1 + config/routes.rb | 1 + config/settings.yml | 11 ++- lib/embeddings/semantic_search.rb | 69 +++++++++++++++++ .../all_mpnet_base_v2.rb | 2 +- lib/embeddings/vector_representations/base.rb | 51 ++++++++++--- .../vector_representations/bge_large_en.rb | 8 +- .../vector_representations/gemini.rb | 2 +- .../multilingual_e5_large.rb | 2 +- .../text_embedding_3_large.rb | 2 +- .../text_embedding_3_small.rb | 2 +- .../text_embedding_ada_002.rb | 2 +- lib/inference/hugging_face_text_embeddings.rb | 73 +++++++++++++----- 20 files changed, 355 insertions(+), 39 deletions(-) create mode 100644 assets/javascripts/discourse/connectors/search-menu-before-advanced-search/ai-quick-search-loader.gjs create mode 100644 assets/javascripts/discourse/connectors/search-menu-initial-options/ai-quick-semantic-search.gjs create mode 100644 assets/javascripts/discourse/connectors/search-menu-results-top/ai-quick-search-info.gjs create mode 100644 assets/javascripts/discourse/services/quick-search.js diff --git a/app/controllers/discourse_ai/embeddings/embeddings_controller.rb b/app/controllers/discourse_ai/embeddings/embeddings_controller.rb index a512bc55..122727e3 100644 --- a/app/controllers/discourse_ai/embeddings/embeddings_controller.rb +++ b/app/controllers/discourse_ai/embeddings/embeddings_controller.rb @@ -36,6 +36,34 @@ module DiscourseAi render_serialized(grouped_results, GroupedSearchResultSerializer, result: grouped_results) end end + + def quick_search + query = params[:q].to_s + + if query.length < SiteSetting.min_search_term_length + raise Discourse::InvalidParameters.new(:q) + end + + grouped_results = + Search::GroupedSearchResults.new( + type_filter: SEMANTIC_SEARCH_TYPE, + term: query, + search_context: guardian, + use_pg_headlines_for_excerpt: false, + ) + + semantic_search = DiscourseAi::Embeddings::SemanticSearch.new(guardian) + + if !semantic_search.cached_query?(query) + RateLimiter.new(current_user, "semantic-search", 60, 1.minutes).performed!
+ end + + hijack do + semantic_search.quick_search(query).each { |topic_post| grouped_results.add(topic_post) } + + render_serialized(grouped_results, GroupedSearchResultSerializer, result: grouped_results) + end + end end end end diff --git a/assets/javascripts/discourse/connectors/search-menu-before-advanced-search/ai-quick-search-loader.gjs b/assets/javascripts/discourse/connectors/search-menu-before-advanced-search/ai-quick-search-loader.gjs new file mode 100644 index 00000000..84b34281 --- /dev/null +++ b/assets/javascripts/discourse/connectors/search-menu-before-advanced-search/ai-quick-search-loader.gjs @@ -0,0 +1,15 @@ +import Component from "@glimmer/component"; +import { inject as service } from "@ember/service"; +import loadingSpinner from "discourse/helpers/loading-spinner"; + +export default class AiQuickSearchLoader extends Component { + @service quickSearch; + + +} diff --git a/assets/javascripts/discourse/connectors/search-menu-initial-options/ai-quick-semantic-search.gjs b/assets/javascripts/discourse/connectors/search-menu-initial-options/ai-quick-semantic-search.gjs new file mode 100644 index 00000000..7beda333 --- /dev/null +++ b/assets/javascripts/discourse/connectors/search-menu-initial-options/ai-quick-semantic-search.gjs @@ -0,0 +1,76 @@ +import Component from "@glimmer/component"; +import { action } from "@ember/object"; +import { inject as service } from "@ember/service"; +import AssistantItem from "discourse/components/search-menu/results/assistant-item"; +import { ajax } from "discourse/lib/ajax"; +import { popupAjaxError } from "discourse/lib/ajax-error"; +import { isValidSearchTerm, translateResults } from "discourse/lib/search"; +import i18n from "discourse-common/helpers/i18n"; + +export default class AiQuickSemanticSearch extends Component { + static shouldRender(_args, { siteSettings }) { + return siteSettings.ai_embeddings_semantic_quick_search_enabled; + } + + @service search; + @service quickSearch; + @service siteSettings; + + @action + async searchTermChanged() { + if (!this.search.activeGlobalSearchTerm) { + this.search.noResults = false; + this.search.results = {}; + this.quickSearch.loading = false; + this.quickSearch.invalidTerm = false; + } else if ( + !isValidSearchTerm(this.search.activeGlobalSearchTerm, this.siteSettings) + ) { + this.search.noResults = true; + this.search.results = {}; + this.quickSearch.loading = false; + this.quickSearch.invalidTerm = true; + return; + } else { + await this.performSearch(); + } + } + + async performSearch() { + this.quickSearch.loading = true; + this.quickSearch.invalidTerm = false; + + try { + const results = await ajax(`/discourse-ai/embeddings/quick-search`, { + data: { + q: this.search.activeGlobalSearchTerm, + }, + }); + + const searchResults = await translateResults(results); + + if (searchResults) { + this.search.noResults = results.resultTypes.length === 0; + this.search.results = searchResults; + } + } catch (error) { + popupAjaxError(error); + } finally { + this.quickSearch.loading = false; + } + } + + +} diff --git a/assets/javascripts/discourse/connectors/search-menu-results-top/ai-quick-search-info.gjs b/assets/javascripts/discourse/connectors/search-menu-results-top/ai-quick-search-info.gjs new file mode 100644 index 00000000..714791de --- /dev/null +++ b/assets/javascripts/discourse/connectors/search-menu-results-top/ai-quick-search-info.gjs @@ -0,0 +1,31 @@ +import Component from "@glimmer/component"; +import { inject as service } from "@ember/service"; +import { isValidSearchTerm } from 
"discourse/lib/search"; +import i18n from "discourse-common/helpers/i18n"; + +export default class AiQuickSearchInfo extends Component { + @service search; + @service siteSettings; + @service quickSearch; + + get termTooShort() { + // We check the validity again here because the input may have changed + // since the last time we checked, so we may want to stop showing the error + const validity = !isValidSearchTerm( + this.search.activeGlobalSearchTerm, + this.siteSettings + ); + + return ( + validity && + this.quickSearch.invalidTerm && + this.search.activeGlobalSearchTerm?.length > 0 + ); + } + + +} diff --git a/assets/javascripts/discourse/services/quick-search.js b/assets/javascripts/discourse/services/quick-search.js new file mode 100644 index 00000000..9ab9a3c0 --- /dev/null +++ b/assets/javascripts/discourse/services/quick-search.js @@ -0,0 +1,7 @@ +import { tracked } from "@glimmer/tracking"; +import Service from "@ember/service"; + +export default class QuickSearch extends Service { + @tracked loading = false; + @tracked invalidTerm = false; +} diff --git a/assets/stylesheets/modules/embeddings/common/semantic-search.scss b/assets/stylesheets/modules/embeddings/common/semantic-search.scss index 3ba771de..c41c9ca8 100644 --- a/assets/stylesheets/modules/embeddings/common/semantic-search.scss +++ b/assets/stylesheets/modules/embeddings/common/semantic-search.scss @@ -72,3 +72,12 @@ } } } + +// Hides other buttons and only shows loader +// while AI quick search is in progress +.search-input { + .ai-quick-search-spinner ~ a.clear-search, + .ai-quick-search-spinner ~ a.show-advanced-search { + display: none; + } +} diff --git a/config/locales/client.en.yml b/config/locales/client.en.yml index 9029e0e8..f608a8c6 100644 --- a/config/locales/client.en.yml +++ b/config/locales/client.en.yml @@ -211,6 +211,8 @@ en: none: "Sorry, our AI search found no matching topics." new: "Press 'Search' to begin looking for new results with AI" ai_generated_result: "Search result found using AI" + quick_search: + suffix: "in all topics and posts with AI" ai_bot: pm_warning: "AI chatbot messages are monitored regularly by moderators." diff --git a/config/locales/server.en.yml b/config/locales/server.en.yml index 4ecf5f2c..484d76d0 100644 --- a/config/locales/server.en.yml +++ b/config/locales/server.en.yml @@ -84,6 +84,7 @@ en: ai_embeddings_backfill_batch_size: "Number of embeddings to backfill every 15 minutes." ai_embeddings_pg_connection_string: "PostgreSQL connection string for the embeddings module. Needs pgvector extension enabled and a series of tables created. See docs for more info." ai_embeddings_semantic_search_enabled: "Enable full-page semantic search." + ai_embeddings_semantic_quick_search_enabled: "Enable semantic search option in search menu popup." 
ai_embeddings_semantic_related_include_closed_topics: "Include closed topics in semantic search results" ai_embeddings_semantic_search_hyde_model: "Model used to expand keywords to get better results during a semantic search" ai_embeddings_per_post_enabled: Generate embeddings for each post diff --git a/config/routes.rb b/config/routes.rb index a360cd69..7bf59b5f 100644 --- a/config/routes.rb +++ b/config/routes.rb @@ -12,6 +12,7 @@ DiscourseAi::Engine.routes.draw do scope module: :embeddings, path: "/embeddings", defaults: { format: :json } do get "semantic-search" => "embeddings#search" + get "quick-search" => "embeddings#quick_search" end scope module: :ai_bot, path: "/ai-bot", defaults: { format: :json } do diff --git a/config/settings.yml b/config/settings.yml index 90d47d2b..d3e4467b 100644 --- a/config/settings.yml +++ b/config/settings.yml @@ -137,6 +137,12 @@ discourse_ai: default: "" hidden: true ai_hugging_face_tei_api_key: "" + ai_hugging_face_tei_reranker_endpoint: + default: "" + ai_hugging_face_tei_reranker_endpoint_srv: + default: "" + hidden: true + ai_hugging_face_tei_reranker_api_key: "" ai_google_custom_search_api_key: default: "" secret: true @@ -232,7 +238,6 @@ discourse_ai: - "llava" - "open_ai:gpt-4-vision-preview" - ai_embeddings_enabled: default: false client: true @@ -282,6 +287,10 @@ discourse_ai: allow_any: false enum: "DiscourseAi::Configuration::LlmEnumerator" validator: "DiscourseAi::Configuration::LlmValidator" + ai_embeddings_semantic_quick_search_enabled: + default: false + client: true + validator: "DiscourseAi::Configuration::LlmDependencyValidator" ai_summarization_discourse_service_api_endpoint: "" ai_summarization_discourse_service_api_endpoint_srv: diff --git a/lib/embeddings/semantic_search.rb b/lib/embeddings/semantic_search.rb index 2af2e7d1..e13ada39 100644 --- a/lib/embeddings/semantic_search.rb +++ b/lib/embeddings/semantic_search.rb @@ -82,6 +82,75 @@ module DiscourseAi guardian.filter_allowed_categories(query_filter_results) end + def quick_search(query) + max_semantic_results_per_page = 100 + search = Search.new(query, { guardian: guardian }) + search_term = search.term + + return [] if search_term.nil? 
|| search_term.length < SiteSetting.min_search_term_length + + strategy = DiscourseAi::Embeddings::Strategies::Truncation.new + vector_rep = + DiscourseAi::Embeddings::VectorRepresentations::Base.current_representation(strategy) + + digest = OpenSSL::Digest::SHA1.hexdigest(search_term) + + embedding_key = + build_embedding_key( + digest, + SiteSetting.ai_embeddings_semantic_search_hyde_model, + SiteSetting.ai_embeddings_model, + ) + + search_term_embedding = + Discourse + .cache + .fetch(embedding_key, expires_in: 1.week) do + vector_rep.vector_from(search_term, asymetric: true) + end + + candidate_post_ids = + vector_rep.asymmetric_posts_similarity_search( + search_term_embedding, + limit: max_semantic_results_per_page, + offset: 0, + ) + + semantic_results = + ::Post + .where(post_type: ::Topic.visible_post_types(guardian.user)) + .public_posts + .where("topics.visible") + .where(id: candidate_post_ids) + .order("array_position(ARRAY#{candidate_post_ids}, posts.id)") + + filtered_results = search.apply_filters(semantic_results) + + rerank_posts_payload = + filtered_results + .map(&:cooked) + .map { Nokogiri::HTML5.fragment(_1).text } + .map { _1.truncate(2000, omission: "") } + + reranked_results = + DiscourseAi::Inference::HuggingFaceTextEmbeddings.rerank( + search_term, + rerank_posts_payload, + ) + + reordered_ids = reranked_results.map { _1[:index] }.map { filtered_results[_1].id }.take(5) + + reranked_semantic_results = + ::Post + .where(post_type: ::Topic.visible_post_types(guardian.user)) + .public_posts + .where("topics.visible") + .where(id: reordered_ids) + .order("array_position(ARRAY#{reordered_ids}, posts.id)") + + guardian.filter_allowed_categories(reranked_semantic_results) + end + private attr_reader :guardian diff --git a/lib/embeddings/vector_representations/all_mpnet_base_v2.rb b/lib/embeddings/vector_representations/all_mpnet_base_v2.rb index 5d5793f2..60560ff4 100644 --- a/lib/embeddings/vector_representations/all_mpnet_base_v2.rb +++ b/lib/embeddings/vector_representations/all_mpnet_base_v2.rb @@ -23,7 +23,7 @@ module DiscourseAi end end - def vector_from(text) + def vector_from(text, asymetric: false) DiscourseAi::Inference::DiscourseClassifier.perform!( "#{discourse_embeddings_endpoint}/api/v1/classify", self.class.name, diff --git a/lib/embeddings/vector_representations/base.rb b/lib/embeddings/vector_representations/base.rb index 61c24789..205639b2 100644 --- a/lib/embeddings/vector_representations/base.rb +++ b/lib/embeddings/vector_representations/base.rb @@ -54,6 +54,7 @@ module DiscourseAi count = DB.query_single("SELECT count(*) FROM #{table_name};").first lists = [count < 1_000_000 ? count / 1000 : Math.sqrt(count).to_i, 10].max probes = [count < 1_000_000 ? 
lists / 10 : Math.sqrt(lists).to_i, 1].max + Discourse.cache.write("#{table_name}-probes", probes) existing_index = DB.query_single(<<~SQL, index_name: index_name).first SELECT @@ -144,18 +145,9 @@ module DiscourseAi DB.exec("COMMENT ON INDEX #{index_name} IS '#{Time.now.to_i}';") DB.exec("RESET work_mem;") DB.exec("RESET maintenance_work_mem;") - - database = DB.query_single("SELECT current_database();").first - - # This is a global setting, if we set it based on post count - # we will be unable to use the index for topics - # Hopefully https://github.com/pgvector/pgvector/issues/235 will make this better - if table_name == topic_table_name - DB.exec("ALTER DATABASE #{database} SET ivfflat.probes = #{probes};") - end end - def vector_from(text) + def vector_from(text, asymetric: false) raise NotImplementedError end @@ -206,6 +198,7 @@ module DiscourseAi def asymmetric_topics_similarity_search(raw_vector, limit:, offset:, return_distance: false) results = DB.query(<<~SQL, query_embedding: raw_vector, limit: limit, offset: offset) + #{probes_sql(topic_table_name)} SELECT topic_id, embeddings #{pg_function} '[:query_embedding]' AS distance @@ -227,8 +220,37 @@ module DiscourseAi raise MissingEmbeddingError end + def asymmetric_posts_similarity_search(raw_vector, limit:, offset:, return_distance: false) + results = DB.query(<<~SQL, query_embedding: raw_vector, limit: limit, offset: offset) + #{probes_sql(post_table_name)} + SELECT + post_id, + embeddings #{pg_function} '[:query_embedding]' AS distance + FROM + #{post_table_name} + INNER JOIN + posts AS p ON p.id = post_id + INNER JOIN + topics AS t ON t.id = p.topic_id AND t.archetype = 'regular' + ORDER BY + embeddings #{pg_function} '[:query_embedding]' + LIMIT :limit + OFFSET :offset + SQL + + if return_distance + results.map { |r| [r.post_id, r.distance] } + else + results.map(&:post_id) + end + rescue PG::Error => e + Rails.logger.error("Error #{e} querying embeddings for model #{name}") + raise MissingEmbeddingError + end + def symmetric_topics_similarity_search(topic) DB.query(<<~SQL, topic_id: topic.id).map(&:topic_id) + #{probes_sql(topic_table_name)} SELECT topic_id FROM @@ -275,6 +297,11 @@ module DiscourseAi "#{table_name}_search" end + def probes_sql(table_name) + probes = Discourse.cache.read("#{table_name}-probes") + probes.present? ? "SET LOCAL ivfflat.probes TO #{probes};" : "" + end + def name raise NotImplementedError end @@ -303,6 +330,10 @@ module DiscourseAi raise NotImplementedError end + def asymmetric_query_prefix + raise NotImplementedError + end + protected def save_to_db(target, vector, digest) diff --git a/lib/embeddings/vector_representations/bge_large_en.rb b/lib/embeddings/vector_representations/bge_large_en.rb index cf7adec4..bdc2f157 100644 --- a/lib/embeddings/vector_representations/bge_large_en.rb +++ b/lib/embeddings/vector_representations/bge_large_en.rb @@ -30,7 +30,9 @@ module DiscourseAi end end - def vector_from(text) + def vector_from(text, asymetric: false) + text = "#{asymmetric_query_prefix} #{text}" if asymetric + if SiteSetting.ai_cloudflare_workers_api_token.present? 
DiscourseAi::Inference::CloudflareWorkersAi .perform!(inference_model_name, { text: text }) @@ -82,6 +84,10 @@ module DiscourseAi def tokenizer DiscourseAi::Tokenizer::BgeLargeEnTokenizer end + + def asymmetric_query_prefix + "Represent this sentence for searching relevant passages:" + end end end end diff --git a/lib/embeddings/vector_representations/gemini.rb b/lib/embeddings/vector_representations/gemini.rb index 0762432f..39b9d22e 100644 --- a/lib/embeddings/vector_representations/gemini.rb +++ b/lib/embeddings/vector_representations/gemini.rb @@ -42,7 +42,7 @@ module DiscourseAi "vector_cosine_ops" end - def vector_from(text) + def vector_from(text, asymetric: false) response = DiscourseAi::Inference::GeminiEmbeddings.perform!(text) response[:embedding][:values] end diff --git a/lib/embeddings/vector_representations/multilingual_e5_large.rb b/lib/embeddings/vector_representations/multilingual_e5_large.rb index 59133263..a65a4094 100644 --- a/lib/embeddings/vector_representations/multilingual_e5_large.rb +++ b/lib/embeddings/vector_representations/multilingual_e5_large.rb @@ -28,7 +28,7 @@ module DiscourseAi end end - def vector_from(text) + def vector_from(text, asymetric: false) if DiscourseAi::Inference::HuggingFaceTextEmbeddings.configured? truncated_text = tokenizer.truncate(text, max_sequence_length - 2) DiscourseAi::Inference::HuggingFaceTextEmbeddings.perform!(truncated_text).first diff --git a/lib/embeddings/vector_representations/text_embedding_3_large.rb b/lib/embeddings/vector_representations/text_embedding_3_large.rb index 65cae0ed..890657e8 100644 --- a/lib/embeddings/vector_representations/text_embedding_3_large.rb +++ b/lib/embeddings/vector_representations/text_embedding_3_large.rb @@ -44,7 +44,7 @@ module DiscourseAi "vector_cosine_ops" end - def vector_from(text) + def vector_from(text, asymetric: false) response = DiscourseAi::Inference::OpenAiEmbeddings.perform!( text, diff --git a/lib/embeddings/vector_representations/text_embedding_3_small.rb b/lib/embeddings/vector_representations/text_embedding_3_small.rb index 842ed183..c307f684 100644 --- a/lib/embeddings/vector_representations/text_embedding_3_small.rb +++ b/lib/embeddings/vector_representations/text_embedding_3_small.rb @@ -42,7 +42,7 @@ module DiscourseAi "vector_cosine_ops" end - def vector_from(text) + def vector_from(text, asymetric: false) response = DiscourseAi::Inference::OpenAiEmbeddings.perform!(text, model: self.class.name) response[:data].first[:embedding] end diff --git a/lib/embeddings/vector_representations/text_embedding_ada_002.rb b/lib/embeddings/vector_representations/text_embedding_ada_002.rb index a5bbe1ac..d90b555a 100644 --- a/lib/embeddings/vector_representations/text_embedding_ada_002.rb +++ b/lib/embeddings/vector_representations/text_embedding_ada_002.rb @@ -42,7 +42,7 @@ module DiscourseAi "vector_cosine_ops" end - def vector_from(text) + def vector_from(text, asymetric: false) response = DiscourseAi::Inference::OpenAiEmbeddings.perform!(text, model: self.class.name) response[:data].first[:embedding] end diff --git a/lib/inference/hugging_face_text_embeddings.rb b/lib/inference/hugging_face_text_embeddings.rb index 6ec7af88..d0a5cddb 100644 --- a/lib/inference/hugging_face_text_embeddings.rb +++ b/lib/inference/hugging_face_text_embeddings.rb @@ -3,32 +3,63 @@ module ::DiscourseAi module Inference class HuggingFaceTextEmbeddings - def self.perform!(content) - headers = { "Referer" => Discourse.base_url, "Content-Type" => "application/json" } - body = { inputs: content, truncate: 
true }.to_json + class << self + def perform!(content) + headers = { "Referer" => Discourse.base_url, "Content-Type" => "application/json" } + body = { inputs: content, truncate: true }.to_json - if SiteSetting.ai_hugging_face_tei_endpoint_srv.present? - service = DiscourseAi::Utils::DnsSrv.lookup(SiteSetting.ai_hugging_face_tei_endpoint_srv) - api_endpoint = "https://#{service.target}:#{service.port}" - else - api_endpoint = SiteSetting.ai_hugging_face_tei_endpoint + if SiteSetting.ai_hugging_face_tei_endpoint_srv.present? + service = + DiscourseAi::Utils::DnsSrv.lookup(SiteSetting.ai_hugging_face_tei_endpoint_srv) + api_endpoint = "https://#{service.target}:#{service.port}" + else + api_endpoint = SiteSetting.ai_hugging_face_tei_endpoint + end + + if SiteSetting.ai_hugging_face_tei_api_key.present? + headers["X-API-KEY"] = SiteSetting.ai_hugging_face_tei_api_key + end + + conn = Faraday.new { |f| f.adapter FinalDestination::FaradayAdapter } + response = conn.post(api_endpoint, body, headers) + + raise Net::HTTPBadResponse if ![200].include?(response.status) + + JSON.parse(response.body, symbolize_names: true) end - if SiteSetting.ai_hugging_face_tei_api_key.present? - headers["X-API-KEY"] = SiteSetting.ai_hugging_face_tei_api_key + def rerank(content, candidates) + headers = { "Referer" => Discourse.base_url, "Content-Type" => "application/json" } + body = { query: content, texts: candidates, truncate: true }.to_json + + if SiteSetting.ai_hugging_face_tei_reranker_endpoint_srv.present? + service = + DiscourseAi::Utils::DnsSrv.lookup( + SiteSetting.ai_hugging_face_tei_reranker_endpoint_srv, + ) + api_endpoint = "https://#{service.target}:#{service.port}" + else + api_endpoint = SiteSetting.ai_hugging_face_tei_reranker_endpoint + end + + if SiteSetting.ai_hugging_face_tei_reranker_api_key.present? + headers["X-API-KEY"] = SiteSetting.ai_hugging_face_tei_reranker_api_key + end + + conn = Faraday.new { |f| f.adapter FinalDestination::FaradayAdapter } + response = conn.post("#{api_endpoint}/rerank", body, headers) + + if response.status != 200 + raise Net::HTTPBadResponse.new("Status: #{response.status}\n\n#{response.body}") + end + + JSON.parse(response.body, symbolize_names: true) end - conn = Faraday.new { |f| f.adapter FinalDestination::FaradayAdapter } - response = conn.post(api_endpoint, body, headers) - - raise Net::HTTPBadResponse if ![200].include?(response.status) - - JSON.parse(response.body, symbolize_names: true) - end - - def self.configured? - SiteSetting.ai_hugging_face_tei_endpoint.present? || - SiteSetting.ai_hugging_face_tei_endpoint_srv.present? + def configured? + SiteSetting.ai_hugging_face_tei_endpoint.present? || + SiteSetting.ai_hugging_face_tei_endpoint_srv.present? + end end end end
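
For reference, here is a minimal sketch (not part of the patch) of exercising the new quick-search route outside the Ember component. The route, the q parameter, the minimum-length check, and the rate limit all come from the controller and routes above; the host and credentials are placeholders, and the Api-Key/Api-Username headers follow the standard Discourse API convention.

require "json"
require "net/http"
require "uri"

# Hypothetical forum host, for illustration only.
uri = URI("https://discourse.example.com/discourse-ai/embeddings/quick-search")
uri.query = URI.encode_www_form(q: "postgres connection pooling")

request = Net::HTTP::Get.new(uri)
request["Api-Key"] = "YOUR_API_KEY" # placeholder
request["Api-Username"] = "system"  # placeholder

response =
  Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(request) }

# Terms shorter than SiteSetting.min_search_term_length are rejected with a 400
# (Discourse::InvalidParameters). Uncached terms also count against the
# 60-requests-per-minute per-user "semantic-search" rate limit, while terms
# with a cached embedding bypass the limiter entirely.
puts JSON.parse(response.body)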
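
The reranker's response shape is only implicit in quick_search, which reads each element's :index field to map the reranked order back onto the filtered posts. Below is a minimal sketch of that reordering step, assuming the Hugging Face TEI /rerank convention the code appears to target (one { index:, score: } object per candidate, sorted by descending score); the query, posts, ids, and scores are made up.

require "json"

query = "how do I reset my password"
candidates = [
  "You can reset your password from your preferences page.", # post 101
  "This topic tracks the moderator onboarding checklist.",   # post 202
]
post_ids = [101, 202]

# The same body HuggingFaceTextEmbeddings.rerank builds before POSTing to
# "#{api_endpoint}/rerank" on the configured reranker instance.
body = { query: query, texts: candidates, truncate: true }.to_json

# Hypothetical reranker response, already sorted by relevance.
reranked = JSON.parse(<<~JSON, symbolize_names: true)
  [
    { "index": 0, "score": 0.93 },
    { "index": 1, "score": 0.08 }
  ]
JSON

# Mirrors quick_search: map each :index back onto the candidate posts and
# keep the top five.
reordered_ids = reranked.map { _1[:index] }.map { post_ids[_1] }.take(5)
puts reordered_ids.inspect # => [101, 202]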