discourse-ai/lib/configuration/llm_vision_enumerator.rb
Sam b487de933d
FEATURE: add support for all vision models (#646)
Previously only GPT-4-vision was supported; this change introduces support
for Google/Anthropic and new OpenAI models

Additionally this makes vision work properly in dev environments
because we send the encoded payload via prompt instead of sending URLs
2024-05-28 10:31:15 -03:00

35 lines
935 B
Ruby

# frozen_string_literal: true
require "enum_site_setting"
module DiscourseAi
  module Configuration
    # Enumerates vision-capable LLMs as choices for a site setting.
    # Each choice pairs a human-readable display name with a
    # "provider:model_name" value string (plus a hard-coded "llava" entry).
    class LlmVisionEnumerator < ::EnumSiteSetting
      # Accepts any value; validation of the provider/model pair is
      # deferred to the code that consumes the setting.
      def self.valid_value?(val)
        true
      end

      # Builds the selectable list of vision models.
      #
      # @return [Array<Hash>] entries shaped like
      #   { name: <endpoint display name>, value: "provider:model_name" }
      def self.values
        # NOTE(review): removed a `begin … end` wrapper that had no
        # rescue/ensure — it added nesting without any error handling.
        result =
          DiscourseAi::Completions::Llm.vision_models_by_provider.flat_map do |provider, models|
            endpoint = DiscourseAi::Completions::Endpoints::Base.endpoint_for(provider.to_s)

            models.map do |model_name|
              { name: endpoint.display_name(model_name), value: "#{provider}:#{model_name}" }
            end
          end

        result << { name: "Llava", value: "llava" }
        result

        # TODO add support for LlmModel as well
        # LlmModel.all.each do |model|
        #   llm_models << { name: model.display_name, value: "custom:#{model.id}" }
        # end
      end
    end
  end
end