# frozen_string_literal: true

module DiscourseAi
  module Completions
    module Endpoints
      class Vllm < Base
        class << self
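          # Endpoint selection hook: this class handles the "vllm" provider.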
          def can_contact?(endpoint_name)
            endpoint_name == "vllm"
          end

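          # Site settings whose values determine whether this endpoint is configured.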
          def dependant_setting_names
            %w[ai_vllm_endpoint_srv ai_vllm_endpoint]
          end

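          # Either the DNS SRV setting or a direct endpoint URL is enough to reach vLLM.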
          def correctly_configured?(_model_name)
            SiteSetting.ai_vllm_endpoint_srv.present? || SiteSetting.ai_vllm_endpoint.present?
          end

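          # Human-readable label for this endpoint/model pair.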
          def endpoint_name(model_name)
            "vLLM - #{model_name}"
          end
        end

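        # vLLM serves an OpenAI-compatible API, which expects "stop" rather than
        # the generic "stop_sequences" param used elsewhere in this plugin.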
        def normalize_model_params(model_params)
          model_params = model_params.dup

          # max_tokens, temperature are already supported
          if model_params[:stop_sequences]
            model_params[:stop] = model_params.delete(:stop_sequences)
          end

          model_params
        end

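        # Baseline payload; caller-supplied params override these in prepare_payload.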
        def default_options
          { max_tokens: 2000, model: model }
        end

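        # Provider identifier stored with each AiApiAuditLog entry.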
        def provider_id
          AiApiAuditLog::Provider::Vllm
        end

        private

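        # Resolution order: explicit model URL (unless it is the reserved SRV
        # placeholder), then a DNS SRV lookup, then the plain ai_vllm_endpoint setting.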
        def model_uri
          if llm_model&.url && llm_model.url != LlmModel::RESERVED_VLLM_SRV_URL
            return URI(llm_model.url)
          end

          service = DiscourseAi::Utils::DnsSrv.lookup(SiteSetting.ai_vllm_endpoint_srv)
          if service.present?
            api_endpoint = "https://#{service.target}:#{service.port}/v1/chat/completions"
          else
            api_endpoint = "#{SiteSetting.ai_vllm_endpoint}/v1/chat/completions"
          end

          @uri ||= URI(api_endpoint)
        end

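        # Assemble the OpenAI-style chat body; streaming is opt-in via @streaming_mode.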
        def prepare_payload(prompt, model_params, _dialect)
          default_options
            .merge(model_params)
            .merge(messages: prompt)
            .tap { |payload| payload[:stream] = true if @streaming_mode }
        end

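        # The API key is optional; when present it is sent as an X-API-KEY header.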
        def prepare_request(payload)
          headers = { "Referer" => Discourse.base_url, "Content-Type" => "application/json" }

          api_key = llm_model&.api_key || SiteSetting.ai_vllm_api_key
          headers["X-API-KEY"] = api_key if api_key.present?

          Net::HTTP::Post.new(model_uri, headers).tap { |r| r.body = payload }
        end

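        # Split a server-sent-events chunk into its "data:" payloads, dropping the
        # [DONE] sentinel that marks the end of the stream.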
        def partials_from(decoded_chunk)
          decoded_chunk
            .split("\n")
            .map do |line|
              data = line.split("data: ", 2)[1]
              data == "[DONE]" ? nil : data
            end
            .compact
        end

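        # Streaming responses carry text in choices[0].delta; full responses in
        # choices[0].message.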
        def extract_completion_from(response_raw)
          parsed = JSON.parse(response_raw, symbolize_names: true).dig(:choices, 0)
          # a streamed chunk may carry no choice yet (e.g. half a line was sent); skip it
          return if !parsed

          response_h = @streaming_mode ? parsed.dig(:delta) : parsed.dig(:message)

          response_h.dig(:content)
        end
      end
    end
  end
end