Sam 03fc94684b
FIX: AI helper not working correctly with mixtral (#399)
* FIX: AI helper not working correctly with mixtral

This PR introduces a new function on the generic llm called #generate

This will replace the implementation of completion!

#generate introduces a new way to pass temperature, max_tokens and stop_sequences

Then LLM implementers need to implement #normalize_model_params to
ensure the generic names match the LLM specific endpoint

This also adds temperature and stop_sequences to completion_prompts;
this allows for much more robust completion prompts

* port everything over to #generate

* Fix translation

- On anthropic this no longer throws random "This is your translation:"
- On mixtral this actually works

* fix markdown table generation as well
2024-01-04 09:53:47 -03:00

116 lines
3.2 KiB
Ruby

# frozen_string_literal: true
module DiscourseAi
  module Completions
    module Endpoints
      # Adapter for Google's Gemini generative language API
      # (generativelanguage.googleapis.com), supporting both streaming and
      # single-shot completions plus tool (function) calls.
      class Gemini < Base
        # This endpoint only serves the gemini-pro model.
        def self.can_contact?(model_name)
          %w[gemini-pro].include?(model_name)
        end

        # Gemini nests tuning parameters under generationConfig; start with an
        # empty config so normalized model params can be merged into it later
        # (see #prepare_payload).
        def default_options
          { generationConfig: {} }
        end

        # Converts the generic snake_case model params used by the LLM
        # abstraction (#generate) into the camelCase names the Gemini API
        # expects. Returns a new hash; the argument is not mutated.
        def normalize_model_params(model_params)
          model_params = model_params.dup

          if model_params[:stop_sequences]
            model_params[:stopSequences] = model_params.delete(:stop_sequences)
          end

          # FIX: this condition previously tested :temperature, so :max_tokens
          # was only renamed to maxOutputTokens when a temperature happened to
          # be supplied alongside it.
          if model_params[:max_tokens]
            model_params[:maxOutputTokens] = model_params.delete(:max_tokens)
          end

          # :temperature is already the name Gemini uses — no renaming needed.
          model_params
        end

        def provider_id
          AiApiAuditLog::Provider::Gemini
        end

        private

        # Streaming uses the :streamGenerateContent action, single-shot uses
        # :generateContent. The API key is passed as a query parameter.
        def model_uri
          url =
            "https://generativelanguage.googleapis.com/v1beta/models/#{model}:#{@streaming_mode ? "streamGenerateContent" : "generateContent"}?key=#{SiteSetting.ai_gemini_api_key}"

          URI(url)
        end

        # Builds the JSON body: translated prompt, optional tool declarations,
        # and any normalized model params merged into generationConfig.
        def prepare_payload(prompt, model_params, dialect)
          default_options
            .merge(contents: prompt)
            .tap do |payload|
              payload[:tools] = dialect.tools if dialect.tools.present?
              payload[:generationConfig].merge!(model_params) if model_params.present?
            end
        end

        def prepare_request(payload)
          headers = { "Content-Type" => "application/json" }

          Net::HTTP::Post.new(model_uri, headers).tap { |r| r.body = payload }
        end

        # Pulls either the text or a functionCall out of the first candidate.
        # Once a function call has been seen in a streamed response, every
        # subsequent partial is treated as part of that call.
        def extract_completion_from(response_raw)
          parsed = JSON.parse(response_raw, symbolize_names: true)

          response_h = parsed.dig(:candidates, 0, :content, :parts, 0)
          @has_function_call ||= response_h.dig(:functionCall).present?
          @has_function_call ? response_h[:functionCall] : response_h.dig(:text)
        end

        # A streamed response is one long JSON array of objects, delivered in
        # chunks that may carry several newline-separated fragments. Strip the
        # enclosing brackets and the "," separator lines so every surviving
        # entry is an individually parseable JSON object.
        def partials_from(decoded_chunk)
          decoded_chunk
            .split("\n")
            .map do |line|
              if line == ","
                nil
              elsif line.starts_with?("[")
                line[1..-1]
              elsif line.ends_with?("]")
                # FIX: was line[0..-1], a no-op slice that left the closing
                # "]" attached and broke JSON parsing of the final partial.
                line[0..-2]
              else
                line
              end
            end
            .compact_blank
        end

        def extract_prompt_for_tokenizer(prompt)
          prompt.to_s
        end

        def has_tool?(_response_data)
          @has_function_call
        end

        # Accumulates streamed functionCall partials into the XML tool-call
        # buffer shared by all endpoints (tool_name / tool_id / parameters).
        def add_to_buffer(function_buffer, _response_data, partial)
          if partial[:name].present?
            function_buffer.at("tool_name").content = partial[:name]
            # Gemini does not supply a call id, so reuse the function name.
            function_buffer.at("tool_id").content = partial[:name]
          end

          if partial[:args]
            argument_fragments =
              partial[:args].reduce(+"") do |memo, (arg_name, value)|
                memo << "\n<#{arg_name}>#{value}</#{arg_name}>"
              end
            argument_fragments << "\n"

            function_buffer.at("parameters").children =
              Nokogiri::HTML5::DocumentFragment.parse(argument_fragments)
          end

          function_buffer
        end
      end
    end
  end
end