# frozen_string_literal: true

module DiscourseAi
  module Completions
    module Dialects
      class Gemini < Dialect
        class << self
          def can_translate?(model_name)
            %w[gemini-pro gemini-1.5-pro].include?(model_name)
          end

          def tokenizer
            DiscourseAi::Tokenizer::OpenAiTokenizer # TODO: replace with GeminiTokenizer
          end
        end

        def translate
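          # An illustrative sketch (an added note, not from the original source):
          # a prompt whose messages are
          #   [{ type: :system, content: "You are a helper" },
          #    { type: :user, content: "Hi" }]
          # translates to strictly alternating Gemini turns:
          #   [{ role: "user", parts: { text: "You are a helper" } },
          #    { role: "model", parts: { text: "Ok." } },
          #    { role: "user", parts: { text: "Hi" } }]
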
          # Gemini complains if we don't alternate model/user roles.
          noop_model_response = { role: "model", parts: { text: "Ok." } }

          messages = prompt.messages

          # Gemini doesn't accept a trailing assistant message (used elsewhere to
          # improve long-context responses), so drop it if present.
          messages.pop if messages.last[:type] == :model

          memo = []
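
          # (Added note) trim_messages is inherited from the Dialect base class;
          # judging by max_prompt_tokens and calculate_message_token below, it
          # trims older messages so the prompt fits the token budget.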
          trim_messages(messages).each do |msg|
            if msg[:type] == :system
              memo << { role: "user", parts: { text: msg[:content] } }
              memo << noop_model_response.dup
            elsif msg[:type] == :model
              memo << { role: "model", parts: { text: msg[:content] } }
            elsif msg[:type] == :tool_call
              call_details = JSON.parse(msg[:content], symbolize_names: true)
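
              # (Added note; illustrative shape) msg[:content] carries the
              # serialized call, e.g. {"name":"search","arguments":{"query":"ruby"}},
              # parsed here to recover the function name and args.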

              memo << {
                role: "model",
                parts: {
                  functionCall: {
                    name: msg[:name] || call_details[:name],
                    args: call_details[:arguments],
                  },
                },
              }
            elsif msg[:type] == :tool
              memo << {
                role: "function",
                parts: {
                  functionResponse: {
                    name: msg[:name] || msg[:id],
                    response: {
                      content: msg[:content],
                    },
                  },
                },
              }
            else
              # Gemini quirk: it doesn't accept tool -> user or user -> user messages.
              previous_msg_role = memo.last&.dig(:role)
              if previous_msg_role == "user" || previous_msg_role == "function"
                memo << noop_model_response.dup
              end

              memo << { role: "user", parts: { text: msg[:content] } }
            end
          end

          memo
        end

        def tools
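          # An illustrative sketch (an added note, not from the original source):
          # a tool like
          #   { name: "get_weather", description: "Look up the weather",
          #     parameters: [{ name: "location", type: "string", required: true }] }
          # becomes Gemini's function_declarations schema:
          #   [{ function_declarations: [{ name: "get_weather",
          #      description: "Look up the weather",
          #      parameters: { type: "object", required: ["location"],
          #                    properties: { "location" => { type: "string" } } } }] }]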
          return if prompt.tools.blank?

          translated_tools =
            prompt.tools.map do |t|
              tool = t.slice(:name, :description)

              if t[:parameters]
                tool[:parameters] = t[:parameters].reduce(
                  { type: "object", required: [], properties: {} },
                ) do |memo, p|
                  name = p[:name]
                  memo[:required] << name if p[:required]

                  memo[:properties][name] = p.except(:name, :required, :item_type)

                  memo[:properties][name][:items] = { type: p[:item_type] } if p[:item_type]
                  memo
                end
              end

              tool
            end

          [{ function_declarations: translated_tools }]
        end

        def max_prompt_tokens
          if model_name == "gemini-1.5-pro"
            # Technically the model supports 1 million tokens, but we're being
            # conservative.
            800_000
          else
            16_384 # 50% of the model's context window
          end
        end

        protected

        def calculate_message_token(context)
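          # Counts both the content and the optional message name, since both
          # end up in the payload (an added note; see the concatenation below).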
          self.class.tokenizer.size(context[:content].to_s + context[:name].to_s)
        end
      end
    end
  end
end
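
# Example usage (a sketch, not from the original source; assumes the Dialect
# base class takes a prompt object and a model name, like the plugin's other
# dialects):
#
#   dialect = DiscourseAi::Completions::Dialects::Gemini.new(prompt, "gemini-pro")
#   payload = { contents: dialect.translate, tools: dialect.tools }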