# frozen_string_literal: true

module DiscourseAi
  module Completions
    module Dialects
      class ChatGpt < Dialect
        class << self
          def can_translate?(model_provider)
            model_provider == "open_ai" || model_provider == "azure"
          end
        end

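        # OpenAI's optional `name` field only accepts letters, digits, and
        # underscores; ids that fail this pattern get embedded into the
        # message content instead (see #translate and #user_msg).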
        VALID_ID_REGEX = /\A[a-zA-Z0-9_]+\z/

        def native_tool_support?
          llm_model.provider == "open_ai" || llm_model.provider == "azure"
        end

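        # Work out up front whether any user message carries an id OpenAI's
        # `name` field would reject; if so, ids are embedded in the message
        # content instead (see #user_msg). An illustrative (not verbatim)
        # output shape:
        #
        #   [
        #     { role: "system", content: "You are a helpful bot" },
        #     { role: "user", name: "user_1", content: "Hello!" },
        #   ]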
        def translate
          @embed_user_ids =
            prompt.messages.any? do |m|
              m[:id] && m[:type] == :user && !m[:id].to_s.match?(VALID_ID_REGEX)
            end

          super
        end

        def max_prompt_tokens
          # reserve room for the response (opts[:max_tokens], default 2500)
          # plus a 50 token buffer - our token counting is not 100% accurate
          # and getting numbers to align exactly is very hard
          buffer = (opts[:max_tokens] || 2500) + 50

          if tools.present?
            # note this is about 100 tokens over; OpenAI has a more optimal representation
            @function_size ||= llm_model.tokenizer_class.size(tools.to_json.to_s)
            buffer += @function_size
          end

          llm_model.max_prompt_tokens - buffer
        end

        # o1 models have no support for streaming, tools, or system messages
        def is_gpt_o?
          llm_model.provider == "open_ai" && llm_model.name.include?("o1-")
        end

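        # Memoized lookup of the "disable_native_tools" custom param, which
        # lets a model opt out of OpenAI's native tool calling.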
        def disable_native_tools?
          return @disable_native_tools if defined?(@disable_native_tools)
          @disable_native_tools = !!llm_model.lookup_custom_param("disable_native_tools")
        end

        private

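        # Prefer OpenAI's native tool/function-call format; when native tools
        # are disabled, fall back to the generic tools dialect from the base
        # class.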
        def tools_dialect
          if disable_native_tools?
            super
          else
            @tools_dialect ||= DiscourseAi::Completions::Dialects::OpenAiTools.new(prompt.tools)
          end
        end

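        # o1 models do not accept the "system" role, so the system prompt is
        # demoted to a user message for them; tool instructions are appended
        # when native tools are disabled.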
        def system_msg(msg)
          content = msg[:content]
          if disable_native_tools? && tools_dialect.instructions.present?
            content = content + "\n\n" + tools_dialect.instructions
          end

          if is_gpt_o?
            { role: "user", content: content }
          else
            { role: "system", content: content }
          end
        end

        def model_msg(msg)
          { role: "assistant", content: msg[:content] }
        end

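        # Tool calls and tool results use OpenAI's native representation
        # unless native tools are disabled, in which case the base dialect's
        # text-based format applies.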
        def tool_call_msg(msg)
          if disable_native_tools?
            super
          else
            tools_dialect.from_raw_tool_call(msg)
          end
        end

        def tool_msg(msg)
          if disable_native_tools?
            super
          else
            tools_dialect.from_raw_tool(msg)
          end
        end

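        # ids that pass VALID_ID_REGEX travel in OpenAI's `name` field;
        # otherwise they are prefixed into the content (see #translate).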
        def user_msg(msg)
          user_message = { role: "user", content: msg[:content] }

          if msg[:id]
            if @embed_user_ids
              user_message[:content] = "#{msg[:id]}: #{msg[:content]}"
            else
              user_message[:name] = msg[:id]
            end
          end

          user_message[:content] = inline_images(user_message[:content], msg) if vision_support?

          user_message
        end

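        # Rewrites plain string content into OpenAI's multipart content
        # array: one image_url part per upload, followed by the original
        # text. Illustrative shape:
        #
        #   [
        #     { type: "image_url", image_url: { url: "data:image/png;base64,..." } },
        #     { type: "text", text: "What is in this image?" },
        #   ]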
        def inline_images(content, message)
          encoded_uploads = prompt.encoded_uploads(message)
          return content if encoded_uploads.blank?

          content_w_imgs =
            encoded_uploads.reduce([]) do |memo, details|
              memo << {
                type: "image_url",
                image_url: {
                  url: "data:#{details[:mime_type]};base64,#{details[:base64]}",
                },
              }
            end

          content_w_imgs << { type: "text", text: message[:content] }
        end

        def per_message_overhead
          # OpenAI specifies roughly 4 tokens of overhead per message
          4
        end

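        # A message's token count includes the optional `name` field as well
        # as its content.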
        def calculate_message_token(context)
          llm_model.tokenizer_class.size(context[:content].to_s + context[:name].to_s)
        end
      end
    end
  end
end