# frozen_string_literal: true

module DiscourseAi
  module Completions
    module Dialects
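      # Base class for provider dialects (ChatGpt, Claude, Gemini, ...). A
      # dialect translates a generic prompt into the message format a specific
      # LLM provider expects. A rough usage sketch (the calling flow lives
      # outside this file and is assumed here):
      #
      #   dialect_klass = Dialect.dialect_for(llm_model)
      #   dialect = dialect_klass.new(prompt, llm_model)
      #   payload = dialect.translate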
      class Dialect
        class << self
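          # Subclasses answer whether they can serve the given LlmModel;
          # .dialect_for uses this to pick a dialect at runtime.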
          def can_translate?(llm_model)
            raise NotImplementedError
          end

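          # Every production dialect, in the order .dialect_for tries them.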
          def all_dialects
            [
              DiscourseAi::Completions::Dialects::ChatGpt,
              DiscourseAi::Completions::Dialects::Gemini,
              DiscourseAi::Completions::Dialects::Claude,
              DiscourseAi::Completions::Dialects::Command,
              DiscourseAi::Completions::Dialects::Ollama,
              DiscourseAi::Completions::Dialects::Mistral,
              DiscourseAi::Completions::Dialects::Nova,
              DiscourseAi::Completions::Dialects::OpenAiCompatible,
            ]
          end

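          # Returns the first dialect class whose .can_translate? accepts
          # llm_model. In test and development the Fake dialect gets first
          # chance, so specs can stub completions.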
          def dialect_for(llm_model)
            dialects = []

            if Rails.env.test? || Rails.env.development?
              dialects = [DiscourseAi::Completions::Dialects::Fake]
            end

            dialects = dialects.concat(all_dialects)

            dialect = dialects.find { |d| d.can_translate?(llm_model) }
            raise DiscourseAi::Completions::Llm::UNKNOWN_MODEL if !dialect

            dialect
          end
        end

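        # generic_prompt: a DiscourseAi::Completions::Prompt
        # llm_model: the LlmModel being targeted
        # opts: dialect-specific options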
        def initialize(generic_prompt, llm_model, opts: {})
          @prompt = generic_prompt
          @opts = opts
          @llm_model = llm_model
        end

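        # Shape of a valid tool/parameter identifier; available to dialects
        # that need to validate names before sending them to a provider.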
        VALID_ID_REGEX = /\A[a-zA-Z0-9_]+\z/

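        # Capability flags. Subclasses override these to advertise what the
        # underlying provider supports.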
        def can_end_with_assistant_msg?
          false
        end

        def native_tool_support?
          false
        end

        def vision_support?
          llm_model.vision_enabled?
        end

        def tools
          @tools ||= tools_dialect.translated_tools
        end

        def tool_choice
          prompt.tool_choice
        end

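        # Converts the generic prompt into the provider-specific message list:
        # optionally drops a trailing assistant message, trims the rest to the
        # token budget, then dispatches each message to its type-specific
        # *_msg builder.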
        def translate
          messages = prompt.messages

          # Some models use an assistant msg to improve long-context responses.
          if messages.last[:type] == :model && can_end_with_assistant_msg?
            messages = messages.dup
            messages.pop
          end

          trim_messages(messages).map { |msg| send("#{msg[:type]}_msg", msg) }.compact
        end

        def conversation_context
          raise NotImplementedError
        end

        def max_prompt_tokens
          raise NotImplementedError
        end

        attr_reader :prompt

        private

        attr_reader :opts, :llm_model

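        # Trims the conversation to fit within max_prompt_tokens: the system
        # message is kept (truncated to at most 60% of the budget), remaining
        # messages are walked newest to oldest, shrinking or dropping content
        # once the budget runs out, and a trailing orphaned tool result is
        # discarded.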
        def trim_messages(messages)
          prompt_limit = max_prompt_tokens
          current_token_count = 0
          message_step_size = (prompt_limit / 25).to_i * -1

          trimmed_messages = []

          range = (0..-1)
          if messages.dig(0, :type) == :system
            max_system_tokens = prompt_limit * 0.6
            system_message = messages[0]
            system_size = calculate_message_token(system_message)

            if system_size > max_system_tokens
              system_message[:content] = llm_model.tokenizer_class.truncate(
                system_message[:content],
                max_system_tokens,
              )
            end

            trimmed_messages << system_message
            current_token_count += calculate_message_token(system_message)
            range = (1..-1)
          end

          reversed_trimmed_msgs = []

          messages[range].reverse.each do |msg|
            break if current_token_count >= prompt_limit

            message_tokens = calculate_message_token(msg)

            dupped_msg = msg.dup

            # Don't trim tool call metadata.
            if msg[:type] == :tool_call
              break if current_token_count + message_tokens + per_message_overhead > prompt_limit

              current_token_count += message_tokens + per_message_overhead
              reversed_trimmed_msgs << dupped_msg
              next
            end

            # Trimming content to make sure we respect token limit.
            while dupped_msg[:content].present? &&
                    message_tokens + current_token_count + per_message_overhead > prompt_limit
              dupped_msg[:content] = dupped_msg[:content][0..message_step_size] || ""
              message_tokens = calculate_message_token(dupped_msg)
            end

            next if dupped_msg[:content].blank?

            current_token_count += message_tokens + per_message_overhead

            reversed_trimmed_msgs << dupped_msg
          end

          reversed_trimmed_msgs.pop if reversed_trimmed_msgs.last&.dig(:type) == :tool

          trimmed_messages.concat(reversed_trimmed_msgs.reverse)
        end

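        # Extra token cost charged per message (separators and the like);
        # zero by default, dialects may override.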
        def per_message_overhead
          0
        end

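        # Token count of a message's content, per the model's tokenizer.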
        def calculate_message_token(msg)
          llm_model.tokenizer_class.size(msg[:content].to_s)
        end

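        # Default tool serialization: XML-style markup embedded in the prompt
        # text, for models without native tool calling. Dialects with native
        # support swap in their own tools dialect.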
        def tools_dialect
          @tools_dialect ||= DiscourseAi::Completions::Dialects::XmlTools.new(prompt.tools)
        end

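        # Per-message-type builders each dialect must implement to emit the
        # provider's wire format.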
        def system_msg(msg)
          raise NotImplementedError
        end

        def model_msg(msg)
          raise NotImplementedError
        end

        def user_msg(msg)
          raise NotImplementedError
        end

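        # Without native tool support, tool calls and their results are
        # rendered to text and passed through as ordinary model/user messages.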
        def tool_call_msg(msg)
          new_content = tools_dialect.from_raw_tool_call(msg)
          msg = msg.merge(content: new_content)
          model_msg(msg)
        end

        def tool_msg(msg)
          new_content = tools_dialect.from_raw_tool(msg)
          msg = msg.merge(content: new_content)
          user_msg(msg)
        end
      end
    end
  end
end