2023-11-23 10:58:54 -05:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
|
|
|
module DiscourseAi
  module Completions
    module Endpoints
      # Fake completion endpoint for tests: instead of contacting a real LLM
      # provider it replays a fixed list of canned responses, one per call to
      # #perform_completion!, and records the last prompt for inspection.
      class CannedResponse
        # Raised when more completions are requested than canned responses
        # were supplied.
        CANNED_RESPONSE_ERROR = Class.new(StandardError)

        # This endpoint is only ever usable from the test environment.
        def self.can_contact?(_)
          Rails.env.test?
        end

        # @param responses [Array<String>] payloads replayed in order; entries
        #   containing a <function_calls> fragment are yielded whole as tool
        #   invocations when streaming.
        def initialize(responses)
          @responses = responses
          @completions = 0
          @prompt = nil
        end

        attr_reader :responses, :completions, :prompt

        # max_tokens, temperature and stop_sequences are already in the
        # generic format, so no translation is needed.
        def normalize_model_params(model_params)
          model_params
        end

        # Returns the next canned response, recording +prompt+ for later
        # inspection. With a block, text responses are streamed char by char
        # (honoring the cancel callback) while tool invocations are yielded
        # in one piece.
        #
        # @raise [CANNED_RESPONSE_ERROR] when the canned responses are exhausted
        def perform_completion!(prompt, _user, _model_params)
          @prompt = prompt

          response = responses[completions]
          if response.nil?
            raise CANNED_RESPONSE_ERROR,
                  "The number of completions you requested exceeds the number of canned responses"
          end

          @completions += 1

          return response unless block_given?

          cancelled = false
          cancel_fn = -> { cancelled = true }

          # We buffer and return tool invocations in one go.
          if tool_invocation?(response)
            yield(response, cancel_fn)
          else
            response.each_char do |char|
              break if cancelled
              yield(char, cancel_fn)
            end
          end
        end

        def tokenizer
          DiscourseAi::Tokenizer::OpenAiTokenizer
        end

        private

        # A response counts as a tool invocation when it contains a
        # <function_calls> element.
        def tool_invocation?(response)
          Nokogiri::HTML5.fragment(response).at("function_calls").present?
        end
      end
    end
  end
end
|