# frozen_string_literal: true
module DiscourseAi
  module Tokenizer
    # Tokenizer backed by OpenAI's tiktoken "cl100k_base" encoding
    # (the encoding used by the GPT-3.5 / GPT-4 model family).
    class OpenAiTokenizer < BasicTokenizer
      class << self
        # Lazily builds and memoizes the tiktoken encoder.
        #
        # NOTE: memoized in a class *instance* variable rather than a
        # class variable (@@tokenizer) so subclasses do not silently
        # share one encoder across the inheritance tree.
        def tokenizer
          @tokenizer ||= Tiktoken.get_encoding("cl100k_base")
        end

        # Returns the array of token ids for +text+.
        def tokenize(text)
          tokenizer.encode(text)
        end

        # Same as #tokenize; kept for interface parity with other
        # tokenizers that expose encode/decode.
        def encode(text)
          tokenizer.encode(text)
        end

        # Reconstructs a string from an array of token ids.
        def decode(token_ids)
          tokenizer.decode(token_ids)
        end

        # Truncates +text+ so that it encodes to at most +max_length+ tokens.
        def truncate(text, max_length)
          # fast track common case, /2 to handle unicode chars
          # than can take more than 1 token per char
          return text if !SiteSetting.ai_strict_token_counting && text.size < max_length / 2

          tokenizer.decode(tokenize(text).take(max_length))
        rescue Tiktoken::UnicodeError
          # Cutting at max_length may split a multi-token unicode
          # character; back off one token at a time until decode
          # succeeds. Terminates because decode([]) == "".
          max_length = max_length - 1
          retry
        end

        # True when appending +addition+ to +text+ would keep the total
        # token count under +max_length+.
        def can_expand_tokens?(text, addition, max_length)
          # fast track common case, /2 to handle unicode chars
          # than can take more than 1 token per char
          if !SiteSetting.ai_strict_token_counting && text.size + addition.size < max_length / 2
            return true
          end

          tokenizer.encode(text).length + tokenizer.encode(addition).length < max_length
        end
      end
    end
  end
end