* DEV: One LLM abstraction to rule them all
* REFACTOR: HyDE search uses new LLM abstraction
* REFACTOR: Summarization uses the LLM abstraction
* Updated documentation and made small fixes. Remove Bedrock claude-2 restriction
41 lines
756 B
Ruby
# frozen_string_literal: true

module DiscourseAi
  module Summarization
    module Models
      # Base class for summarization model adapters. Subclasses describe a
      # concrete model (its name and token budget) and report whether it is
      # ready to use.
      class Base
        def initialize(model, max_tokens:)
          @model = model
          @max_tokens = max_tokens
        end

        # Subclasses return true when the settings the model needs are present.
        def correctly_configured?
          raise NotImplementedError
        end

        # Subclasses return a human-readable name for the model.
        def display_name
          raise NotImplementedError
        end

        # Subclasses return a hint telling admins how to configure the model.
        def configuration_hint
          raise NotImplementedError
        end

        # Tokens left for the content to summarize, after reserving room for
        # the base prompt and the response.
        def available_tokens
          max_tokens - reserved_tokens
        end

        attr_reader :model, :max_tokens

        protected

        def reserved_tokens
          # Reserve tokens for the response and the base prompt
          # ~500 words
          700
        end
      end
    end
  end
end
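For illustration, below is a minimal sketch of a concrete adapter built on this base class and how its token budget would be computed. The ExampleOpenAi class, the ai_example_api_key site setting, and the model string are hypothetical and only demonstrate the contract above; they are not part of the plugin.

# frozen_string_literal: true

# Hypothetical adapter, for illustration only.
module DiscourseAi
  module Summarization
    module Models
      class ExampleOpenAi < Base
        def correctly_configured?
          # Assumes a site setting that stores the provider's API key.
          SiteSetting.ai_example_api_key.present?
        end

        def display_name
          "Example GPT (#{model})"
        end

        def configuration_hint
          "Set ai_example_api_key in your site settings."
        end
      end
    end
  end
end

With a 4096-token budget, available_tokens leaves 3396 tokens for the content to summarize, since the base class reserves 700 for the base prompt and the response:

summarizer = DiscourseAi::Summarization::Models::ExampleOpenAi.new("example-gpt", max_tokens: 4096)
summarizer.available_tokens # => 3396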