Mirror of https://github.com/discourse/discourse-ai.git, synced 2025-07-01 20:12:15 +00:00.
This commit introduces a new Forum Researcher persona specialized in deep forum content analysis, along with comprehensive improvements to our AI infrastructure. Key additions: a new Forum Researcher persona with advanced filtering and analysis capabilities; a robust filtering system supporting tags, categories, dates, users, and keywords; an LLM formatter to efficiently process and chunk research results. Infrastructure improvements: implemented a CancelManager class to centrally manage AI completion cancellations; replaced callback-based cancellation with a more robust pattern; added systematic cancellation monitoring with callbacks. Other improvements: added a configurable default_enabled flag to control which personas are enabled by default; updated translation strings for the new researcher functionality; added comprehensive specs for the new components. Renames Researcher -> Web Researcher. This change makes our AI platform more stable while adding powerful research capabilities that can analyze forum trends and surface relevant content.
107 lines
3.0 KiB
Ruby
107 lines
3.0 KiB
Ruby
# frozen_string_literal: true
|
|
|
|
module DiscourseAi
  module Completions
    module Endpoints
      # Test-only endpoint that replays a predetermined list of responses
      # instead of contacting a real LLM. Each call to #perform_completion!
      # consumes the next canned response in order; running past the end of
      # the list raises CANNED_RESPONSE_ERROR.
      class CannedResponse
        # Raised when more completions are requested than canned responses
        # were supplied.
        CANNED_RESPONSE_ERROR = Class.new(StandardError)

        # @param responses [Array] ordered replies to hand back, one per
        #   completion. Entries may be Strings (streamed char-by-char when a
        #   block is given), ToolCall/Thinking/StructuredOutput objects
        #   (yielded whole), StandardError instances (raised), or Arrays of
        #   the above (yielded in sequence).
        def initialize(responses)
          @responses = responses
          @completions = 0
          @dialect = nil
        end

        # max_tokens, temperature, stop_sequences are already supported, so
        # the params pass through untouched.
        def normalize_model_params(model_params)
          model_params
        end

        attr_reader :responses, :completions, :dialect, :model_params

        # Messages from the most recent dialect passed to #perform_completion!.
        # Handy for specs asserting what prompt would have been sent.
        def prompt_messages
          dialect.prompt.messages
        end

        # Returns (or streams, when a block is given) the next canned
        # response. Mirrors the real endpoint interface; most keyword
        # arguments are accepted for compatibility and ignored.
        #
        # @raise [CANNED_RESPONSE_ERROR] when the canned responses run out
        # @raise [StandardError] when the next canned response is itself an error
        def perform_completion!(
          dialect,
          _user,
          model_params,
          feature_name: nil,
          feature_context: nil,
          partial_tool_calls: false,
          output_thinking: false,
          cancel_manager: nil
        )
          @dialect = dialect
          @model_params = model_params
          response = responses[completions]
          if response.nil?
            raise CANNED_RESPONSE_ERROR,
                  "The number of completions you requested exceed the number of canned responses"
          end

          response = as_structured_output(response) if model_params[:response_format].present?

          raise response if response.is_a?(StandardError)

          @completions += 1
          if block_given?
            cancelled = false
            cancel_fn = lambda { cancelled = true }

            # We buffer and return tool invocations in one go.
            as_array = response.is_a?(Array) ? response : [response]
            as_array.each do |chunk|
              if is_tool?(chunk)
                yield(chunk, cancel_fn)
              elsif is_thinking?(chunk)
                yield(chunk, cancel_fn)
              elsif is_structured_output?(chunk)
                yield(chunk, cancel_fn)
              else
                chunk.each_char do |char|
                  # Honor both the legacy cancel_fn lambda and the newer
                  # CancelManager so cancellation specs behave like the
                  # real endpoints.
                  break if cancelled || cancel_manager&.cancelled?
                  yield(char, cancel_fn)
                end
              end
            end
          end

          # A single-element array is unwrapped so callers get the bare value.
          response = response.first if response.is_a?(Array) && response.length == 1

          response
        end

        def tokenizer
          DiscourseAi::Tokenizer::OpenAiTokenizer
        end

        private

        def is_thinking?(response)
          response.is_a?(DiscourseAi::Completions::Thinking)
        end

        def is_tool?(response)
          response.is_a?(DiscourseAi::Completions::ToolCall)
        end

        def is_structured_output?(response)
          response.is_a?(DiscourseAi::Completions::StructuredOutput)
        end

        # Wraps a plain canned response in a StructuredOutput keyed by the
        # first property of the requested JSON schema, so specs exercising
        # response_format get a realistic object back.
        def as_structured_output(response)
          schema_properties = model_params[:response_format].dig(:json_schema, :schema, :properties)
          return response if schema_properties.blank?

          output = DiscourseAi::Completions::StructuredOutput.new(schema_properties)
          output << { schema_properties.keys.first => response }.to_json

          output
        end
      end
    end
  end
end
|