discourse-ai/lib/completions/endpoints/anthropic.rb

# frozen_string_literal: true

module DiscourseAi
  module Completions
    module Endpoints
      class Anthropic < Base
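        # Endpoint classes are matched by provider name; this one handles
        # models configured with the "anthropic" provider.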
        def self.can_contact?(model_provider)
          model_provider == "anthropic"
        end

        def normalize_model_params(model_params)
          # max_tokens, temperature, stop_sequences are already supported
          model_params
        end
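
        # Maps the generic model names Discourse stores to the dated model ids
        # the Anthropic API expects; names that already match pass through.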
        def default_options(dialect)
          mapped_model =
            case llm_model.name
            when "claude-2"
              "claude-2.1"
            when "claude-instant-1"
              "claude-instant-1.2"
            when "claude-3-haiku"
              "claude-3-haiku-20240307"
            when "claude-3-sonnet"
              "claude-3-sonnet-20240229"
            when "claude-3-opus"
              "claude-3-opus-20240229"
            when "claude-3-5-sonnet"
              "claude-3-5-sonnet-latest"
            else
              llm_model.name
            end

          # Note: Anthropic requires this param
          max_tokens = 4096
          # the dot in /3.5/ also matches the hyphen, so both "3.5" and "3-5"
          # model names get the larger limit
          max_tokens = 8192 if mapped_model.match?(/3.5/)

          options = { model: mapped_model, max_tokens: max_tokens }

          # with XML tool emulation, stop generation at the closing tag of an
          # emulated tool call so the model does not continue past it
          options[:stop_sequences] = ["</function_calls>"] if !dialect.native_tool_support? &&
            dialect.prompt.has_tools?

          options
        end
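
        # Provider id recorded on AiApiAuditLog rows for this endpoint.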
        def provider_id
          AiApiAuditLog::Provider::Anthropic
        end

        private
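
        # Scratchpad tags Claude emits when tool prompts are in play; these
        # are stripped from the response.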
        def xml_tags_to_strip(dialect)
          if dialect.prompt.has_tools?
            %w[thinking search_quality_reflection search_quality_score]
          else
            []
          end
        end

        # this is an approximation, we will update it later if the request goes through
        def prompt_size(prompt)
          tokenizer.size(prompt.system_prompt.to_s + " " + prompt.messages.to_s)
        end

        def model_uri
          URI(llm_model.url)
        end
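
        # XML tool emulation is used whenever the dialect lacks native tool
        # support; @native_tool_support is captured in prepare_payload.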
        def xml_tools_enabled?
          !@native_tool_support
        end
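
        # Assembles the request body: merges default options and caller
        # params, then adds the system prompt, streaming flag and tools.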
        def prepare_payload(prompt, model_params, dialect)
          @native_tool_support = dialect.native_tool_support?

          payload = default_options(dialect).merge(model_params).merge(messages: prompt.messages)

          payload[:system] = prompt.system_prompt if prompt.system_prompt.present?
          payload[:stream] = true if @streaming_mode

          if prompt.has_tools?
            payload[:tools] = prompt.tools
            if dialect.tool_choice.present?
              payload[:tool_choice] = { type: "tool", name: dialect.tool_choice }
            end
          end

          payload
        end
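
        # Builds the Net::HTTP request; Anthropic authenticates with an
        # x-api-key header plus a pinned anthropic-version.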
        def prepare_request(payload)
          headers = {
            "anthropic-version" => "2023-06-01",
            "x-api-key" => llm_model.api_key,
            "content-type" => "application/json",
          }

          Net::HTTP::Post.new(model_uri, headers).tap { |r| r.body = payload }
        end
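
        # Streamed responses arrive as partial chunks; JsonStreamDecoder
        # buffers them and yields only complete JSON events.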
        def decode_chunk(partial_data)
          @decoder ||= JsonStreamDecoder.new
          (@decoder << partial_data)
            .map { |parsed_json| processor.process_streamed_message(parsed_json) }
            .compact
        end
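
        # Non-streaming responses are handed to the processor in one piece.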
        def decode(response_data)
          processor.process_message(response_data)
        end

        def processor
          @processor ||=
            DiscourseAi::Completions::AnthropicMessageProcessor.new(
              streaming_mode: @streaming_mode,
              partial_tool_calls: partial_tool_calls,
            )
        end

        def has_tool?(_response_data)
          processor.tool_calls.present?
        end

        def tool_calls
          processor.to_tool_calls
        end
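
        # Replaces the estimated token counts on the audit log with the real
        # usage figures reported by the API, when available.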
        def final_log_update(log)
          log.request_tokens = processor.input_tokens if processor.input_tokens
          log.response_tokens = processor.output_tokens if processor.output_tokens
        end
      end
    end
  end
end