diff --git a/lib/completions/dialects/chat_gpt.rb b/lib/completions/dialects/chat_gpt.rb
index e6cf7df1..18872359 100644
--- a/lib/completions/dialects/chat_gpt.rb
+++ b/lib/completions/dialects/chat_gpt.rb
@@ -39,6 +39,11 @@ module DiscourseAi
           llm_model.max_prompt_tokens - buffer
         end
 
+        # no support for streaming or tools or system messages
+        def is_gpt_o?
+          llm_model.provider == "open_ai" && llm_model.name.include?("o1-")
+        end
+
         private
 
         def tools_dialect
@@ -46,7 +51,11 @@ module DiscourseAi
         end
 
         def system_msg(msg)
-          { role: "system", content: msg[:content] }
+          if is_gpt_o?
+            { role: "user", content: msg[:content] }
+          else
+            { role: "system", content: msg[:content] }
+          end
         end
 
         def model_msg(msg)
diff --git a/lib/completions/endpoints/open_ai.rb b/lib/completions/endpoints/open_ai.rb
index c74d3e83..43f19053 100644
--- a/lib/completions/endpoints/open_ai.rb
+++ b/lib/completions/endpoints/open_ai.rb
@@ -27,6 +27,17 @@ module DiscourseAi
           AiApiAuditLog::Provider::OpenAI
         end
 
+        def perform_completion!(dialect, user, model_params = {}, feature_name: nil, &blk)
+          if dialect.respond_to?(:is_gpt_o?) && dialect.is_gpt_o? && block_given?
+            # we need to disable streaming and simulate it
+            blk.call "", lambda { |*| }
+            response = super(dialect, user, model_params, feature_name: feature_name, &nil)
+            blk.call response, lambda { |*| }
+          else
+            super
+          end
+        end
+
        private
 
        def model_uri
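
Reviewer note: a minimal, runnable sketch of the message-shaping change in the first two hunks, for anyone skimming the patch. Per the comment in the patch, the o1 models do not accept "system" role messages, so the dialect downgrades them to "user" messages. FakeModel, is_gpt_o? as a free function, and the literal messages below are illustrative scaffolding, not the real dialect class:

# Hypothetical stand-in for llm_model; only the two attributes the
# predicate reads are modeled.
FakeModel = Struct.new(:provider, :name)

def is_gpt_o?(llm_model)
  llm_model.provider == "open_ai" && llm_model.name.include?("o1-")
end

# Mirrors system_msg from the patch: o1 rejects the "system" role,
# so the message is downgraded to a plain "user" message.
def system_msg(llm_model, msg)
  if is_gpt_o?(llm_model)
    { role: "user", content: msg[:content] }
  else
    { role: "system", content: msg[:content] }
  end
end

o1 = FakeModel.new("open_ai", "o1-preview")
gpt4 = FakeModel.new("open_ai", "gpt-4o")

p system_msg(o1, content: "You are a helpful bot")
# => {:role=>"user", :content=>"You are a helpful bot"}
p system_msg(gpt4, content: "You are a helpful bot")
# => {:role=>"system", :content=>"You are a helpful bot"}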
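
A second sketch, for the endpoint override: o1 has no streaming support per the patch comment, so perform_completion! simulates a stream by invoking the caller's block exactly twice, first with an empty string (presumably so streaming consumers get an immediate signal while the slow blocking call runs) and then with the complete response. The FakeEndpoint class and blocking_request helper are hypothetical; only the blk.call(partial, cancel) shape visible in the diff is assumed.

# Hypothetical endpoint skeleton reproducing only the control flow of
# the override; the real subclass inherits from Endpoints::Base and
# calls super for the HTTP round trip.
class FakeEndpoint
  def perform_completion!(&blk)
    # Announce the "stream" with an empty partial and a no-op cancel proc.
    blk.call "", lambda { |*| }
    response = blocking_request
    # Deliver the whole response as one final partial.
    blk.call response, lambda { |*| }
    response
  end

  private

  def blocking_request
    "full response from the o1 model" # stands in for the blocking HTTP call
  end
end

chunks = []
FakeEndpoint.new.perform_completion! { |partial, _cancel| chunks << partial }
p chunks # => ["", "full response from the o1 model"]

The guard `dialect.respond_to?(:is_gpt_o?) && dialect.is_gpt_o? && block_given?` in the patch means non-streaming callers and non-o1 models fall straight through to super, so the simulation only kicks in when a streaming block meets an o1 model.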