From 6de9c53a71e953e1c022d2e0fec93153ad0190d5 Mon Sep 17 00:00:00 2001 From: Sam Date: Wed, 10 Apr 2024 22:53:20 +1000 Subject: [PATCH] FEATURE: remove gpt-4-0125-preview, swap with gpt-4-turbo (#568) OpenAI just released gpt-4-turbo (with vision). This change stops using the old preview model and swaps in the officially released gpt-4-turbo. An implementation of vision support will follow. --- lib/completions/dialects/chat_gpt.rb | 3 --- lib/completions/endpoints/open_ai.rb | 3 +-- lib/summarization/entry_point.rb | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/lib/completions/dialects/chat_gpt.rb b/lib/completions/dialects/chat_gpt.rb index 6cd9f3b8..915535f3 100644 --- a/lib/completions/dialects/chat_gpt.rb +++ b/lib/completions/dialects/chat_gpt.rb @@ -11,7 +11,6 @@ module DiscourseAi gpt-4 gpt-3.5-turbo-16k gpt-4-32k - gpt-4-0125-preview gpt-4-turbo gpt-4-vision-preview ].include?(model_name) @@ -124,8 +123,6 @@ module DiscourseAi 8192 when "gpt-4-32k" 32_768 - when "gpt-4-0125-preview" - 131_072 when "gpt-4-turbo" 131_072 else diff --git a/lib/completions/endpoints/open_ai.rb b/lib/completions/endpoints/open_ai.rb index 029384bb..50044810 100644 --- a/lib/completions/endpoints/open_ai.rb +++ b/lib/completions/endpoints/open_ai.rb @@ -13,7 +13,6 @@ module DiscourseAi gpt-4 gpt-3.5-turbo-16k gpt-4-32k - gpt-4-0125-preview gpt-4-turbo gpt-4-vision-preview ].include?(model_name) @@ -75,7 +74,7 @@ module DiscourseAi end def default_options - { model: model == "gpt-4-turbo" ? 
"gpt-4-0125-preview" : model } + { model: model } end def provider_id diff --git a/lib/summarization/entry_point.rb b/lib/summarization/entry_point.rb index 42308519..480d6bdb 100644 --- a/lib/summarization/entry_point.rb +++ b/lib/summarization/entry_point.rb @@ -7,7 +7,7 @@ module DiscourseAi foldable_models = [ Models::OpenAi.new("open_ai:gpt-4", max_tokens: 8192), Models::OpenAi.new("open_ai:gpt-4-32k", max_tokens: 32_768), - Models::OpenAi.new("open_ai:gpt-4-0125-preview", max_tokens: 100_000), + Models::OpenAi.new("open_ai:gpt-4-turbo", max_tokens: 100_000), Models::OpenAi.new("open_ai:gpt-3.5-turbo", max_tokens: 4096), Models::OpenAi.new("open_ai:gpt-3.5-turbo-16k", max_tokens: 16_384), Models::Llama2.new(