FEATURE: remove gpt-4-0125-preview, swap in gpt-4-turbo (#568)

OpenAI has just released gpt-4-turbo (with vision).

This change stops using the old preview model (gpt-4-0125-preview) and swaps in
the officially released gpt-4-turbo.

Vision support will follow in a later change.
Sam 2024-04-10 22:53:20 +10:00 committed by GitHub
parent f20ada00db
commit 6de9c53a71
3 changed files with 2 additions and 6 deletions
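To make the net effect easier to see before reading the per-file hunks, here is a condensed Ruby sketch of the behaviour after this commit, assembled from the diff below. The class names (OpenAiModels, OpenAiEndpoint) and the method names other than default_options are assumptions for illustration only and are not taken from the repository; only the literals shown in the hunks come from the source.

# Condensed sketch of the post-change behaviour (names outside the diff are assumed).
module DiscourseAi
  class OpenAiModels # assumed name; the hunks only show the enclosing module
    SUPPORTED = %w[
      gpt-4
      gpt-3.5-turbo-16k
      gpt-4-32k
      gpt-4-turbo
      gpt-4-vision-preview
    ]

    # gpt-4-0125-preview is dropped from the whitelist, so it no longer validates.
    def self.supported?(model_name)
      SUPPORTED.include?(model_name)
    end

    # Token limits from the second hunk; branches not shown in the diff are elided.
    def self.max_tokens(model_name)
      case model_name
      when "gpt-4-32k" then 32_768
      when "gpt-4-turbo" then 131_072 # same window the preview model had
      end
    end
  end

  class OpenAiEndpoint # assumed name
    def initialize(model)
      @model = model
    end

    # gpt-4-turbo is no longer rewritten to gpt-4-0125-preview before the request.
    def default_options
      { model: @model }
    end
  end
end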

File 1 of 3

@@ -11,7 +11,6 @@ module DiscourseAi
 gpt-4
 gpt-3.5-turbo-16k
 gpt-4-32k
-gpt-4-0125-preview
 gpt-4-turbo
 gpt-4-vision-preview
 ].include?(model_name)
@@ -124,8 +123,6 @@ module DiscourseAi
 8192
 when "gpt-4-32k"
 32_768
-when "gpt-4-0125-preview"
-131_072
 when "gpt-4-turbo"
 131_072
 else

File 2 of 3

@@ -13,7 +13,6 @@ module DiscourseAi
 gpt-4
 gpt-3.5-turbo-16k
 gpt-4-32k
-gpt-4-0125-preview
 gpt-4-turbo
 gpt-4-vision-preview
 ].include?(model_name)
@@ -75,7 +74,7 @@ module DiscourseAi
 end
 def default_options
-{ model: model == "gpt-4-turbo" ? "gpt-4-0125-preview" : model }
+{ model: model }
 end
 def provider_id

File 3 of 3

@@ -7,7 +7,7 @@ module DiscourseAi
 foldable_models = [
 Models::OpenAi.new("open_ai:gpt-4", max_tokens: 8192),
 Models::OpenAi.new("open_ai:gpt-4-32k", max_tokens: 32_768),
-Models::OpenAi.new("open_ai:gpt-4-0125-preview", max_tokens: 100_000),
+Models::OpenAi.new("open_ai:gpt-4-turbo", max_tokens: 100_000),
 Models::OpenAi.new("open_ai:gpt-3.5-turbo", max_tokens: 4096),
 Models::OpenAi.new("open_ai:gpt-3.5-turbo-16k", max_tokens: 16_384),
 Models::Llama2.new(