FEATURE: support gpt-4-0125 which was just released (#443)
The new model offers better performance and is always preferable to the old one, which has Unicode issues during function calls.
This commit is contained in: parent b461ebc4ca, commit 092da860e2
@@ -11,7 +11,7 @@ module DiscourseAi
           gpt-4
           gpt-3.5-turbo-16k
           gpt-4-32k
-          gpt-4-1106-preview
+          gpt-4-0125-preview
           gpt-4-turbo
         ].include?(model_name)
       end
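The list above is an allow-list of chat-model names the plugin will accept. A minimal sketch of that pattern as standalone Ruby, where the constant and helper names are assumptions rather than the plugin's real identifiers:

# Sketch only: constant and method names are illustrative, not the plugin's.
SUPPORTED_OPENAI_MODELS = %w[
  gpt-4
  gpt-3.5-turbo-16k
  gpt-4-32k
  gpt-4-0125-preview
  gpt-4-turbo
].freeze

# Returns true only for the model names listed above.
def supported_model?(model_name)
  SUPPORTED_OPENAI_MODELS.include?(model_name)
end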
@@ -122,7 +122,7 @@ module DiscourseAi
           8192
         when "gpt-4-32k"
           32_768
-        when "gpt-4-1106-preview"
+        when "gpt-4-0125-preview"
           131_072
         when "gpt-4-turbo"
           131_072
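This hunk moves the 131_072-token prompt ceiling from the 1106 preview to the 0125 preview. A minimal sketch of the lookup as a standalone method (the method name and the fallback value are assumptions; the per-model limits are the ones visible in the diff):

# Sketch of a per-model token-ceiling lookup; names are illustrative.
def max_prompt_tokens(model_name)
  case model_name
  when "gpt-4"
    8192
  when "gpt-4-32k"
    32_768
  when "gpt-4-0125-preview"
    131_072
  when "gpt-4-turbo"
    131_072
  else
    4096 # assumed fallback; the real default is not visible in this hunk
  end
end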
@@ -10,7 +10,7 @@ module DiscourseAi
           gpt-4
           gpt-3.5-turbo-16k
           gpt-4-32k
-          gpt-4-1106-preview
+          gpt-4-0125-preview
           gpt-4-turbo
         ].include?(model_name)
       end
@@ -27,7 +27,7 @@ module DiscourseAi
       end

       def default_options
-        { model: model == "gpt-4-turbo" ? "gpt-4-1106-preview" : model }
+        { model: model == "gpt-4-turbo" ? "gpt-4-0125-preview" : model }
       end

       def provider_id
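With this change, the user-facing "gpt-4-turbo" setting is translated to the dated 0125 snapshot when the request payload is built. A small standalone sketch of that resolution step (the helper name is an assumption):

# Resolve the configured model name to the concrete API model identifier.
def resolved_model(model)
  model == "gpt-4-turbo" ? "gpt-4-0125-preview" : model
end

resolved_model("gpt-4-turbo")       # => "gpt-4-0125-preview"
resolved_model("gpt-3.5-turbo-16k") # => "gpt-3.5-turbo-16k"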
@@ -7,7 +7,7 @@ module DiscourseAi
       foldable_models = [
         Models::OpenAi.new("gpt-4", max_tokens: 8192),
         Models::OpenAi.new("gpt-4-32k", max_tokens: 32_768),
-        Models::OpenAi.new("gpt-4-1106-preview", max_tokens: 100_000),
+        Models::OpenAi.new("gpt-4-0125-preview", max_tokens: 100_000),
         Models::OpenAi.new("gpt-3.5-turbo", max_tokens: 4096),
         Models::OpenAi.new("gpt-3.5-turbo-16k", max_tokens: 16_384),
         Models::Anthropic.new("claude-2", max_tokens: 200_000),
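The last hunk swaps the dated preview entry in this strategy's list of candidate models. An illustrative stand-in for those entries, using a plain Struct instead of the plugin's Models::OpenAi class (the "largest context window wins" query shown at the end is an assumption about how such a list might be used, not the plugin's actual logic):

# Illustrative stand-in; not the plugin's Models::OpenAi implementation.
ModelEntry = Struct.new(:name, :max_tokens, keyword_init: true)

foldable_models = [
  ModelEntry.new(name: "gpt-4", max_tokens: 8192),
  ModelEntry.new(name: "gpt-4-32k", max_tokens: 32_768),
  ModelEntry.new(name: "gpt-4-0125-preview", max_tokens: 100_000),
]

# Example query: pick the entry with the largest context window.
foldable_models.max_by(&:max_tokens).name # => "gpt-4-0125-preview"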