FIX: call after tool calls failing on OpenAI / Gemini (#599)
A recent change meant that the LLM instance was cached internally, so repeat inference calls would accumulate state in the Endpoint object, leading to model failures. Both Gemini and OpenAI expect a clean endpoint object because they set per-request data on it. This amends the internals so that llm.generate always operates on clean objects.
commit 6623928b95 (parent aab59b9327)
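A minimal sketch of the failure mode, using a simplified stand-in class (StatefulEndpoint is illustrative only, not a class from this repo). A flag memoized with ||= on a reused object can only ever flip to true:

  class StatefulEndpoint
    def extract(tool_calls_present)
      # ||= keeps the first truthy value, so once one response contains a
      # tool call, every later response seen by this same object looks like one too
      @has_function_call ||= tool_calls_present
    end
  end

  endpoint = StatefulEndpoint.new
  endpoint.extract(true)  # => true: first response invoked a tool
  endpoint.extract(false) # => true: stale state from the previous call

The diffs below fix this by storing only the endpoint class on the Llm object and instantiating a clean endpoint inside each generate call.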
@@ -12,9 +12,9 @@ module DiscourseAi
       <<~PROMPT
         You are Discourse Helper Bot

-        - Discourse Helper Bot understand *markdown* and responds in Discourse **markdown**.
-        - Discourse Helper Bot has access to the search function on meta.discourse.org and can help you find answers to your questions.
-        - Discourse Helper Bot ALWAYS backs up answers with actual search results from meta.discourse.org, even if the information is in your training set
+        - Discourse Helper Bot understands *markdown* and responds in Discourse **markdown**.
+        - Discourse Helper Bot has access to the search function on meta.discourse.org and can help answer user questions.
+        - Discourse Helper Bot ALWAYS backs up answers with actual search results from meta.discourse.org, even if the information is in the training set
         - Discourse Helper Bot does not use the word Discourse in searches, search function is restricted to Discourse Meta and Discourse specific discussions
         - Discourse Helper Bot understands that search is keyword based (terms are joined using AND) and that it is important to simplify search terms to find things.
         - Discourse Helper Bot understands that users often badly phrase and misspell words, it will compensate for that by guessing what user means.

@@ -139,7 +139,6 @@ module DiscourseAi
           return if !parsed

           response_h = @streaming_mode ? parsed.dig(:delta) : parsed.dig(:message)
-
           @has_function_call ||= response_h.dig(:tool_calls).present?
           @has_function_call ? response_h.dig(:tool_calls, 0) : response_h.dig(:content)
         end

@@ -100,23 +100,24 @@ module DiscourseAi
           if @canned_llm && @canned_llm != model_name
             raise "Invalid call LLM call, expected #{@canned_llm} but got #{model_name}"
           end
-          return new(dialect_klass, @canned_response, model_name)
+          return new(dialect_klass, nil, model_name, gateway: @canned_response)
         end

-        gateway =
+        gateway_klass =
           DiscourseAi::Completions::Endpoints::Base.endpoint_for(
             provider_name,
             model_name_without_prov,
-          ).new(model_name_without_prov, dialect_klass.tokenizer)
+          )

-        new(dialect_klass, gateway, model_name_without_prov)
+        new(dialect_klass, gateway_klass, model_name_without_prov)
       end
     end

-    def initialize(dialect_klass, gateway, model_name)
+    def initialize(dialect_klass, gateway_klass, model_name, gateway: nil)
       @dialect_klass = dialect_klass
-      @gateway = gateway
+      @gateway_klass = gateway_klass
       @model_name = model_name
+      @gateway = gateway
     end

     delegate :tokenizer, to: :dialect_klass

@@ -174,6 +175,7 @@ module DiscourseAi

       model_params.keys.each { |key| model_params.delete(key) if model_params[key].nil? }

+      gateway = @gateway || gateway_klass.new(model_name, dialect_klass.tokenizer)
       dialect = dialect_klass.new(prompt, model_name, opts: model_params)
       gateway.perform_completion!(dialect, user, model_params, &partial_read_blk)
     end

@@ -186,7 +188,7 @@ module DiscourseAi

       private

-      attr_reader :dialect_klass, :gateway
+      attr_reader :dialect_klass, :gateway_klass
     end
   end
 end

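With the changes above, a proxy can be reused safely across requests; a short usage sketch (prompt and user stand for the objects built in the specs below):

  llm = DiscourseAi::Completions::Llm.proxy("open_ai:gpt-4-turbo")

  # generate now builds a fresh endpoint each time:
  #   gateway = @gateway || gateway_klass.new(model_name, dialect_klass.tokenizer)
  # so per-request state such as @has_function_call cannot leak between calls
  llm.generate(prompt, user: user) # first call may return a tool invocation
  llm.generate(prompt, user: user) # second call starts from a clean endpoint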
@@ -170,6 +170,89 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
     UploadCreator.new(image100x100, "image.jpg").create_for(Discourse.system_user.id)
   end

+  describe "repeat calls" do
+    it "can properly reset context" do
+      llm = DiscourseAi::Completions::Llm.proxy("open_ai:gpt-4-turbo")
+
+      tools = [
+        {
+          name: "echo",
+          description: "echo something",
+          parameters: [
+            { name: "text", type: "string", description: "text to echo", required: true },
+          ],
+        },
+      ]
+
+      prompt =
+        DiscourseAi::Completions::Prompt.new(
+          "You are a bot",
+          messages: [type: :user, id: "user1", content: "echo hello"],
+          tools: tools,
+        )
+
+      response = {
+        id: "chatcmpl-9JxkAzzaeO4DSV3omWvok9TKhCjBH",
+        object: "chat.completion",
+        created: 1_714_544_914,
+        model: "gpt-4-turbo-2024-04-09",
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: "assistant",
+              content: nil,
+              tool_calls: [
+                {
+                  id: "call_I8LKnoijVuhKOM85nnEQgWwd",
+                  type: "function",
+                  function: {
+                    name: "echo",
+                    arguments: "{\"text\":\"hello\"}",
+                  },
+                },
+              ],
+            },
+            logprobs: nil,
+            finish_reason: "tool_calls",
+          },
+        ],
+        usage: {
+          prompt_tokens: 55,
+          completion_tokens: 13,
+          total_tokens: 68,
+        },
+        system_fingerprint: "fp_ea6eb70039",
+      }.to_json
+
+      stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(body: response)
+
+      result = llm.generate(prompt, user: user)
+
+      expected = (<<~TXT).strip
+        <function_calls>
+        <invoke>
+        <tool_name>echo</tool_name>
+        <parameters>
+        <text>hello</text>
+        </parameters>
+        <tool_id>call_I8LKnoijVuhKOM85nnEQgWwd</tool_id>
+        </invoke>
+        </function_calls>
+      TXT
+
+      expect(result.strip).to eq(expected)
+
+      stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
+        body: { choices: [message: { content: "OK" }] }.to_json,
+      )
+
+      result = llm.generate(prompt, user: user)
+
+      expect(result).to eq("OK")
+    end
+  end
+
   describe "image support" do
     it "can handle images" do
       llm = DiscourseAi::Completions::Llm.proxy("open_ai:gpt-4-turbo")

@@ -4,8 +4,9 @@ RSpec.describe DiscourseAi::Completions::Llm do
   subject(:llm) do
     described_class.new(
       DiscourseAi::Completions::Dialects::OrcaStyle,
-      canned_response,
+      nil,
       "hugging_face:Upstage-Llama-2-*-instruct-v2",
+      gateway: canned_response,
     )
   end