FIX: call after tool calls failing on OpenAI / Gemini (#599)

A recent change meant that the llm instance was cached internally; repeat calls
to inference would accumulate data in the Endpoint object, leading to model
failures.

Both Gemini and OpenAI expect a clean endpoint object because they set data
on it.

This amends internals to make sure llm.generate will always operate
on clean objects.
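
In outline, the proxy now stores the endpoint class rather than an instance and
builds a fresh one inside generate. A simplified sketch (signature trimmed; see
the generate hunk below for the real code):

    def generate(prompt, user:, **model_params)
      # Build a fresh endpoint for every call so nothing carries over from the
      # previous request. @gateway is only pre-set for canned test responses.
      gateway = @gateway || gateway_klass.new(model_name, dialect_klass.tokenizer)
      dialect = dialect_klass.new(prompt, model_name, opts: model_params)
      gateway.perform_completion!(dialect, user, model_params)
    end
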
Sam 2024-05-01 17:50:58 +10:00 committed by GitHub
parent aab59b9327
commit 6623928b95
5 changed files with 97 additions and 12 deletions


@@ -12,9 +12,9 @@ module DiscourseAi
       <<~PROMPT
         You are Discourse Helper Bot
-        - Discourse Helper Bot understand *markdown* and responds in Discourse **markdown**.
-        - Discourse Helper Bot has access to the search function on meta.discourse.org and can help you find answers to your questions.
-        - Discourse Helper Bot ALWAYS backs up answers with actual search results from meta.discourse.org, even if the information is in your training set
+        - Discourse Helper Bot understands *markdown* and responds in Discourse **markdown**.
+        - Discourse Helper Bot has access to the search function on meta.discourse.org and can help answer user questions.
+        - Discourse Helper Bot ALWAYS backs up answers with actual search results from meta.discourse.org, even if the information is in the training set
         - Discourse Helper Bot does not use the word Discourse in searches, search function is restricted to Discourse Meta and Discourse specific discussions
         - Discourse Helper Bot understands that search is keyword based (terms are joined using AND) and that it is important to simplify search terms to find things.
         - Discourse Helper Bot understands that users often badly phrase and misspell words, it will compensate for that by guessing what user means.


@@ -139,7 +139,6 @@ module DiscourseAi
         return if !parsed
-
         response_h = @streaming_mode ? parsed.dig(:delta) : parsed.dig(:message)
         @has_function_call ||= response_h.dig(:tool_calls).present?
         @has_function_call ? response_h.dig(:tool_calls, 0) : response_h.dig(:content)
       end
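
The hunk above shows the state in question: @has_function_call is memoized with
||= and never resets, so once an endpoint instance has seen a tool call, every
later response it parses is treated as a tool call too. A minimal, hypothetical
illustration of that failure mode (StatefulEndpoint stands in for the real
OpenAI/Gemini endpoint classes):

    class StatefulEndpoint
      def perform_completion!(text)
        # ||= flips the flag once and keeps it for the object's lifetime
        @has_function_call ||= text.include?("tool")
        @has_function_call ? "<function_calls>...</function_calls>" : text
      end
    end

    endpoint = StatefulEndpoint.new
    endpoint.perform_completion!("call the echo tool")      # tool-call payload, flag set
    endpoint.perform_completion!("just say hi")             # stale flag: still a tool-call payload
    StatefulEndpoint.new.perform_completion!("just say hi") # fresh object: "just say hi"
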


@@ -100,23 +100,24 @@ module DiscourseAi
           if @canned_llm && @canned_llm != model_name
             raise "Invalid call LLM call, expected #{@canned_llm} but got #{model_name}"
           end

-          return new(dialect_klass, @canned_response, model_name)
+          return new(dialect_klass, nil, model_name, gateway: @canned_response)
         end

-        gateway =
+        gateway_klass =
           DiscourseAi::Completions::Endpoints::Base.endpoint_for(
             provider_name,
             model_name_without_prov,
-          ).new(model_name_without_prov, dialect_klass.tokenizer)
+          )

-        new(dialect_klass, gateway, model_name_without_prov)
+        new(dialect_klass, gateway_klass, model_name_without_prov)
       end
     end

-    def initialize(dialect_klass, gateway, model_name)
+    def initialize(dialect_klass, gateway_klass, model_name, gateway: nil)
       @dialect_klass = dialect_klass
-      @gateway = gateway
+      @gateway_klass = gateway_klass
       @model_name = model_name
+      @gateway = gateway
     end

     delegate :tokenizer, to: :dialect_klass

@@ -174,6 +175,7 @@ module DiscourseAi
       model_params.keys.each { |key| model_params.delete(key) if model_params[key].nil? }

+      gateway = @gateway || gateway_klass.new(model_name, dialect_klass.tokenizer)
       dialect = dialect_klass.new(prompt, model_name, opts: model_params)
       gateway.perform_completion!(dialect, user, model_params, &partial_read_blk)
     end

@@ -186,7 +188,7 @@ module DiscourseAi
     private

-    attr_reader :dialect_klass, :gateway
+    attr_reader :dialect_klass, :gateway_klass
     end
   end
 end
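
The @canned_response branch above feeds the with_prepared_responses test
helper; after this change the canned endpoint travels through the new gateway:
keyword instead of the positional gateway slot. A usage sketch, assuming the
helper's block form from the specs (prompt and user are placeholders):

    DiscourseAi::Completions::Llm.with_prepared_responses(["canned reply"]) do
      # Any proxy created inside the block returns the canned response
      # instead of performing an HTTP request.
      llm = DiscourseAi::Completions::Llm.proxy("open_ai:gpt-4-turbo")
      llm.generate(prompt, user: user) # => "canned reply"
    end
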


@@ -170,6 +170,89 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
     UploadCreator.new(image100x100, "image.jpg").create_for(Discourse.system_user.id)
   end

+  describe "repeat calls" do
+    it "can properly reset context" do
+      llm = DiscourseAi::Completions::Llm.proxy("open_ai:gpt-4-turbo")
+
+      tools = [
+        {
+          name: "echo",
+          description: "echo something",
+          parameters: [
+            { name: "text", type: "string", description: "text to echo", required: true },
+          ],
+        },
+      ]
+
+      prompt =
+        DiscourseAi::Completions::Prompt.new(
+          "You are a bot",
+          messages: [type: :user, id: "user1", content: "echo hello"],
+          tools: tools,
+        )
+
+      response = {
+        id: "chatcmpl-9JxkAzzaeO4DSV3omWvok9TKhCjBH",
+        object: "chat.completion",
+        created: 1_714_544_914,
+        model: "gpt-4-turbo-2024-04-09",
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: "assistant",
+              content: nil,
+              tool_calls: [
+                {
+                  id: "call_I8LKnoijVuhKOM85nnEQgWwd",
+                  type: "function",
+                  function: {
+                    name: "echo",
+                    arguments: "{\"text\":\"hello\"}",
+                  },
+                },
+              ],
+            },
+            logprobs: nil,
+            finish_reason: "tool_calls",
+          },
+        ],
+        usage: {
+          prompt_tokens: 55,
+          completion_tokens: 13,
+          total_tokens: 68,
+        },
+        system_fingerprint: "fp_ea6eb70039",
+      }.to_json
+
+      stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(body: response)
+
+      result = llm.generate(prompt, user: user)
+
+      expected = (<<~TXT).strip
+        <function_calls>
+        <invoke>
+        <tool_name>echo</tool_name>
+        <parameters>
+        <text>hello</text>
+        </parameters>
+        <tool_id>call_I8LKnoijVuhKOM85nnEQgWwd</tool_id>
+        </invoke>
+        </function_calls>
+      TXT
+
+      expect(result.strip).to eq(expected)
+
+      stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
+        body: { choices: [message: { content: "OK" }] }.to_json,
+      )
+
+      result = llm.generate(prompt, user: user)
+      expect(result).to eq("OK")
+    end
+  end
+
   describe "image support" do
     it "can handle images" do
       llm = DiscourseAi::Completions::Llm.proxy("open_ai:gpt-4-turbo")


@@ -4,8 +4,9 @@ RSpec.describe DiscourseAi::Completions::Llm do
   subject(:llm) do
     described_class.new(
       DiscourseAi::Completions::Dialects::OrcaStyle,
-      canned_response,
+      nil,
       "hugging_face:Upstage-Llama-2-*-instruct-v2",
+      gateway: canned_response,
     )
   end