FIX: Claude's max_tokens_to_sample is a required field (#97)

This commit is contained in:
Roman Rizzi 2023-06-27 14:42:33 -03:00 committed by GitHub
parent 9a79afcdbf
commit 1b568f2391
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 38 additions and 40 deletions

View File

@ -22,9 +22,9 @@ module ::DiscourseAi
payload = { model: model, prompt: prompt }
payload[:temperature] = temperature if temperature
payload[:top_p] = top_p if top_p
payload[:max_tokens_to_sample] = max_tokens if max_tokens
payload[:max_tokens_to_sample] = max_tokens || 2000
payload[:temperature] = temperature if temperature
payload[:stream] = true if block_given?
Net::HTTP.start(

View File

@ -85,8 +85,8 @@ RSpec.describe Jobs::CreateAiReply do
deltas,
model: "claude-v1.3",
req_opts: {
temperature: 0.4,
max_tokens_to_sample: 3000,
temperature: 0.4,
stream: true,
},
)

View File

@ -3,10 +3,10 @@
require_relative "../../../../support/anthropic_completion_stubs"
RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
let(:model) { "claude-v1" }
let(:max_tokens) { 720 }
subject(:model) { described_class.new(model_name, max_tokens: max_tokens) }
subject { described_class.new(model, max_tokens: max_tokens) }
let(:model_name) { "claude-v1" }
let(:max_tokens) { 720 }
let(:content) do
{
@ -44,7 +44,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
)
summarized_chunks =
subject.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
expect(summarized_chunks).to contain_exactly("This is summary 1")
end
@ -67,7 +67,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
end
summarized_chunks =
subject.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
expect(summarized_chunks).to contain_exactly("This is summary 1", "This is summary 2")
end
@ -86,9 +86,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
AnthropicCompletionStubs.stub_response(messages, "<ai>concatenated summary</ai>")
expect(subject.concatenate_summaries(["summary 1", "summary 2"])).to eq(
"concatenated summary",
)
expect(model.concatenate_summaries(["summary 1", "summary 2"])).to eq("concatenated summary")
end
end
@ -110,7 +108,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Anthropic do
AnthropicCompletionStubs.stub_response(instructions, "<ai>truncated summary</ai>")
expect(subject.summarize_with_truncation(content[:contents], opts)).to eq("truncated summary")
expect(model.summarize_with_truncation(content[:contents], opts)).to eq("truncated summary")
end
end
end

View File

@ -1,10 +1,10 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::Summarization::Models::Discourse do
let(:model) { "bart-large-cnn-samsum" }
let(:max_tokens) { 20 }
subject(:model) { described_class.new(model_name, max_tokens: max_tokens) }
subject { described_class.new(model, max_tokens: max_tokens) }
let(:model_name) { "bart-large-cnn-samsum" }
let(:max_tokens) { 20 }
let(:content) do
{
@ -22,7 +22,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
:post,
"#{SiteSetting.ai_summarization_discourse_service_api_endpoint}/api/v1/classify",
)
.with(body: JSON.dump(model: model, content: prompt))
.with(body: JSON.dump(model: model_name, content: prompt))
.to_return(status: 200, body: JSON.dump(summary_text: response))
end
@ -40,7 +40,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
stub_request(expected_messages(content[:contents], opts), "This is summary 1")
summarized_chunks =
subject.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
expect(summarized_chunks).to contain_exactly("This is summary 1")
end
@ -60,7 +60,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
end
summarized_chunks =
subject.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
expect(summarized_chunks).to contain_exactly("This is summary 1", "This is summary 2")
end
@ -73,9 +73,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
stub_request(messages, "concatenated summary")
expect(subject.concatenate_summaries(["summary 1", "summary 2"])).to eq(
"concatenated summary",
)
expect(model.concatenate_summaries(["summary 1", "summary 2"])).to eq("concatenated summary")
end
end
@ -87,7 +85,7 @@ RSpec.describe DiscourseAi::Summarization::Models::Discourse do
stub_request("( 1 asd said : this is", "truncated summary")
expect(subject.summarize_with_truncation(content[:contents], opts)).to eq("truncated summary")
expect(model.summarize_with_truncation(content[:contents], opts)).to eq("truncated summary")
end
end
end

View File

@ -3,10 +3,10 @@
require_relative "../../../../support/openai_completions_inference_stubs"
RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
let(:model) { "gpt-3.5-turbo" }
let(:max_tokens) { 720 }
subject(:model) { described_class.new(model_name, max_tokens: max_tokens) }
subject { described_class.new(model, max_tokens: max_tokens) }
let(:model_name) { "gpt-3.5-turbo" }
let(:max_tokens) { 720 }
let(:content) do
{
@ -47,7 +47,7 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
)
summarized_chunks =
subject.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
expect(summarized_chunks).to contain_exactly("This is summary 1")
end
@ -70,7 +70,7 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
end
summarized_chunks =
subject.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
model.summarize_in_chunks(content[:contents], opts).map { |c| c[:summary] }
expect(summarized_chunks).to contain_exactly("This is summary 1", "This is summary 2")
end
@ -90,9 +90,7 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
OpenAiCompletionsInferenceStubs.stub_response(messages, "concatenated summary")
expect(subject.concatenate_summaries(["summary 1", "summary 2"])).to eq(
"concatenated summary",
)
expect(model.concatenate_summaries(["summary 1", "summary 2"])).to eq("concatenated summary")
end
end
@ -110,7 +108,7 @@ RSpec.describe DiscourseAi::Summarization::Models::OpenAi do
OpenAiCompletionsInferenceStubs.stub_response(truncated_version, "truncated summary")
expect(subject.summarize_with_truncation(content[:contents], opts)).to eq("truncated summary")
expect(model.summarize_with_truncation(content[:contents], opts)).to eq("truncated summary")
end
end
end

View File

@ -4,6 +4,8 @@ require_relative "../../../../support/summarization/dummy_completion_model"
RSpec.describe DiscourseAi::Summarization::Strategies::FoldContent do
describe "#summarize" do
subject(:strategy) { described_class.new(model) }
let(:summarize_text) { "This is a text" }
let(:model) { DummyCompletionModel.new(model_tokens) }
let(:model_tokens) do
@ -11,13 +13,11 @@ RSpec.describe DiscourseAi::Summarization::Strategies::FoldContent do
DiscourseAi::Tokenizer::BertTokenizer.size("(1 asd said: This is a text ") + 3
end
subject { described_class.new(model) }
let(:content) { { contents: [{ poster: "asd", id: 1, text: summarize_text }] } }
context "when the content to summarize fits in a single call" do
it "does one call to summarize content" do
result = subject.summarize(content)
result = strategy.summarize(content)
expect(model.summarization_calls).to eq(1)
expect(result[:summary]).to eq(DummyCompletionModel::SINGLE_SUMMARY)
@ -28,7 +28,7 @@ RSpec.describe DiscourseAi::Summarization::Strategies::FoldContent do
it "summarizes each chunk and then concatenates them" do
content[:contents] << { poster: "asd2", id: 2, text: summarize_text }
result = subject.summarize(content)
result = strategy.summarize(content)
expect(model.summarization_calls).to eq(3)
expect(result[:summary]).to eq(DummyCompletionModel::CONCATENATED_SUMMARIES)

View File

@ -4,19 +4,19 @@ require_relative "../../../../support/summarization/dummy_completion_model"
RSpec.describe DiscourseAi::Summarization::Strategies::TruncateContent do
describe "#summarize" do
subject(:strategy) { described_class.new(model) }
let(:summarize_text) { "This is a text" }
let(:model_tokens) { summarize_text.length }
let(:model) { DummyCompletionModel.new(model_tokens) }
subject { described_class.new(model) }
let(:content) { { contents: [{ poster: "asd", id: 1, text: summarize_text }] } }
context "when the content to summarize doesn't fit in a single call" do
it "summarizes a truncated version" do
content[:contents] << { poster: "asd2", id: 2, text: summarize_text }
result = subject.summarize(content)
result = strategy.summarize(content)
expect(model.summarization_calls).to eq(1)
expect(result[:summary]).to eq(DummyCompletionModel::SINGLE_SUMMARY)

View File

@ -9,7 +9,7 @@ RSpec.describe DiscourseAi::Inference::AnthropicCompletions do
response_text = "1. Serenity\\n2. Laughter\\n3. Adventure"
prompt = "Human: write 3 words\n\n"
user_id = 183
req_opts = { temperature: 0.5, max_tokens_to_sample: 700 }
req_opts = { max_tokens_to_sample: 700, temperature: 0.5 }
AnthropicCompletionStubs.stub_response(prompt, response_text, req_opts: req_opts)

View File

@ -17,7 +17,11 @@ class AnthropicCompletionStubs
def stub_response(prompt, response_text, req_opts: {})
WebMock
.stub_request(:post, "https://api.anthropic.com/v1/complete")
.with(body: { model: "claude-v1", prompt: prompt }.merge(req_opts).to_json)
.with(
body: { model: "claude-v1", prompt: prompt, max_tokens_to_sample: 2000 }.merge(
req_opts,
).to_json,
)
.to_return(status: 200, body: JSON.dump(response(response_text)))
end