DEV: Remove custom prefix in specs

Keegan George 2025-07-16 11:39:55 -07:00
parent 0fadf1da1a
commit b675c4c39b
No known key found for this signature in database
GPG Key ID: 91B40E38537AC000
49 changed files with 106 additions and 135 deletions

@ -30,7 +30,7 @@ module DiscourseAi
messages: [{ type: :user, content: chat, id: "User" }],
)
DiscourseAi::Completions::Llm.proxy(SiteSetting.ai_helper_model).generate(
DiscourseAi::Completions::Llm.proxy(SiteSetting.ai_default_llm_model).generate(
prompt,
user: Discourse.system_user,
stop_sequences: ["</input>"],

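The pattern above repeats across the whole commit: call sites stop building "custom:<id>" strings and instead hand DiscourseAi::Completions::Llm.proxy a model reference directly. A minimal sketch of the argument handling this implies — the branching and the build_proxy_for helper are illustrative assumptions, not the plugin's shipped code:

def proxy(model_or_reference)
  model =
    case model_or_reference
    when LlmModel
      model_or_reference # specs now pass the record itself
    when Integer
      LlmModel.find(model_or_reference) # raw id, e.g. from a site setting
    when /\Acustom:(\d+)\z/
      LlmModel.find($1.to_i) # legacy "custom:<id>" form still resolvable
    else
      raise ArgumentError, "unknown model reference: #{model_or_reference.inspect}"
    end

  build_proxy_for(model) # hypothetical: wraps the model's dialect and endpoint
end
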
@ -38,7 +38,6 @@ RSpec.describe DiscourseAi::Configuration::Feature do
it "returns the configured llm model" do
SiteSetting.ai_summarization_persona = ai_persona.id
allow_configuring_setting { SiteSetting.ai_summarization_model = "custom:#{llm_model.id}" }
expect(ai_feature.llm_models).to eq([llm_model])
end
end
@ -55,8 +54,6 @@ RSpec.describe DiscourseAi::Configuration::Feature do
it "returns the persona's default llm when no specific helper model is set" do
SiteSetting.ai_helper_proofreader_persona = ai_persona.id
SiteSetting.ai_helper_model = ""
expect(ai_feature.llm_models).to eq([llm_model])
end
end
@ -75,11 +72,7 @@ RSpec.describe DiscourseAi::Configuration::Feature do
it "uses translation model when configured" do
SiteSetting.ai_translation_locale_detector_persona = ai_persona.id
ai_persona.update!(default_llm_id: nil)
allow_configuring_setting do
SiteSetting.ai_translation_model = "custom:#{translation_model.id}"
end
ai_persona.update!(default_llm_id: translation_model.id)
expect(ai_feature.llm_models).to eq([translation_model])
end
end

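The Feature spec changes above drop the per-feature "custom:" settings and resolve models through the persona's default_llm_id, falling back to the ai_default_llm_model setting. A rough sketch of the lookup those expectations imply (names and fallback order are illustrative, not the shipped implementation):

def llm_models
  persona = AiPersona.find_by(id: persona_setting_value) # e.g. ai_summarization_persona
  model = LlmModel.find_by(id: persona&.default_llm_id)
  model ||= LlmModel.find_by(id: SiteSetting.ai_default_llm_model) # assumed fallback
  [model].compact
end
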
@ -51,7 +51,7 @@ RSpec.describe DiscourseAi::Configuration::LlmEnumerator do
component: "text",
name: "model",
metadata: {
value: "custom:#{fake_model.id}",
value: fake_model.id,
},
target: "script",
)

@ -5,10 +5,8 @@ describe Jobs::TopicsLocaleDetectionBackfill do
subject(:job) { described_class.new }
before do
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.discourse_ai_enabled = true
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
SiteSetting.ai_translation_enabled = true
SiteSetting.ai_translation_backfill_hourly_rate = 100
SiteSetting.content_localization_supported_locales = "en"

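The before blocks here and in the translation specs further down now call a shared assign_fake_provider_to helper instead of assigning ai_translation_model by hand. Judging by the fabrication code it replaces, the helper plausibly reads roughly like this (a sketch of the spec support code, not a verified copy):

def assign_fake_provider_to(setting_name)
  Fabricate(:fake_model).tap do |fake_llm|
    SiteSetting.public_send("#{setting_name}=", fake_llm.id)
  end
end
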
@ -4,7 +4,7 @@ require_relative "endpoint_compliance"
RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do
let(:url) { "https://api.anthropic.com/v1/messages" }
fab!(:model) { Fabricate(:anthropic_model, name: "claude-3-opus", vision_enabled: true) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(model) }
let(:image100x100) { plugin_file_from_fixtures("100x100.jpg") }
let(:upload100x100) do
UploadCreator.new(image100x100, "image.jpg").create_for(Discourse.system_user.id)
@ -374,7 +374,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do
model.provider_params["reasoning_tokens"] = 10_000
model.save!
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
result = proxy.generate(prompt, user: Discourse.system_user)
expect(result).to eq("Hello!")
@ -432,7 +432,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do
},
).to_return(status: 200, body: body)
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
result = proxy.generate(prompt, user: Discourse.system_user)
expect(result).to eq("Hello!")

@ -47,7 +47,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
model.provider_params["disable_native_tools"] = true
model.save!
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
incomplete_tool_call = <<~XML.strip
<thinking>I should be ignored</thinking>
@ -122,7 +122,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
end
it "supports streaming function calls" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
@ -293,7 +293,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
describe "Claude 3 support" do
it "supports regular completions" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
@ -340,7 +340,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
model.provider_params["reasoning_tokens"] = 10_000
model.save!
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
@ -387,7 +387,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
end
it "supports claude 3 streaming" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
@ -448,7 +448,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
},
)
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
content = {
@ -487,7 +487,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
describe "disabled tool use" do
it "handles tool_choice: :none by adding a prefill message instead of using tool_choice param" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
# Create a prompt with tool_choice: :none
@ -549,7 +549,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
describe "forced tool use" do
it "can properly force tool use" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
tools = [
@ -640,7 +640,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
{ type: "message_delta", delta: { usage: { output_tokens: 25 } } },
].map { |message| encode_message(message) }
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
bedrock_mock.with_chunk_array_support do
stub_request(
@ -718,7 +718,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
{ type: "message_delta", delta: { usage: { output_tokens: 25 } } },
].map { |message| encode_message(message) }
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(model)
request = nil
bedrock_mock.with_chunk_array_support do
stub_request(

@ -3,7 +3,7 @@ require_relative "endpoint_compliance"
RSpec.describe DiscourseAi::Completions::Endpoints::Cohere do
fab!(:cohere_model)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{cohere_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(cohere_model) }
fab!(:user)
let(:prompt) do

@ -160,7 +160,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -186,7 +186,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -220,7 +220,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -246,7 +246,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -274,7 +274,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -297,7 +297,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
it "properly encodes tool calls" do
prompt = DiscourseAi::Completions::Prompt.new("Hello", tools: [echo_tool])
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
response_json = { "functionCall" => { name: "echo", args: { text: "<S>ydney" } } }
@ -332,7 +332,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -410,7 +410,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
payload = rows.map { |r| "data: #{r.to_json}\n\n" }.join
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
prompt = DiscourseAi::Completions::Prompt.new("Hello", tools: [echo_tool])
@ -450,7 +450,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
TEXT
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
output = []
@ -478,7 +478,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
split = data.split("|")
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
output = []
@ -497,7 +497,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -525,7 +525,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:generateContent?key=123"
stub_request(:post, url).with(
@ -600,7 +600,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
req_body = nil
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
stub_request(:post, url).with(
@ -657,7 +657,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
TEXT
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
output = []

@ -28,7 +28,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
end
it "should be able to make a simple request" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
content = {
"output" => {
@ -90,7 +90,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
stub_request(:post, stream_url).to_return(status: 200, body: messages.join)
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
responses = []
proxy.generate("Hello!", user: user) { |partial| responses << partial }
@ -104,7 +104,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
#model.provider_params["disable_native_tools"] = true
#model.save!
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
prompt =
DiscourseAi::Completions::Prompt.new(
"You are a helpful assistant.",

@ -177,7 +177,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
describe "max tokens for reasoning models" do
it "uses max_completion_tokens for reasoning models" do
model.update!(name: "o3-mini", max_output_tokens: 999)
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
prompt =
DiscourseAi::Completions::Prompt.new(
"You are a bot",
@ -216,7 +216,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
describe "repeat calls" do
it "can properly reset context" do
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
tools = [
{
@ -297,7 +297,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
describe "max tokens remapping" do
it "remaps max_tokens to max_completion_tokens for reasoning models" do
model.update!(name: "o3-mini")
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
body_parsed = nil
stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
@ -313,7 +313,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
describe "forced tool use" do
it "can properly force tool use" do
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
tools = [
{
@ -441,7 +441,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
describe "disabled tool use" do
it "can properly disable tool use with :none" do
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
tools = [
{
@ -532,7 +532,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
describe "image support" do
it "can handle images" do
model = Fabricate(:llm_model, vision_enabled: true)
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
llm = DiscourseAi::Completions::Llm.proxy(model)
prompt =
DiscourseAi::Completions::Prompt.new(
"You are image bot",

@ -25,7 +25,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenRouter do
body: { "choices" => [message: { role: "assistant", content: "world" }] }.to_json,
)
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{open_router_model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(open_router_model)
result = proxy.generate("hello", user: user)
expect(result).to eq("world")
@ -62,7 +62,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenRouter do
body: { "choices" => [message: { role: "assistant", content: "test response" }] }.to_json,
)
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{open_router_model.id}")
proxy = DiscourseAi::Completions::Llm.proxy(open_router_model)
# Request with parameters that should be ignored
proxy.generate("test", user: user, top_p: 0.9, temperature: 0.8, max_tokens: 500)

@ -68,7 +68,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Vllm do
fab!(:llm_model) { Fabricate(:vllm_model) }
fab!(:user)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:vllm_mock) { VllmMock.new(endpoint) }
let(:compliance) do

@ -46,9 +46,7 @@ RSpec.describe DiscourseAi::Completions::Llm do
body: result,
)
result = +""
described_class
.proxy("custom:#{model.id}")
.generate(prompt, user: user) { |partial| result << partial }
described_class.proxy(model).generate(prompt, user: user) { |partial| result << partial }
expect(result).to eq("Hello")
log = AiApiAuditLog.order("id desc").first
@ -75,7 +73,7 @@ RSpec.describe DiscourseAi::Completions::Llm do
)
result =
described_class.proxy("custom:#{model.id}").generate(
described_class.proxy(model).generate(
"Hello",
user: user,
feature_name: "llm_triage",
@ -99,7 +97,7 @@ RSpec.describe DiscourseAi::Completions::Llm do
DiscourseAi::Completions::Endpoints::Fake.chunk_count = 10
end
let(:llm) { described_class.proxy("custom:#{fake_model.id}") }
let(:llm) { described_class.proxy(fake_model) }
let(:prompt) do
DiscourseAi::Completions::Prompt.new(

@ -49,11 +49,8 @@ RSpec.describe DiscourseAi::Automation do
expect(models).to match_array(
[
{ "translated_name" => "#{llm_model.display_name}", "id" => "custom:#{llm_model.id}" },
{
"translated_name" => "#{seeded_model.display_name}",
"id" => "custom:#{seeded_model.id}",
},
{ "translated_name" => "#{llm_model.display_name}", "id" => llm_model.id.to_s },
{ "translated_name" => "#{seeded_model.display_name}", "id" => seeded_model.id.to_s },
],
)
end

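With the prefix gone, the automation model list above serializes raw ids as strings. A sketch of the shape the match_array expectation implies (method name and query are illustrative):

def available_automation_models
  LlmModel.pluck(:display_name, :id).map do |display_name, id|
    { "translated_name" => display_name, "id" => id.to_s }
  end
end
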
@ -24,7 +24,7 @@ describe DiscourseAutomation do
it "can trigger via automation" do
add_automation_field("sender", user.username, type: "user")
add_automation_field("receivers", [user.username], type: "email_group_user")
add_automation_field("model", "custom:#{llm_model.id}")
add_automation_field("model", llm_model.id)
add_automation_field("title", "Weekly report")
DiscourseAi::Completions::Llm.with_prepared_responses(["An Amazing Report!!!"]) do
@ -38,7 +38,7 @@ describe DiscourseAutomation do
it "can target a topic" do
add_automation_field("sender", user.username, type: "user")
add_automation_field("topic_id", "#{post.topic_id}")
add_automation_field("model", "custom:#{llm_model.id}")
add_automation_field("model", llm_model.id)
DiscourseAi::Completions::Llm.with_prepared_responses(["An Amazing Report!!!"]) do
automation.trigger!

@ -27,7 +27,7 @@ describe DiscourseAi::Automation::LlmTriage do
SiteSetting.tagging_enabled = true
add_automation_field("system_prompt", "hello %%POST%%")
add_automation_field("search_for_text", "bad")
add_automation_field("model", "custom:#{llm_model.id}")
add_automation_field("model", llm_model.id)
add_automation_field("category", category.id, type: "category")
add_automation_field("tags", %w[aaa bbb], type: "tags")
add_automation_field("hide_topic", true, type: "boolean")

@ -12,7 +12,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["good"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
hide_topic: true,
system_prompt: "test %%POST%%",
search_for_text: "bad",
@ -27,7 +27,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
hide_topic: true,
system_prompt: "test %%POST%%",
search_for_text: "bad",
@ -44,7 +44,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
category_id: category.id,
system_prompt: "test %%POST%%",
search_for_text: "bad",
@ -60,7 +60,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
canned_reply: "test canned reply 123",
@ -79,7 +79,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -97,7 +97,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -114,7 +114,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -132,7 +132,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -158,7 +158,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -174,7 +174,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["Bad.\n\nYo"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -191,7 +191,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "BAD",
flag_post: true,
@ -210,7 +210,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -229,7 +229,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do |spy|
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
@ -249,7 +249,7 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
model: llm_model.id.to_s,
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,

@ -43,7 +43,7 @@ module DiscourseAi
sender_username: user.username,
receivers: ["fake@discourse.com"],
title: "test report %DATE%",
model: "custom:#{llm_model.id}",
model: llm_model.id,
category_ids: nil,
tags: nil,
allow_secure_categories: false,
@ -80,7 +80,7 @@ module DiscourseAi
sender_username: user.username,
receivers: [receiver.username],
title: "test report",
model: "custom:#{llm_model.id}",
model: llm_model.id,
category_ids: nil,
tags: nil,
allow_secure_categories: false,
@ -125,7 +125,7 @@ module DiscourseAi
sender_username: user.username,
receivers: [receiver.username],
title: "test report",
model: "custom:#{llm_model.id}",
model: llm_model.id,
category_ids: nil,
tags: nil,
allow_secure_categories: false,
@ -168,7 +168,7 @@ module DiscourseAi
sender_username: user.username,
receivers: [receiver.username],
title: "test report",
model: "custom:#{llm_model.id}",
model: llm_model.id,
category_ids: nil,
tags: nil,
allow_secure_categories: false,
@ -200,7 +200,7 @@ module DiscourseAi
sender_username: user.username,
receivers: [group_for_reports.name],
title: "group report",
model: "custom:#{llm_model.id}",
model: llm_model.id,
category_ids: nil,
tags: nil,
allow_secure_categories: false,
@ -228,7 +228,7 @@ module DiscourseAi
sender_username: user.username,
receivers: [receiver.username],
title: "test report",
model: "custom:#{llm_model.id}",
model: llm_model.id,
category_ids: nil,
tags: nil,
allow_secure_categories: false,

@ -1,7 +1,7 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::Personas::QuestionConsolidator do
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{Fabricate(:fake_model).id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(Fabricate(:fake_model)) }
let(:fake_endpoint) { DiscourseAi::Completions::Endpoints::Fake }
fab!(:user)

@ -2,7 +2,7 @@
RSpec.describe DiscourseAi::Personas::Tools::CreateArtifact do
fab!(:llm_model)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
fab!(:post)
before { SiteSetting.ai_bot_enabled = true }

@ -12,7 +12,7 @@ RSpec.describe DiscourseAi::Personas::Tools::CreateImage do
end
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(gpt_35_turbo.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{gpt_35_turbo.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(gpt_35_turbo) }
let(:progress_blk) { Proc.new {} }
let(:create_image) { described_class.new({ prompts: prompts }, llm: llm, bot_user: bot_user) }

@ -12,7 +12,7 @@ RSpec.describe DiscourseAi::Personas::Tools::DallE do
end
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(gpt_35_turbo.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{gpt_35_turbo.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(gpt_35_turbo) }
let(:progress_blk) { Proc.new {} }
let(:dall_e) { described_class.new({ prompts: prompts }, llm: llm, bot_user: bot_user) }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::DbSchema do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
before { SiteSetting.ai_bot_enabled = true }
describe "#process" do

@ -4,7 +4,7 @@ RSpec.describe DiscourseAi::Personas::Tools::DiscourseMetaSearch do
fab!(:llm_model) { Fabricate(:llm_model, max_prompt_tokens: 8192) }
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:progress_blk) { Proc.new {} }
let(:mock_search_json) { plugin_file_from_fixtures("search.json", "search_meta").read }

@ -17,7 +17,7 @@ RSpec.describe DiscourseAi::Personas::Tools::EditImage do
end
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(gpt_35_turbo.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{gpt_35_turbo.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(gpt_35_turbo) }
let(:progress_blk) { Proc.new {} }
let(:prompt) { "add a rainbow in the background" }

@ -4,7 +4,7 @@ require "rails_helper"
RSpec.describe DiscourseAi::Personas::Tools::GithubFileContent do
fab!(:llm_model)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:tool) do
described_class.new(

@ -5,7 +5,7 @@ require "rails_helper"
RSpec.describe DiscourseAi::Personas::Tools::GithubPullRequestDiff do
let(:bot_user) { Fabricate(:user) }
fab!(:llm_model)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:tool) { described_class.new({ repo: repo, pull_id: pull_id }, bot_user: bot_user, llm: llm) }
context "with #sort_and_shorten_diff" do

@ -5,7 +5,7 @@ require "rails_helper"
RSpec.describe DiscourseAi::Personas::Tools::GithubSearchCode do
let(:bot_user) { Fabricate(:user) }
fab!(:llm_model)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:tool) { described_class.new({ repo: repo, query: query }, bot_user: bot_user, llm: llm) }
context "with valid search results" do

@ -4,7 +4,7 @@ require "rails_helper"
RSpec.describe DiscourseAi::Personas::Tools::GithubSearchFiles do
fab!(:llm_model)
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:tool) do
described_class.new(

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::Google do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:progress_blk) { Proc.new {} }
let(:search) { described_class.new({ query: "some search term" }, bot_user: bot_user, llm: llm) }

@ -19,7 +19,7 @@ RSpec.describe DiscourseAi::Personas::Tools::Image do
toggle_enabled_bots(bots: [gpt_35_turbo])
end
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{gpt_35_turbo.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(gpt_35_turbo) }
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(gpt_35_turbo.name) }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::JavascriptEvaluator do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:progress_blk) { Proc.new {} }
before { SiteSetting.ai_bot_enabled = true }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::ListCategories do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
before { SiteSetting.ai_bot_enabled = true }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::ListTags do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
before do
SiteSetting.ai_bot_enabled = true

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::Read do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:tool) { described_class.new({ topic_id: topic_with_tags.id }, bot_user: bot_user, llm: llm) }
fab!(:parent_category) { Fabricate(:category, name: "animals") }

@ -6,7 +6,7 @@ RSpec.describe DiscourseAi::Personas::Tools::Researcher do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:progress_blk) { Proc.new {} }
fab!(:admin)

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::SearchSettings do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:fake_settings) do
[

@ -6,7 +6,7 @@ RSpec.describe DiscourseAi::Personas::Tools::Search do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:progress_blk) { Proc.new {} }
fab!(:admin)

@ -12,7 +12,7 @@ RSpec.describe DiscourseAi::Personas::Tools::SettingContext, if: has_rg? do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
before { SiteSetting.ai_bot_enabled = true }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::Summarize do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
let(:progress_blk) { Proc.new {} }
before { SiteSetting.ai_bot_enabled = true }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::Time do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
before { SiteSetting.ai_bot_enabled = true }

@ -3,7 +3,7 @@
RSpec.describe DiscourseAi::Personas::Tools::WebBrowser do
fab!(:llm_model)
let(:bot_user) { DiscourseAi::AiBot::EntryPoint.find_user_from_model(llm_model.name) }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
before { SiteSetting.ai_bot_enabled = true }

@ -8,10 +8,7 @@ describe DiscourseAi::Translation::BaseTranslator do
end
before do
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.ai_translation_enabled = true
end

@ -3,11 +3,7 @@
describe DiscourseAi::Translation::CategoryLocalizer do
subject(:localizer) { described_class }
before do
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
end
before { assign_fake_provider_to(:ai_default_llm_model) }
def post_raw_translator_stub(opts)
mock = instance_double(DiscourseAi::Translation::PostRawTranslator)

@ -2,10 +2,8 @@
describe DiscourseAi::Translation::EntryPoint do
before do
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.discourse_ai_enabled = true
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
SiteSetting.ai_translation_enabled = true
SiteSetting.content_localization_supported_locales = "en"
end
@ -59,9 +57,7 @@ describe DiscourseAi::Translation::EntryPoint do
before do
SiteSetting.discourse_ai_enabled = true
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
assign_fake_provider_to(:ai_default_llm_model)
end
it "enqueues detect topic locale and translate topic job" do

@ -7,11 +7,7 @@ describe DiscourseAi::Translation::LanguageDetector do
)
end
before do
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
end
before { assign_fake_provider_to(:ai_default_llm_model) }
describe ".detect" do
let(:locale_detector) { described_class.new("meow") }

@ -2,7 +2,7 @@
RSpec.describe AiTool do
fab!(:llm_model) { Fabricate(:llm_model, name: "claude-2") }
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{llm_model.id}") }
let(:llm) { DiscourseAi::Completions::Llm.proxy(llm_model) }
fab!(:topic)
fab!(:post) { Fabricate(:post, topic: topic, raw: "bananas are a tasty fruit") }
fab!(:bot_user) { Discourse.system_user }

@ -52,17 +52,17 @@ RSpec.describe DiscourseAi::Admin::AiLlmsController do
# setting the setting calls the model
DiscourseAi::Completions::Llm.with_prepared_responses(["OK"]) do
SiteSetting.ai_helper_model = "custom:#{llm_model.id}"
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.ai_helper_enabled = true
end
DiscourseAi::Completions::Llm.with_prepared_responses(["OK"]) do
SiteSetting.ai_summarization_model = "custom:#{llm_model2.id}"
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.ai_summarization_enabled = true
end
DiscourseAi::Completions::Llm.with_prepared_responses(["OK"]) do
SiteSetting.ai_embeddings_semantic_search_hyde_model = "custom:#{llm_model2.id}"
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.ai_embeddings_semantic_search_enabled = true
end

@ -26,7 +26,7 @@ RSpec.describe ProblemCheck::AiLlmStatus do
before do
stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
assign_fake_provider_to(:ai_default_llm_model)
SiteSetting.ai_summarization_enabled = true
end