FEATURE: improve o3-mini support (#1106)

* DEV: raise timeout for reasoning LLMs

* FIX: use id to identify llms, not model_name

model_name is not unique; in the case of reasoning models,
you may configure the same LLM multiple times using different
reasoning levels.
This commit is contained in:
Sam 2025-02-03 08:45:56 +11:00 committed by GitHub
parent 381a2715c8
commit cf86d274a0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 7 additions and 3 deletions

View File

@ -152,7 +152,7 @@ export default class BotSelector extends Component {
resetTargetRecipients() {
if (this.allowLLMSelector) {
const botUsername = this.currentUser.ai_enabled_chat_bots.find(
(bot) => bot.model_name === this.llm
(bot) => bot.id === this.llm
).username;
this.composer.set("targetRecipients", botUsername);
} else {
@ -170,7 +170,7 @@ export default class BotSelector extends Component {
return availableBots.map((bot) => {
return {
id: bot.model_name,
id: bot.id,
name: bot.display_name,
};
});

View File

@ -7,7 +7,11 @@ module DiscourseAi
attr_reader :partial_tool_calls
CompletionFailed = Class.new(StandardError)
TIMEOUT = 60
# 6 minutes
# Reasoning LLMs can take a very long time to respond; generally it will be under 5 minutes.
# The alternative is to have per-LLM timeouts, but that would make configuration extra confusing.
# Let's try this simple solution first.
TIMEOUT = 360
class << self
def endpoint_for(provider_name)