REFACTOR: Use LLM abstraction in the AI Helper. (#312)

It also removes the need for multiple versions of our seeded prompts per model, further simplifying the code.
This commit is contained in:
Roman Rizzi 2023-11-27 09:33:31 -03:00 committed by GitHub
parent 5a4598a7b4
commit 54a8dd9556
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 615 additions and 999 deletions

View File

@ -11,7 +11,7 @@ module DiscourseAi
def prompts
render json:
ActiveModel::ArraySerializer.new(
DiscourseAi::AiHelper::LlmPrompt.new.available_prompts,
DiscourseAi::AiHelper::Assistant.new.available_prompts,
root: false,
),
status: 200
@ -29,39 +29,34 @@ module DiscourseAi
hijack do
render json:
DiscourseAi::AiHelper::LlmPrompt.new.generate_and_send_prompt(prompt, params),
DiscourseAi::AiHelper::Assistant.new.generate_and_send_prompt(
prompt,
input,
current_user,
),
status: 200
end
rescue ::DiscourseAi::Inference::OpenAiCompletions::CompletionFailed,
::DiscourseAi::Inference::HuggingFaceTextGeneration::CompletionFailed,
::DiscourseAi::Inference::AnthropicCompletions::CompletionFailed => e
rescue DiscourseAi::Completions::Endpoints::Base::CompletionFailed => e
render_json_error I18n.t("discourse_ai.ai_helper.errors.completion_request_failed"),
status: 502
end
def suggest_title
input = get_text_param!
input_hash = { text: input }
llm_prompt =
DiscourseAi::AiHelper::LlmPrompt
.new
.available_prompts(name_filter: "generate_titles")
.first
prompt = CompletionPrompt.find_by(id: llm_prompt[:id])
raise Discourse::InvalidParameters.new(:mode) if !prompt || !prompt.enabled?
prompt = CompletionPrompt.enabled_by_name("generate_titles")
raise Discourse::InvalidParameters.new(:mode) if !prompt
hijack do
render json:
DiscourseAi::AiHelper::LlmPrompt.new.generate_and_send_prompt(
DiscourseAi::AiHelper::Assistant.new.generate_and_send_prompt(
prompt,
input_hash,
input,
current_user,
),
status: 200
end
rescue ::DiscourseAi::Inference::OpenAiCompletions::CompletionFailed,
::DiscourseAi::Inference::HuggingFaceTextGeneration::CompletionFailed,
::DiscourseAi::Inference::AnthropicCompletions::CompletionFailed => e
rescue DiscourseAi::Completions::Endpoints::Base::CompletionFailed => e
render_json_error I18n.t("discourse_ai.ai_helper.errors.completion_request_failed"),
status: 502
end
@ -98,21 +93,18 @@ module DiscourseAi
def explain
post_id = get_post_param!
text = get_text_param!
post = Post.find_by(id: post_id)
term_to_explain = get_text_param!
post = Post.includes(:topic).find_by(id: post_id)
raise Discourse::InvalidParameters.new(:post_id) unless post
render json:
DiscourseAi::AiHelper::TopicHelper.new(
{ text: text },
current_user,
post: post,
).explain,
DiscourseAi::AiHelper::TopicHelper.new(current_user).explain(
term_to_explain,
post,
),
status: 200
rescue ::DiscourseAi::Inference::OpenAiCompletions::CompletionFailed,
::DiscourseAi::Inference::HuggingFaceTextGeneration::CompletionFailed,
::DiscourseAi::Inference::AnthropicCompletions::CompletionFailed => e
rescue DiscourseAi::Completions::Endpoints::Base::CompletionFailed => e
render_json_error I18n.t("discourse_ai.ai_helper.errors.completion_request_failed"),
status: 502
end

View File

@ -1,40 +1,37 @@
# frozen_string_literal: true
class CompletionPrompt < ActiveRecord::Base
# TODO(roman): Remove sept 2023.
self.ignored_columns = ["value"]
# TODO(roman): Remove may 2024.
self.ignored_columns = ["provider"]
TRANSLATE = -301
GENERATE_TITLES = -302
PROOFREAD = -303
MARKDOWN_TABLE = -304
CUSTOM_PROMPT = -305
EXPLAIN = -306
enum :prompt_type, { text: 0, list: 1, diff: 2 }
validates :messages, length: { maximum: 20 }
validate :each_message_length
def messages_with_user_input(user_input)
return messages unless user_input.present?
if user_input[:custom_prompt].present?
case ::DiscourseAi::AiHelper::LlmPrompt.new.enabled_provider
when "huggingface"
self.messages.each { |msg| msg.sub!("{{custom_prompt}}", user_input[:custom_prompt]) }
else
self.messages.each do |msg|
msg["content"].sub!("{{custom_prompt}}", user_input[:custom_prompt])
end
end
def self.enabled_by_name(name)
where(enabled: true).find_by(name: name)
end
case ::DiscourseAi::AiHelper::LlmPrompt.new.enabled_provider
when "openai"
self.messages << { role: "user", content: user_input[:text] }
when "anthropic"
self.messages << { "role" => "Input", "content" => "<input>#{user_input[:text]}</input>" }
when "huggingface"
self.messages.first.sub("{{user_input}}", user_input[:text])
end
def messages_with_input(input)
return unless input
messages_hash.merge(input: "<input>#{input}</input")
end
private
def messages_hash
@messages_hash ||= messages.symbolize_keys!
end
def each_message_length
messages.each_with_index do |msg, idx|
next if msg["content"].length <= 1000
@ -56,7 +53,6 @@ end
# created_at :datetime not null
# updated_at :datetime not null
# messages :jsonb
# provider :text
#
# Indexes
#

View File

@ -1,158 +0,0 @@
# frozen_string_literal: true
CompletionPrompt.seed do |cp|
cp.id = -1
cp.provider = "openai"
cp.name = "translate"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [{ role: "system", content: <<~TEXT }]
I want you to act as an English translator, spelling corrector and improver. I will speak to you
in any language and you will detect the language, translate it and answer in the corrected and
improved version of my text, in English. I want you to replace my simplified A0-level words and
sentences with more beautiful and elegant, upper level English words and sentences.
Keep the meaning same, but make them more literary. I want you to only reply the correction,
the improvements and nothing else, do not write explanations.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -2
cp.provider = "openai"
cp.name = "generate_titles"
cp.prompt_type = CompletionPrompt.prompt_types[:list]
cp.messages = [{ role: "system", content: <<~TEXT }]
I want you to act as a title generator for written pieces. I will provide you with a text,
and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
I want you to only reply the list of options and nothing else, do not write explanations.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -3
cp.provider = "openai"
cp.name = "proofread"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [
{ role: "system", content: <<~TEXT },
You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
TEXT
{ role: "user", content: "![amazing car|100x100, 22%](upload://hapy.png)" },
{ role: "assistant", content: "![Amazing car|100x100, 22%](upload://hapy.png)" },
{ role: "user", content: <<~TEXT },
Intensity 1:
The rain in spain stays mainly in the plane.
TEXT
{ role: "assistant", content: "The rain in Spain, stays mainly in the Plane." },
{ role: "user", content: "The rain in Spain, stays mainly in the Plane." },
{ role: "assistant", content: "The rain in Spain, stays mainly in the Plane." },
{ role: "user", content: <<~TEXT },
Intensity 1:
Hello,
Sometimes the logo isn't changing automatically when color scheme changes.
![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
TEXT
{ role: "assistant", content: <<~TEXT },
Hello,
Sometimes the logo does not change automatically when the color scheme changes.
![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
TEXT
{ role: "user", content: <<~TEXT },
Intensity 1:
Any ideas what is wrong with this peace of cod?
> This quot contains a typo
```ruby
# this has speling mistakes
testin.atypo = 11
baad = "bad"
```
TEXT
{ role: "assistant", content: <<~TEXT },
Any ideas what is wrong with this piece of code?
> This quot contains a typo
```ruby
# This has spelling mistakes
testing.a_typo = 11
bad = "bad"
```
TEXT
]
end
CompletionPrompt.seed do |cp|
cp.id = -4
cp.provider = "openai"
cp.name = "markdown_table"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [
{ role: "system", content: <<~TEXT },
You are a markdown table formatter, I will provide you text and you will format it into a markdown table
TEXT
{ role: "user", content: "sam,joe,jane\nage: 22| 10|11" },
{ role: "assistant", content: <<~TEXT },
| | sam | joe | jane |
|---|---|---|---|
| age | 22 | 10 | 11 |
TEXT
{ role: "user", content: <<~TEXT },
sam: speed 100, age 22
jane: age 10
fred: height 22
TEXT
{ role: "assistant", content: <<~TEXT },
| | speed | age | height |
|---|---|---|---|
| sam | 100 | 22 | - |
| jane | - | 10 | - |
| fred | - | - | 22 |
TEXT
{ role: "user", content: <<~TEXT },
chrome 22ms (first load 10ms)
firefox 10ms (first load: 9ms)
TEXT
{ role: "assistant", content: <<~TEXT },
| Browser | Load Time (ms) | First Load Time (ms) |
|---|---|---|
| Chrome | 22 | 10 |
| Firefox | 10 | 9 |
TEXT
]
end
CompletionPrompt.seed do |cp|
cp.id = -5
cp.provider = "openai"
cp.name = "custom_prompt"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [{ role: "system", content: <<~TEXT }]
You are a helpful assistant, I will provide you with a text below,
you will {{custom_prompt}} and you will reply with the result.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -6
cp.provider = "openai"
cp.name = "explain"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [{ role: "system", content: <<~TEXT }]
You are a helpful assistant. Act as a tutor explaining terms to a student in a specific
context. Reply with a paragraph with a brief explanation about what the term means in the
content provided, format the response using markdown. Reply only with the explanation and
nothing more.
Term to explain:
{{search}}
Context where it was used:
{{context}}
Title of the conversation where it was used:
{{topic}}
TEXT
end

View File

@ -1,93 +0,0 @@
# frozen_string_literal: true
CompletionPrompt.seed do |cp|
cp.id = -101
cp.provider = "anthropic"
cp.name = "translate"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [{ role: "Human", content: <<~TEXT }]
I want you to act as an English translator, spelling corrector and improver. I will speak to you
in any language and you will detect the language, translate it and answer in the corrected and
improved version of my text, in English. I want you to replace my simplified A0-level words and
sentences with more beautiful and elegant, upper level English words and sentences.
Keep the meaning same, but make them more literary. I will provide you with a text inside <input> tags,
please put the translation between <ai></ai> tags.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -102
cp.provider = "anthropic"
cp.name = "generate_titles"
cp.prompt_type = CompletionPrompt.prompt_types[:list]
cp.messages = [{ role: "Human", content: <<~TEXT }]
I want you to act as a title generator for written pieces. I will provide you with a text inside <input> tags,
and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
Please put each suggestion between <ai></ai> tags.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -103
cp.provider = "anthropic"
cp.name = "proofread"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [{ role: "Human", content: <<~TEXT }]
You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
I will provide you with a text inside <input> tags,
please reply with the corrected text between <ai></ai> tags.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -104
cp.provider = "anthropic"
cp.name = "markdown_table"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [{ role: "Human", content: <<~TEXT }]
You are a markdown table formatter, I will provide you text and you will format it into a markdown table.
I will provide you with a text inside <input> tags,
please reply with the corrected text between <ai></ai> tags.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -105
cp.provider = "anthropic"
cp.name = "custom_prompt"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [{ role: "Human", content: <<~TEXT }]
You are a helpful assistant, I will provide you with a text inside <input> tags,
you will {{custom_prompt}} and you will reply with the result between <ai></ai> tags.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -106
cp.provider = "anthropic"
cp.name = "explain"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [{ role: "Human", content: <<~TEXT }, { role: "Assistant", content: "" }]
You are a helpful assistant, I will provide you with a term inside <input> tags,
and the context where it was used inside <context> tags, the title of the topic
where it was used between <topic> tags, optionally the post it was written
in response to in <post> tags and you will reply with an explanation of what the
term means in this context between <ai></ai> tags.
<input>
{{search}}
</input>
<context>
{{context}}
</context>
<topic>
{{topic}}
</topic>
TEXT
end

View File

@ -1,154 +0,0 @@
# frozen_string_literal: true
CompletionPrompt.seed do |cp|
cp.id = -201
cp.provider = "huggingface"
cp.name = "translate"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [<<~TEXT]
### System:
I want you to act as an English translator, spelling corrector and improver. I will speak to you
in any language and you will detect the language, translate it and answer in the corrected and
improved version of my text, in English. I want you to replace my simplified A0-level words and
sentences with more beautiful and elegant, upper level English words and sentences.
Keep the meaning same, but make them more literary. I want you to only reply the correction,
the improvements and nothing else, do not write explanations.
### User:
{{user_input}}
### Assistant:
Here is the corrected, translated and improved version of the text:
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -202
cp.provider = "huggingface"
cp.name = "generate_titles"
cp.prompt_type = CompletionPrompt.prompt_types[:list]
cp.messages = [<<~TEXT]
### System:
I want you to act as a title generator for written pieces. I will provide you with a text,
and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
I want you to only reply the list of options and nothing else, do not write explanations.
### User:
{{user_input}}
### Assistant:
Here are five titles for the text:
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -203
cp.provider = "huggingface"
cp.name = "proofread"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [<<~TEXT]
### System:
You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
### User:
Rewrite the following text to correct any errors:
{{user_input}}
### Assistant:
Here is a proofread version of the text:
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -204
cp.provider = "huggingface"
cp.name = "markdown_table"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [<<~TEXT]
### System:
You are a markdown table formatter, I will provide you text and you will format it into a markdown table
### User:
sam,joe,jane
age: 22| 10|11
### Assistant:
| | sam | joe | jane |
|---|---|---|---|
| age | 22 | 10 | 11 |
### User:
sam: speed 100, age 22
jane: age 10
fred: height 22
### Assistant:
| | speed | age | height |
|---|---|---|---|
| sam | 100 | 22 | - |
| jane | - | 10 | - |
| fred | - | - | 22 |
### User:
chrome 22ms (first load 10ms)
firefox 10ms (first load: 9ms)
### Assistant:
| Browser | Load Time (ms) | First Load Time (ms) |
|---|---|---|
| Chrome | 22 | 10 |
| Firefox | 10 | 9 |
### User:
{{user_input}}
### Assistant:
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -205
cp.provider = "huggingface"
cp.name = "custom_prompt"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [<<~TEXT]
### System:
You are a helpful assistant, I will provide you with a text below,
you will {{custom_prompt}} and you will reply with the result.
### User:
{{user_input}}
### Assistant:
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -206
cp.provider = "huggingface"
cp.name = "explain"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [<<~TEXT]
### System:
You are a helpful assistant. Act as a tutor explaining terms to a student in a specific
context. Reply with a paragraph with a brief explanation about what the term means in the
content provided, format the response using markdown. Reply only with the explanation and
nothing more.
### User:
Term to explain:
{{search}}
Context where it was used:
{{context}}
Title of the conversation where it was used:
{{topic}}
### Assistant:
TEXT
end

View File

@ -0,0 +1,174 @@
# frozen_string_literal: true
CompletionPrompt.seed do |cp|
cp.id = -301
cp.name = "translate"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = { insts: <<~TEXT }
I want you to act as an English translator, spelling corrector and improver. I will write to you
in any language and you will detect the language, translate it and answer in the corrected and
improved version of my text, in English. I want you to replace my simplified A0-level words and
sentences with more beautiful and elegant, upper level English words and sentences.
Keep the meaning same, but make them more literary. I want you to only reply the correction,
the improvements and nothing else, do not write explanations.
You will find the text between <input></input> XML tags.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -302
cp.name = "generate_titles"
cp.prompt_type = CompletionPrompt.prompt_types[:list]
cp.messages = {
insts: <<~TEXT,
I want you to act as a title generator for written pieces. I will provide you with a text,
and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
I want you to only reply the list of options and nothing else, do not write explanations.
Each title you generate must be separated by *.
You will find the text between <input></input> XML tags.
TEXT
examples: [
[
"<input>In the labyrinth of time, a solitary horse, etched in gold by the setting sun, embarked on an infinite journey.</input>",
"The solitary horse*The horse etched in gold*A horse's infinite journey*A horse lost in time*A horse's last ride",
],
],
}
end
CompletionPrompt.seed do |cp|
cp.id = -303
cp.name = "proofread"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = {
insts: <<~TEXT,
You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
You will find the text between <input></input> XML tags.
TEXT
examples: [
[
"<input>![amazing car|100x100, 22%](upload://hapy.png)</input>",
"![Amazing car|100x100, 22%](upload://hapy.png)",
],
[<<~TEXT, "The rain in Spain, stays mainly in the Plane."],
<input>
Intensity 1:
The rain in spain stays mainly in the plane.
</input>
TEXT
[
"The rain in Spain, stays mainly in the Plane.",
"The rain in Spain, stays mainly in the Plane.",
],
[<<~TEXT, <<~TEXT],
<input>
Intensity 1:
Hello,
Sometimes the logo isn't changing automatically when color scheme changes.
![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
</input>
TEXT
Hello,
Sometimes the logo does not change automatically when the color scheme changes.
![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
TEXT
[<<~TEXT, <<~TEXT],
<input>
Intensity 1:
Any ideas what is wrong with this peace of cod?
> This quot contains a typo
```ruby
# this has speling mistakes
testin.atypo = 11
baad = "bad"
```
</input>
TEXT
Any ideas what is wrong with this piece of code?
> This quot contains a typo
```ruby
# This has spelling mistakes
testing.a_typo = 11
bad = "bad"
```
TEXT
],
}
end
CompletionPrompt.seed do |cp|
cp.id = -304
cp.name = "markdown_table"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = {
insts: <<~TEXT,
You are a markdown table formatter, I will provide you text inside <input></input> XML tags and you will format it into a markdown table
TEXT
examples: [
["<input>sam,joe,jane\nage: 22| 10|11</input>", <<~TEXT],
| | sam | joe | jane |
|---|---|---|---|
| age | 22 | 10 | 11 |
TEXT
[<<~TEXT, <<~TEXT],
<input>
sam: speed 100, age 22
jane: age 10
fred: height 22
</input>
TEXT
| | speed | age | height |
|---|---|---|---|
| sam | 100 | 22 | - |
| jane | - | 10 | - |
| fred | - | - | 22 |
TEXT
[<<~TEXT, <<~TEXT],
<input>
chrome 22ms (first load 10ms)
firefox 10ms (first load: 9ms)
</input>
TEXT
| Browser | Load Time (ms) | First Load Time (ms) |
|---|---|---|
| Chrome | 22 | 10 |
| Firefox | 10 | 9 |
TEXT
],
}
end
CompletionPrompt.seed do |cp|
cp.id = -305
cp.name = "custom_prompt"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = { insts: <<~TEXT }
You are a helpful assistant. I will give you instructions inside <input></input> XML tags.
You will look at them and reply with a result.
TEXT
end
CompletionPrompt.seed do |cp|
cp.id = -306
cp.name = "explain"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = { insts: <<~TEXT }
You are a tutor explaining a term to a student in a specific context.
I will provide everything you need to know inside <input> tags, which consists of the term I want you
to explain inside <term> tags, the context of where it was used inside <context> tags, the title of
the topic where it was used inside <topic> tags, and optionally, the previous post in the conversation
in <replyTo> tags.
Using all this information, write a paragraph with a brief explanation
of what the term means. Format the response using Markdown. Reply only with the explanation and
nothing more.
TEXT
end

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
class SwitchToGenericCompletionPrompts < ActiveRecord::Migration[7.0]
def change
remove_column :completion_prompts, :provider, :text
DB.exec("DELETE FROM completion_prompts WHERE (id < 0 AND id > -300)")
end
end

View File

@ -0,0 +1,110 @@
# frozen_string_literal: true

module DiscourseAi
  module AiHelper
    # Facade used by the AI helper endpoints: lists the seeded completion
    # prompts and runs one of them against user input through the LLM
    # configured in SiteSetting.ai_helper_model.
    class Assistant
      # UI icon for each known prompt name; unknown names map to nil.
      ICON_MAP = {
        "translate" => "language",
        "generate_titles" => "heading",
        "proofread" => "spell-check",
        "markdown_table" => "table",
        "tone" => "microphone",
        "custom_prompt" => "comment",
        "rewrite" => "pen",
        "explain" => "question",
      }.freeze

      # Where each prompt is surfaced in the UI; anything not listed falls
      # back to DEFAULT_LOCATIONS.
      LOCATION_MAP = {
        "translate" => %w[composer post],
        "generate_titles" => %w[composer],
        "proofread" => %w[composer],
        "markdown_table" => %w[composer],
        "tone" => %w[composer],
        "custom_prompt" => %w[composer],
        "rewrite" => %w[composer],
        "explain" => %w[post],
        "summarize" => %w[post],
      }.freeze
      DEFAULT_LOCATIONS = %w[composer post].freeze

      # Returns serializable hashes describing the enabled prompts.
      #
      # @param name_filter [String, nil] when present, only the enabled prompt
      #   with that name is considered.
      # @return [Array<Hash>] empty when no enabled prompt matches.
      def available_prompts(name_filter: nil)
        prompts =
          if name_filter
            # enabled_by_name returns nil when there is no enabled prompt with
            # that name; compact so we return [] instead of raising on nil in
            # the map below.
            [CompletionPrompt.enabled_by_name(name_filter)].compact
          else
            CompletionPrompt.where(enabled: true)
          end

        prompts.map do |prompt|
          translation =
            I18n.t("discourse_ai.ai_helper.prompts.#{prompt.name}", default: nil) ||
              prompt.translated_name || prompt.name

          {
            id: prompt.id,
            name: prompt.name,
            translated_name: translation,
            prompt_type: prompt.prompt_type,
            icon: icon_map(prompt.name),
            location: location_map(prompt.name),
          }
        end
      end

      # Sends `input` through `completion_prompt` to the configured LLM on
      # behalf of `user`.
      #
      # @param completion_prompt [CompletionPrompt]
      # @param input [String] text to operate on.
      # @param user [User] passed through to the LLM proxy.
      # @return [Hash] { type:, suggestions:, diff: (diff-type prompts only) }
      def generate_and_send_prompt(completion_prompt, input, user)
        llm = DiscourseAi::Completions::LLM.proxy(SiteSetting.ai_helper_model)

        generic_prompt = completion_prompt.messages_with_input(input)
        completion_result = llm.completion!(generic_prompt, user)

        result = { type: completion_prompt.prompt_type }
        result[:diff] = parse_diff(input, completion_result) if completion_prompt.diff?
        result[:suggestions] =
          completion_prompt.list? ? parse_list(completion_result) : [completion_result]

        result
      end

      private

      def icon_map(name)
        ICON_MAP[name]
      end

      def location_map(name)
        LOCATION_MAP.fetch(name, DEFAULT_LOCATIONS)
      end

      # Cooks both texts to HTML and returns an inline diff between them.
      def parse_diff(text, suggestion)
        cooked_text = PrettyText.cook(text)
        cooked_suggestion = PrettyText.cook(suggestion)

        DiscourseDiff.new(cooked_text, cooked_suggestion).inline_html
      end

      # List-type completions separate suggestions with "*" (see the
      # generate_titles seeded prompt).
      def parse_list(list)
        list.split("*")
      end
    end
  end
end

View File

@ -18,20 +18,14 @@ module DiscourseAi
end
def suggested_title
input_hash = { text: thread_content }
return nil if thread_content.blank?
return nil if input_hash[:text].blank?
prompt = CompletionPrompt.enabled_by_name(id: "generate_titles")
raise Discourse::InvalidParameters.new(:mode) if !prompt
llm_prompt =
DiscourseAi::AiHelper::LlmPrompt
.new
.available_prompts(name_filter: "generate_titles")
.first
prompt = CompletionPrompt.find_by(id: llm_prompt[:id])
raise Discourse::InvalidParameters.new(:mode) if !prompt || !prompt.enabled?
response = DiscourseAi::AiHelper::LlmPrompt.new.generate_and_send_prompt(prompt, input_hash)
response.dig(:suggestions).first
response =
DiscourseAi::AiHelper::LlmPrompt.new.generate_and_send_prompt(prompt, thread_content)
response.dig(:suggestions)&.first
end
end
end

View File

@ -5,7 +5,7 @@ module DiscourseAi
def load_files
require_relative "chat_thread_titler"
require_relative "jobs/regular/generate_chat_thread_title"
require_relative "llm_prompt"
require_relative "assistant"
require_relative "painter"
require_relative "semantic_categorizer"
require_relative "topic_helper"

View File

@ -1,192 +0,0 @@
# frozen_string_literal: true
module DiscourseAi
module AiHelper
class LlmPrompt
def available_prompts(name_filter: nil)
cp = CompletionPrompt
cp = cp.where(name: name_filter) if name_filter.present?
cp
.where(provider: enabled_provider)
.where(enabled: true)
.map do |prompt|
translation =
I18n.t("discourse_ai.ai_helper.prompts.#{prompt.name}", default: nil) ||
prompt.translated_name || prompt.name
{
id: prompt.id,
name: prompt.name,
translated_name: translation,
prompt_type: prompt.prompt_type,
icon: icon_map(prompt.name),
location: location_map(prompt.name),
}
end
end
def generate_and_send_prompt(prompt, params)
case enabled_provider
when "openai"
openai_call(prompt, params)
when "anthropic"
anthropic_call(prompt, params)
when "huggingface"
huggingface_call(prompt, params)
end
end
def enabled_provider
case SiteSetting.ai_helper_model
when /gpt/
"openai"
when /claude/
"anthropic"
else
"huggingface"
end
end
private
def icon_map(name)
case name
when "translate"
"language"
when "generate_titles"
"heading"
when "proofread"
"spell-check"
when "markdown_table"
"table"
when "tone"
"microphone"
when "custom_prompt"
"comment"
when "rewrite"
"pen"
when "explain"
"question"
else
nil
end
end
def location_map(name)
case name
when "translate"
%w[composer post]
when "generate_titles"
%w[composer]
when "proofread"
%w[composer]
when "markdown_table"
%w[composer]
when "tone"
%w[composer]
when "custom_prompt"
%w[composer]
when "rewrite"
%w[composer]
when "explain"
%w[post]
when "summarize"
%w[post]
else
%w[composer post]
end
end
def generate_diff(text, suggestion)
cooked_text = PrettyText.cook(text)
cooked_suggestion = PrettyText.cook(suggestion)
DiscourseDiff.new(cooked_text, cooked_suggestion).inline_html
end
def parse_content(prompt, content)
return "" if content.blank?
case enabled_provider
when "openai"
return content.strip if !prompt.list?
content.gsub("\"", "").gsub(/\d./, "").split("\n").map(&:strip)
when "anthropic"
parse_antropic_content(prompt, content)
when "huggingface"
return [content.strip.delete_prefix('"').delete_suffix('"')] if !prompt.list?
content.gsub("\"", "").gsub(/\d./, "").split("\n").map(&:strip)
end
end
def openai_call(prompt, params)
result = { type: prompt.prompt_type }
messages = prompt.messages_with_user_input(params)
result[:suggestions] = DiscourseAi::Inference::OpenAiCompletions
.perform!(messages, SiteSetting.ai_helper_model)
.dig(:choices)
.to_a
.flat_map { |choice| parse_content(prompt, choice.dig(:message, :content).to_s) }
.compact_blank
result[:diff] = generate_diff(params[:text], result[:suggestions].first) if prompt.diff?
result
end
def anthropic_call(prompt, params)
result = { type: prompt.prompt_type }
filled_message = prompt.messages_with_user_input(params)
message =
filled_message.map { |msg| "#{msg["role"]}: #{msg["content"]}" }.join("\n\n") +
"Assistant:"
response = DiscourseAi::Inference::AnthropicCompletions.perform!(message)
result[:suggestions] = parse_content(prompt, response.dig(:completion))
result[:diff] = generate_diff(params[:text], result[:suggestions].first) if prompt.diff?
result
end
def huggingface_call(prompt, params)
result = { type: prompt.prompt_type }
message = prompt.messages_with_user_input(params)
response =
DiscourseAi::Inference::HuggingFaceTextGeneration.perform!(
message,
SiteSetting.ai_helper_model,
)
result[:suggestions] = parse_content(prompt, response.dig(:generated_text))
result[:diff] = generate_diff(params[:text], result[:suggestions].first) if prompt.diff?
result
end
def parse_antropic_content(prompt, content)
if prompt.list?
suggestions = Nokogiri::HTML5.fragment(content).search("ai").map(&:text)
if suggestions.length > 1
suggestions
else
suggestions.first.split("\n").map(&:strip)
end
else
[Nokogiri::HTML5.fragment(content).at("ai").text]
end
end
end
end
end

View File

@ -4,7 +4,7 @@ module DiscourseAi
module AiHelper
class Painter
def commission_thumbnails(theme, user)
stable_diffusion_prompt = difussion_prompt(theme)
stable_diffusion_prompt = difussion_prompt(theme, user)
return [] if stable_diffusion_prompt.blank?
@ -29,51 +29,13 @@ module DiscourseAi
private
def difussion_prompt(text)
llm_prompt = LlmPrompt.new
prompt_for_provider =
completion_prompts.find { |prompt| prompt.provider == llm_prompt.enabled_provider }
return "" if prompt_for_provider.nil?
llm_prompt
.generate_and_send_prompt(prompt_for_provider, { text: text })
.dig(:suggestions)
.first
end
def completion_prompts
[
CompletionPrompt.new(
provider: "anthropic",
prompt_type: CompletionPrompt.prompt_types[:text],
messages: [{ role: "Human", content: <<~TEXT }],
def difussion_prompt(text, user)
prompt = { insts: <<~TEXT, input: text }
Provide me a StableDiffusion prompt to generate an image that illustrates the following post in 40 words or less, be creative.
The post is provided between <input> tags and the Stable Diffusion prompt string should be returned between <ai> tags.
You'll find the post between <input></input> XML tags.
TEXT
),
CompletionPrompt.new(
provider: "openai",
prompt_type: CompletionPrompt.prompt_types[:text],
messages: [{ role: "system", content: <<~TEXT }],
Provide me a StableDiffusion prompt to generate an image that illustrates the following post in 40 words or less, be creative.
TEXT
),
CompletionPrompt.new(
provider: "huggingface",
prompt_type: CompletionPrompt.prompt_types[:text],
messages: [<<~TEXT],
### System:
Provide me a StableDiffusion prompt to generate an image that illustrates the following post in 40 words or less, be creative.
### User:
{{user_input}}
### Assistant:
Here is a StableDiffusion prompt:
TEXT
),
]
DiscourseAi::Completions::LLM.proxy(SiteSetting.ai_helper_model).completion!(prompt, user)
end
end
end

View File

@ -3,31 +3,33 @@
module DiscourseAi
module AiHelper
class TopicHelper
def initialize(input, user, params = {})
def initialize(user)
@user = user
@text = input[:text]
@params = params
end
def explain
return nil if @text.blank?
return nil unless post = Post.find_by(id: @params[:post])
def explain(term_to_explain, post)
return nil unless term_to_explain
return nil unless post
reply_to = post.topic.first_post
topic = reply_to.topic
reply_to = post.reply_to_post
topic = post.topic
llm_prompt =
DiscourseAi::AiHelper::LlmPrompt.new.available_prompts(name_filter: "explain").first
prompt = CompletionPrompt.find_by(id: llm_prompt[:id])
prompt = CompletionPrompt.enabled_by_name("explain")
raise Discourse::InvalidParameters.new(:mode) if !prompt
prompt.messages.first["content"].gsub!("{{search}}", @text)
prompt.messages.first["content"].gsub!("{{context}}", post.raw)
prompt.messages.first["content"].gsub!("{{topic}}", topic.title)
# TODO inject this conditionally
#prompt.messages.first["content"].gsub!("{{post}}", reply_to.raw)
input = <<~TEXT
<term>#{term_to_explain}</term>
<context>#{post.raw}</context>
<topic>#{topic.title}</topic>
#{reply_to ? "<replyTo>#{reply_to.raw}</replyTo>" : nil}
TEXT
DiscourseAi::AiHelper::LlmPrompt.new.generate_and_send_prompt(prompt, nil)
end
DiscourseAi::AiHelper::Assistant.new.generate_and_send_prompt(prompt, input, user)
end
private
attr_reader :user
end
end
end

View File

@ -0,0 +1,52 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::AiHelper::Assistant do
  fab!(:user) { Fabricate(:user) }

  # Looked up lazily so each context can select its prompt via its own `mode`.
  let(:prompt) { CompletionPrompt.find_by(id: mode) }

  let(:english_text) { <<~STRING }
    To perfect his horror, Caesar, surrounded at the base of the statue by the impatient daggers of his friends,
    discovers among the faces and blades that of Marcus Brutus, his protege, perhaps his son, and he no longer
    defends himself, but instead exclaims: 'You too, my son!' Shakespeare and Quevedo capture the pathetic cry.
  STRING

  describe "#generate_and_send_prompt" do
    context "when using a prompt that returns text" do
      let(:mode) { CompletionPrompt::TRANSLATE }

      let(:text_to_translate) { <<~STRING }
        Para que su horror sea perfecto, César, acosado al pie de la estatua por lo impacientes puñales de sus amigos,
        descubre entre las caras y los aceros la de Marco Bruto, su protegido, acaso su hijo,
        y ya no se defiende y exclama: ¡ también, hijo mío! Shakespeare y Quevedo recogen el patético grito.
      STRING

      it "Sends the prompt to the LLM and returns the response" do
        # Stub the LLM layer so no real completion endpoint is hit; the canned
        # response is returned for the completion performed inside the block.
        response =
          DiscourseAi::Completions::LLM.with_prepared_responses([english_text]) do
            subject.generate_and_send_prompt(prompt, text_to_translate, user)
          end

        expect(response[:suggestions]).to contain_exactly(english_text)
      end
    end

    context "when using a prompt that returns a list" do
      let(:mode) { CompletionPrompt::GENERATE_TITLES }

      # A single "*"-delimited string; the assertion below expects the
      # assistant to split it into one suggestion per title.
      let(:titles) do
        "The solitary horse*The horse etched in gold*A horse's infinite journey*A horse lost in time*A horse's last ride"
      end

      it "returns an array with each title" do
        expected = titles.split("*")

        response =
          DiscourseAi::Completions::LLM.with_prepared_responses([titles]) do
            subject.generate_and_send_prompt(prompt, english_text, user)
          end

        expect(response[:suggestions]).to contain_exactly(*expected)
      end
    end
  end
end

View File

@ -1,67 +0,0 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::AiHelper::LlmPrompt do
  # Prompts are seeded per provider; these specs exercise the OpenAI variants.
  let(:prompt) { CompletionPrompt.find_by(name: mode, provider: "openai") }

  describe "#generate_and_send_prompt" do
    context "when using the translate mode" do
      let(:mode) { OpenAiCompletionsInferenceStubs::TRANSLATE }

      # Stubs the OpenAI completions HTTP endpoint with a canned response for
      # this mode, so no network call is made.
      before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

      it "Sends the prompt to chatGPT and returns the response" do
        response =
          subject.generate_and_send_prompt(
            prompt,
            { text: OpenAiCompletionsInferenceStubs.spanish_text },
          )

        expect(response[:suggestions]).to contain_exactly(
          OpenAiCompletionsInferenceStubs.translated_response.strip,
        )
      end
    end

    context "when using the proofread mode" do
      let(:mode) { OpenAiCompletionsInferenceStubs::PROOFREAD }
      before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

      it "Sends the prompt to chatGPT and returns the response" do
        response =
          subject.generate_and_send_prompt(
            prompt,
            { text: OpenAiCompletionsInferenceStubs.translated_response },
          )

        expect(response[:suggestions]).to contain_exactly(
          OpenAiCompletionsInferenceStubs.proofread_response.strip,
        )
      end
    end

    context "when generating titles" do
      let(:mode) { OpenAiCompletionsInferenceStubs::GENERATE_TITLES }
      before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

      it "returns an array with each title" do
        # Strip double quotes and the leading enumeration markers (a digit
        # plus the following character — presumably "N." — per /\d./) before
        # comparing against the parsed suggestions.
        expected =
          OpenAiCompletionsInferenceStubs
            .generated_titles
            .gsub("\"", "")
            .gsub(/\d./, "")
            .split("\n")
            .map(&:strip)

        response =
          subject.generate_and_send_prompt(
            prompt,
            { text: OpenAiCompletionsInferenceStubs.translated_response },
          )

        expect(response[:suggestions]).to contain_exactly(*expected)
      end
    end
  end
end

View File

@ -31,17 +31,13 @@ RSpec.describe DiscourseAi::AiHelper::Painter do
TEXT
it "returns 4 samples" do
expected_prompt = [
{ role: "system", content: <<~TEXT },
Provide me a StableDiffusion prompt to generate an image that illustrates the following post in 40 words or less, be creative.
TEXT
{ role: "user", content: raw_content },
]
OpenAiCompletionsInferenceStubs.stub_response(expected_prompt, expected_image_prompt)
StableDiffusionStubs.new.stub_response(expected_image_prompt, artifacts)
thumbnails =
DiscourseAi::Completions::LLM.with_prepared_responses([expected_image_prompt]) do
thumbnails = subject.commission_thumbnails(raw_content, user)
end
thumbnail_urls = Upload.last(4).map(&:short_url)
expect(thumbnails).to contain_exactly(*thumbnail_urls)

View File

@ -2,12 +2,13 @@
RSpec.describe DiscourseAi::AiHelper::AssistantController do
describe "#suggest" do
let(:text) { OpenAiCompletionsInferenceStubs.translated_response }
let(:mode) { "-3" }
let(:text_to_proofread) { "The rain in spain stays mainly in the plane." }
let(:proofreaded_text) { "The rain in Spain, stays mainly in the Plane." }
let(:mode) { CompletionPrompt::PROOFREAD }
context "when not logged in" do
it "returns a 403 response" do
post "/discourse-ai/ai-helper/suggest", params: { text: text, mode: mode }
post "/discourse-ai/ai-helper/suggest", params: { text: text_to_proofread, mode: mode }
expect(response.status).to eq(403)
end
@ -22,7 +23,7 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do
end
it "returns a 403 response" do
post "/discourse-ai/ai-helper/suggest", params: { text: text, mode: mode }
post "/discourse-ai/ai-helper/suggest", params: { text: text_to_proofread, mode: mode }
expect(response.status).to eq(403)
end
@ -40,7 +41,11 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do
it "returns a 400 if the helper mode is invalid" do
invalid_mode = "asd"
post "/discourse-ai/ai-helper/suggest", params: { text: text, mode: invalid_mode }
post "/discourse-ai/ai-helper/suggest",
params: {
text: text_to_proofread,
mode: invalid_mode,
}
expect(response.status).to eq(400)
end
@ -52,24 +57,27 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do
end
it "returns a generic error when the completion call fails" do
WebMock.stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
status: 500,
)
DiscourseAi::Completions::LLM
.any_instance
.expects(:completion!)
.raises(DiscourseAi::Completions::Endpoints::Base::CompletionFailed)
post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text }
post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text_to_proofread }
expect(response.status).to eq(502)
end
it "returns a suggestion" do
OpenAiCompletionsInferenceStubs.stub_prompt("proofread")
expected_diff =
"<div class=\"inline-diff\"><p>The rain in <ins>Spain</ins><ins>,</ins><ins> </ins><del>spain </del>stays mainly in the <ins>Plane</ins><del>plane</del>.</p></div>"
post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text }
DiscourseAi::Completions::LLM.with_prepared_responses([proofreaded_text]) do
post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text_to_proofread }
expect(response.status).to eq(200)
expect(response.parsed_body["suggestions"].first).to eq(
OpenAiCompletionsInferenceStubs.proofread_response.strip,
)
expect(response.parsed_body["suggestions"].first).to eq(proofreaded_text)
expect(response.parsed_body["diff"]).to eq(expected_diff)
end
end
end
end

View File

@ -10,6 +10,8 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
sign_in(user)
end
let(:input) { "The rain in spain stays mainly in the Plane." }
let(:composer) { PageObjects::Components::Composer.new }
let(:ai_helper_context_menu) { PageObjects::Components::AIHelperContextMenu.new }
let(:diff_modal) { PageObjects::Modals::DiffModal.new }
@ -31,14 +33,14 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
context "when triggering AI with context menu in composer" do
it "shows the context menu when selecting a passage of text in the composer" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
expect(ai_helper_context_menu).to have_context_menu
end
it "does not show the context menu when selecting insuffient text" do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
page.execute_script(
"const input = document.querySelector('.d-editor-input'); input.setSelectionRange(0, 2);",
)
@ -46,276 +48,261 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
end
it "shows context menu in 'trigger' state when first showing" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
expect(ai_helper_context_menu).to be_showing_triggers
end
it "shows prompt options in context menu when AI button is clicked" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
ai_helper_context_menu.click_ai_button
expect(ai_helper_context_menu).to be_showing_options
end
it "closes the context menu when clicking outside" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
find(".d-editor-preview").click
expect(ai_helper_context_menu).to have_no_context_menu
end
context "when using custom prompt" do
let(:mode) { OpenAiCompletionsInferenceStubs::CUSTOM_PROMPT }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
let(:mode) { CompletionPrompt::CUSTOM_PROMPT }
let(:custom_prompt_input) { "Translate to French" }
let(:custom_prompt_response) { "La pluie en Espagne reste principalement dans l'avion." }
it "shows custom prompt option" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
ai_helper_context_menu.click_ai_button
expect(ai_helper_context_menu).to have_custom_prompt
end
it "enables the custom prompt button when input is filled" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
ai_helper_context_menu.click_ai_button
expect(ai_helper_context_menu).to have_custom_prompt_button_disabled
ai_helper_context_menu.fill_custom_prompt(
OpenAiCompletionsInferenceStubs.custom_prompt_input,
)
ai_helper_context_menu.fill_custom_prompt(custom_prompt_input)
expect(ai_helper_context_menu).to have_custom_prompt_button_enabled
end
it "replaces the composed message with AI generated content" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.fill_custom_prompt(
OpenAiCompletionsInferenceStubs.custom_prompt_input,
)
ai_helper_context_menu.fill_custom_prompt(custom_prompt_input)
DiscourseAi::Completions::LLM.with_prepared_responses([custom_prompt_response]) do
ai_helper_context_menu.click_custom_prompt_button
wait_for do
composer.composer_input.value ==
OpenAiCompletionsInferenceStubs.custom_prompt_response.strip
end
wait_for { composer.composer_input.value == custom_prompt_response }
expect(composer.composer_input.value).to eq(
OpenAiCompletionsInferenceStubs.custom_prompt_response.strip,
)
expect(composer.composer_input.value).to eq(custom_prompt_response)
end
end
end
context "when not a member of custom prompt group" do
let(:mode) { OpenAiCompletionsInferenceStubs::CUSTOM_PROMPT }
let(:mode) { CompletionPrompt::CUSTOM_PROMPT }
before { SiteSetting.ai_helper_custom_prompts_allowed_groups = non_member_group.id.to_s }
it "does not show custom prompt option" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
ai_helper_context_menu.click_ai_button
expect(ai_helper_context_menu).to have_no_custom_prompt
end
end
context "when using translation mode" do
let(:mode) { OpenAiCompletionsInferenceStubs::TRANSLATE }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
let(:mode) { CompletionPrompt::TRANSLATE }
let(:spanish_input) { "La lluvia en España se queda principalmente en el avión." }
it "replaces the composed message with AI generated content" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
expect(composer.composer_input.value).to eq(input)
end
expect(composer.composer_input.value).to eq(
OpenAiCompletionsInferenceStubs.translated_response.strip,
)
end
it "shows reset options after results are complete" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
ai_helper_context_menu.click_confirm_button
expect(ai_helper_context_menu).to be_showing_resets
end
end
it "reverts results when Undo button is clicked" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
ai_helper_context_menu.click_confirm_button
ai_helper_context_menu.click_undo_button
expect(composer.composer_input.value).to eq(OpenAiCompletionsInferenceStubs.spanish_text)
expect(composer.composer_input.value).to eq(spanish_input)
end
end
it "reverts results when revert button is clicked" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
ai_helper_context_menu.click_revert_button
expect(composer.composer_input.value).to eq(OpenAiCompletionsInferenceStubs.spanish_text)
expect(composer.composer_input.value).to eq(spanish_input)
end
end
it "reverts results when Ctrl/Cmd + Z is pressed on the keyboard" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
ai_helper_context_menu.press_undo_keys
expect(composer.composer_input.value).to eq(OpenAiCompletionsInferenceStubs.spanish_text)
expect(composer.composer_input.value).to eq(spanish_input)
end
end
it "confirms the results when confirm button is pressed" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
ai_helper_context_menu.click_confirm_button
expect(composer.composer_input.value).to eq(
OpenAiCompletionsInferenceStubs.translated_response.strip,
)
expect(composer.composer_input.value).to eq(input)
end
end
it "hides the context menu when pressing Escape on the keyboard" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.press_escape_key
expect(ai_helper_context_menu).to have_no_context_menu
end
it "shows the changes in a modal when view changes button is pressed" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
ai_helper_context_menu.click_view_changes_button
expect(diff_modal).to be_visible
expect(diff_modal.old_value).to eq(
OpenAiCompletionsInferenceStubs.spanish_text.gsub(/[[:space:]]+/, " ").strip,
)
expect(diff_modal.old_value).to eq(spanish_input.gsub(/[[:space:]]+/, " ").strip)
expect(diff_modal.new_value).to eq(
OpenAiCompletionsInferenceStubs
.translated_response
.gsub(/[[:space:]]+/, " ")
.gsub(/[‘’]/, "'")
.gsub(/[“”]/, '"')
.strip,
input.gsub(/[[:space:]]+/, " ").gsub(/[‘’]/, "'").gsub(/[“”]/, '"').strip,
)
diff_modal.confirm_changes
expect(ai_helper_context_menu).to be_showing_resets
end
end
it "should not close the context menu when in review state" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.spanish_text)
trigger_context_menu(spanish_input)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.translated_response.strip
end
DiscourseAi::Completions::LLM.with_prepared_responses([input]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == input }
find(".d-editor-preview").click
expect(ai_helper_context_menu).to have_context_menu
end
end
context "when using the proofreading mode" do
let(:mode) { OpenAiCompletionsInferenceStubs::PROOFREAD }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
it "replaces the composed message with AI generated content" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
ai_helper_context_menu.click_ai_button
ai_helper_context_menu.select_helper_model(
OpenAiCompletionsInferenceStubs.text_mode_to_id(mode),
)
wait_for do
composer.composer_input.value == OpenAiCompletionsInferenceStubs.proofread_response.strip
end
expect(composer.composer_input.value).to eq(
OpenAiCompletionsInferenceStubs.proofread_response.strip,
)
context "when using the proofreading mode" do
let(:mode) { CompletionPrompt::PROOFREAD }
let(:proofread_text) { "The rain in Spain, stays mainly in the Plane." }
it "replaces the composed message with AI generated content" do
trigger_context_menu(input)
ai_helper_context_menu.click_ai_button
DiscourseAi::Completions::LLM.with_prepared_responses([proofread_text]) do
ai_helper_context_menu.select_helper_model(mode)
wait_for { composer.composer_input.value == proofread_text }
expect(composer.composer_input.value).to eq(proofread_text)
end
end
end
end
context "when suggesting titles with AI title suggester" do
let(:mode) { OpenAiCompletionsInferenceStubs::GENERATE_TITLES }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
let(:mode) { CompletionPrompt::GENERATE_TITLES }
let(:titles) do
"Rainy Spain*Plane-Bound Delights*Mysterious Spain*Plane-Rain Chronicles*Unveiling Spain"
end
it "opens a menu with title suggestions" do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
DiscourseAi::Completions::LLM.with_prepared_responses([titles]) do
ai_suggestion_dropdown.click_suggest_titles_button
wait_for { ai_suggestion_dropdown.has_dropdown? }
expect(ai_suggestion_dropdown).to have_dropdown
end
end
it "replaces the topic title with the selected title" do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
DiscourseAi::Completions::LLM.with_prepared_responses([titles]) do
ai_suggestion_dropdown.click_suggest_titles_button
wait_for { ai_suggestion_dropdown.has_dropdown? }
ai_suggestion_dropdown.select_suggestion_by_value(2)
expected_title = "The Quiet Piece that Moves Literature: A Gaucho's Story"
ai_suggestion_dropdown.select_suggestion_by_value(1)
expected_title = "Plane-Bound Delights"
expect(find("#reply-title").value).to eq(expected_title)
end
end
it "closes the menu when clicking outside" do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
DiscourseAi::Completions::LLM.with_prepared_responses([titles]) do
ai_suggestion_dropdown.click_suggest_titles_button
wait_for { ai_suggestion_dropdown.has_dropdown? }
@ -324,6 +311,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
expect(ai_suggestion_dropdown).to have_no_dropdown
end
end
it "only shows trigger button if there is sufficient content in the composer" do
visit("/latest")
@ -332,7 +320,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
expect(ai_suggestion_dropdown).to have_no_suggestion_button
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
expect(ai_suggestion_dropdown).to have_suggestion_button
end
end
@ -350,7 +338,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
DiscourseAi::AiHelper::SemanticCategorizer.any_instance.stubs(:categories).returns(response)
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
ai_suggestion_dropdown.click_suggest_category_button
wait_for { ai_suggestion_dropdown.has_dropdown? }
@ -376,7 +364,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
ai_suggestion_dropdown.click_suggest_tags_button
@ -391,41 +379,35 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
end
context "when AI helper is disabled" do
let(:mode) { OpenAiCompletionsInferenceStubs::GENERATE_TITLES }
before do
OpenAiCompletionsInferenceStubs.stub_prompt(mode)
SiteSetting.composer_ai_helper_enabled = false
end
let(:mode) { CompletionPrompt::GENERATE_TITLES }
before { SiteSetting.composer_ai_helper_enabled = false }
it "does not trigger AI context menu" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
expect(ai_helper_context_menu).to have_no_context_menu
end
it "does not trigger AI suggestion buttons" do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
expect(ai_suggestion_dropdown).to have_no_suggestion_button
end
end
context "when user is not a member of AI helper allowed group" do
let(:mode) { OpenAiCompletionsInferenceStubs::GENERATE_TITLES }
before do
OpenAiCompletionsInferenceStubs.stub_prompt(mode)
SiteSetting.ai_helper_allowed_groups = non_member_group.id.to_s
end
let(:mode) { CompletionPrompt::GENERATE_TITLES }
before { SiteSetting.ai_helper_allowed_groups = non_member_group.id.to_s }
it "does not trigger AI context menu" do
trigger_context_menu(OpenAiCompletionsInferenceStubs.translated_response)
trigger_context_menu(input)
expect(ai_helper_context_menu).to have_no_context_menu
end
it "does not trigger AI suggestion buttons" do
visit("/latest")
page.find("#create-topic").click
composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
composer.fill_content(input)
expect(ai_suggestion_dropdown).to have_no_suggestion_button
end
end

View File

@ -13,11 +13,18 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
)
end
fab!(:post_2) do
Fabricate(:post, topic: topic, raw: OpenAiCompletionsInferenceStubs.spanish_text)
Fabricate(:post, topic: topic, raw: "La lluvia en España se queda principalmente en el avión.")
end
let(:topic_page) { PageObjects::Pages::Topic.new }
let(:post_ai_helper) { PageObjects::Components::AIHelperPostOptions.new }
let(:explain_response) { <<~STRING }
In this context, \"pie\" refers to a baked dessert typically consisting of a pastry crust and filling.
The person states they enjoy eating pie, considering it a good dessert. They note that some people wastefully
throw pie at others, but the person themselves chooses to eat the pie rather than throwing it. Overall, \"pie\"
is being used to refer the the baked dessert food item.
STRING
before do
Group.find_by(id: Group::AUTO_GROUPS[:admins]).add(user)
SiteSetting.composer_ai_helper_enabled = true
@ -50,44 +57,40 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
context "when using explain mode" do
skip "TODO: Fix explain mode option not appearing in spec" do
let(:mode) { OpenAiCompletionsInferenceStubs::EXPLAIN }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
let(:mode) { CompletionPrompt::EXPLAIN }
it "shows an explanation of the selected text" do
select_post_text(post)
post_ai_helper.click_ai_button
post_ai_helper.select_helper_model(OpenAiCompletionsInferenceStubs.text_mode_to_id(mode))
wait_for do
post_ai_helper.suggestion_value ==
OpenAiCompletionsInferenceStubs.explain_response.strip
DiscourseAi::Completions::LLM.with_prepared_responses([explain_response]) do
post_ai_helper.select_helper_model(mode)
wait_for { post_ai_helper.suggestion_value == explain_response }
expect(post_ai_helper.suggestion_value).to eq(explain_response)
end
expect(post_ai_helper.suggestion_value).to eq(
OpenAiCompletionsInferenceStubs.explain_response.strip,
)
end
end
end
context "when using translate mode" do
skip "TODO: Fix WebMock request for translate mode not working" do
let(:mode) { OpenAiCompletionsInferenceStubs::TRANSLATE }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
let(:mode) { CompletionPrompt::TRANSLATE }
let(:translated_input) { "The rain in Spain, stays mainly in the Plane." }
it "shows a translation of the selected text" do
select_post_text(post_2)
post_ai_helper.click_ai_button
post_ai_helper.select_helper_model(OpenAiCompletionsInferenceStubs.text_mode_to_id(mode))
wait_for do
post_ai_helper.suggestion_value ==
OpenAiCompletionsInferenceStubs.translated_response.strip
DiscourseAi::Completions::LLM.with_prepared_responses([translated_input]) do
post_ai_helper.select_helper_model(mode)
wait_for { post_ai_helper.suggestion_value == translated_input }
expect(post_ai_helper.suggestion_value).to eq(translated_input)
end
expect(post_ai_helper.suggestion_value).to eq(
OpenAiCompletionsInferenceStubs.translated_response.strip,
)
end
end
end