FEATURE: Composer AI helper (#8)

* FEATURE: Composer AI helper

This change introduces a new composer button for members of the groups listed in the `ai_helper_allowed_groups` site setting.

Users can use ChatGPT to review, improve, or translate their posts into English.

* Add a safeguard for PMs and don't rely on parentView
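
Under the hood the helper talks to a single JSON endpoint. A minimal sketch of the exchange, assuming a logged-in member of an allowed group; the parameter names and response keys come from the controller and OpenAiPrompt below, while the literal text and suggestion are made-up placeholders:

post "/discourse-ai/ai-helper/suggest", params: { mode: "proofread", text: "I want you act as a proofreader." }

response.parsed_body
# => {
#      "suggestions" => ["I want you to act as a proofreader."], # placeholder completion
#      "diff" => "<inline HTML diff>"                            # only present in proofread mode
#    }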
Roman Rizzi 2023-03-15 17:02:20 -03:00 committed by GitHub
parent aa2fca6086
commit f99fe7e1ed
23 changed files with 841 additions and 10 deletions

View File

@ -0,0 +1,43 @@
# frozen_string_literal: true

module DiscourseAi
  module AiHelper
    class AssistantController < ::ApplicationController
      requires_plugin ::DiscourseAi::PLUGIN_NAME
      requires_login
      before_action :ensure_can_request_suggestions

      def suggest
        raise Discourse::InvalidParameters.new(:text) if params[:text].blank?

        if !DiscourseAi::AiHelper::OpenAiPrompt::VALID_TYPES.include?(params[:mode])
          raise Discourse::InvalidParameters.new(:mode)
        end

        RateLimiter.new(current_user, "ai_assistant", 6, 3.minutes).performed!

        hijack do
          render json:
                   DiscourseAi::AiHelper::OpenAiPrompt.new.generate_and_send_prompt(
                     params[:mode],
                     params[:text],
                   ),
                 status: 200
        end
      end

      private

      def ensure_can_request_suggestions
        user_group_ids = current_user.group_ids

        allowed =
          SiteSetting.ai_helper_allowed_groups_map.any? do |group_id|
            user_group_ids.include?(group_id)
          end

        raise Discourse::InvalidAccess if !allowed
      end
    end
  end
end
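
Note: `ai_helper_allowed_groups_map` is not defined anywhere in this commit. For `group_list` site settings Discourse generates a `*_map` accessor that returns the configured group IDs as integers, so the guard above presumably resolves like this (a sketch assuming that stock behaviour, with the default value from settings.yml further down):

SiteSetting.ai_helper_allowed_groups       # => "3|14"
SiteSetting.ai_helper_allowed_groups_map   # => [3, 14]
# ensure_can_request_suggestions then just checks the intersection with current_user.group_ids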

View File

@ -0,0 +1,67 @@
<DModalBody @title="discourse_ai.ai_helper.title">
  <span>{{i18n "discourse_ai.ai_helper.description"}}</span>

  <ComboBox
    @value={{this.selected}}
    @content={{this.helperOptions}}
    @onChange={{action this.updateSelected}}
    @valueProperty="value"
    @class="ai-helper-mode"
  />

  <div class="text-preview">
    <Textarea
      @value={{this.composedMessage}}
      disabled="true"
      class="preview-area"
    />
  </div>

  <div class="selection-hint">{{i18n
      "discourse_ai.ai_helper.selection_hint"
    }}</div>

  <div class="text-preview">
    <ConditionalLoadingSpinner @condition={{this.loading}} />

    {{#unless this.loading}}
      {{#if this.selectingTopicTitle}}
        <div class="radios">
          {{#each this.generatedTitlesSuggestions as |title index|}}
            <label class="radio-label" for="title-suggestion-{{index}}">
              <RadioButton
                @id="title-suggestion-{{index}}"
                @name="title-suggestion"
                @value={{title}}
                @selection={{this.selectedTitle}}
              />
              <b>{{title}}</b>
            </label>
          {{/each}}
        </div>
      {{else if this.proofreadingText}}
        {{html-safe this.proofreadDiff}}
      {{else if this.translatingText}}
        <Textarea
          @value={{this.translatedSuggestion}}
          disabled="true"
          class="preview-area"
        />
      {{/if}}
    {{/unless}}
  </div>
</DModalBody>

<div class="modal-footer">
  {{#if this.canSave}}
    <DButton
      @class="btn-primary create"
      @action={{this.applySuggestion}}
      @label="save"
    />
    <DModalCancel @close={{route-action "closeModal"}} />
  {{else}}
    <div class="ai-helper-waiting-selection">Select an option...</div>
  {{/if}}
</div>

View File

@ -0,0 +1,128 @@
import Component from "@glimmer/component";
import { tracked } from "@glimmer/tracking";
import { action, computed } from "@ember/object";
import I18n from "I18n";
import { ajax } from "discourse/lib/ajax";
import { popupAjaxError } from "discourse/lib/ajax-error";

const TRANSLATE = "translate";
const GENERATE_TITLES = "generate_titles";
const PROOFREAD = "proofread";

export default class AiHelper extends Component {
  @tracked selected = null;
  @tracked loading = false;

  @tracked generatedTitlesSuggestions = [];
  @tracked proofReadSuggestion = null;
  @tracked translatedSuggestion = null;
  @tracked selectedTitle = null;
  @tracked proofreadDiff = null;

  helperOptions = [
    {
      name: I18n.t("discourse_ai.ai_helper.modes.translate"),
      value: TRANSLATE,
    },
    {
      name: I18n.t("discourse_ai.ai_helper.modes.generate_titles"),
      value: GENERATE_TITLES,
    },
    {
      name: I18n.t("discourse_ai.ai_helper.modes.proofreader"),
      value: PROOFREAD,
    },
  ];

  get composedMessage() {
    const editor = this.args.editor;

    return editor.getSelected().value || editor.value;
  }

  // True when a suggestion for the currently selected mode is already loaded,
  // so switching back to it doesn't trigger another request.
  get hasSuggestion() {
    switch (this.selected) {
      case GENERATE_TITLES:
        return this.generatedTitlesSuggestions.length > 0;
      case TRANSLATE:
        return !!this.translatedSuggestion;
      case PROOFREAD:
        return !!this.proofReadSuggestion;
      default:
        return false;
    }
  }

  @computed("selected", "selectedTitle", "translatingText", "proofreadingText")
  get canSave() {
    return (
      (this.selected === GENERATE_TITLES && this.selectedTitle) ||
      this.translatingText ||
      this.proofreadingText
    );
  }

  @computed("selected", "translatedSuggestion")
  get translatingText() {
    return this.selected === TRANSLATE && this.translatedSuggestion;
  }

  @computed("selected", "proofReadSuggestion")
  get proofreadingText() {
    return this.selected === PROOFREAD && this.proofReadSuggestion;
  }

  @computed("selected", "generatedTitlesSuggestions")
  get selectingTopicTitle() {
    return (
      this.selected === GENERATE_TITLES &&
      this.generatedTitlesSuggestions.length > 0
    );
  }

  _updateSuggestedByAI(value, data) {
    switch (value) {
      case GENERATE_TITLES:
        this.generatedTitlesSuggestions = data.suggestions;
        break;
      case TRANSLATE:
        this.translatedSuggestion = data.suggestions[0];
        break;
      case PROOFREAD:
        this.proofReadSuggestion = data.suggestions[0];
        this.proofreadDiff = data.diff;
        break;
    }
  }

  @action
  async updateSelected(value) {
    this.loading = true;
    this.selected = value;

    if (value === GENERATE_TITLES) {
      this.selectedTitle = null;
    }

    if (this.hasSuggestion) {
      this.loading = false;
    } else {
      return ajax("/discourse-ai/ai-helper/suggest", {
        method: "POST",
        data: { mode: this.selected, text: this.composedMessage },
      })
        .then((data) => {
          this._updateSuggestedByAI(value, data);
        })
        .catch(popupAjaxError)
        .finally(() => (this.loading = false));
    }
  }

  @action
  applySuggestion() {
    if (this.selectingTopicTitle) {
      const composer = this.args.editor.outletArgs?.composer;

      if (composer) {
        composer.set("title", this.selectedTitle);
      }
    } else {
      const newText = this.proofreadingText
        ? this.proofReadSuggestion
        : this.translatedSuggestion;

      this.args.editor.replaceText(this.composedMessage, newText);
    }

    this.args.closeModal();
  }
}

View File

@ -3,9 +3,9 @@
  <tbody>
    {{#each-in @accuracies as |model acc|}}
      <tr>
        <td colspan="4">{{i18n "discourse-ai.reviewables.model_used"}}</td>
        <td colspan="4">{{i18n "discourse_ai.reviewables.model_used"}}</td>
        <td colspan="3">{{model}}</td>
        <td colspan="4">{{i18n "discourse-ai.reviewables.accuracy"}}</td>
        <td colspan="4">{{i18n "discourse_ai.reviewables.accuracy"}}</td>
        <td colspan="3">{{acc}}%</td>
      </tr>
    {{/each-in}}

View File

@ -0,0 +1 @@
<AiHelper @editor={{this.editor}} @closeModal={{route-action "closeModal"}} />

View File

@ -0,0 +1,70 @@
import { withPluginApi } from "discourse/lib/plugin-api";
import showModal from "discourse/lib/show-modal";

function initializeComposerAIHelper(api) {
  api.modifyClass("component:composer-editor", {
    pluginId: "discourse-ai",

    actions: {
      extraButtons(toolbar) {
        this._super(toolbar);

        const removeAiHelperFromPM =
          this.composerModel.privateMessage &&
          !this.siteSettings.ai_helper_allowed_in_pm;

        if (removeAiHelperFromPM) {
          const extrasGroup = toolbar.groups.find((g) => g.group === "extras");
          const newButtons = extrasGroup.buttons.filter(
            (b) => b.id !== "ai-helper"
          );
          extrasGroup.buttons = newButtons;
        }
      },
    },
  });

  api.modifyClass("component:d-editor", {
    pluginId: "discourse-ai",

    actions: {
      openAIHelper() {
        if (this.value) {
          showModal("composer-ai-helper").setProperties({ editor: this });
        }
      },
    },
  });

  api.onToolbarCreate((toolbar) => {
    toolbar.addButton({
      id: "ai-helper",
      title: "discourse_ai.ai_helper.title",
      group: "extras",
      icon: "magic",
      className: "composer-ai-helper",
      sendAction: () => toolbar.context.send("openAIHelper"),
    });
  });
}

export default {
  name: "discourse_ai-composer-helper",

  initialize(container) {
    const settings = container.lookup("site-settings:main");
    const user = container.lookup("service:current-user");
    const helperEnabled =
      settings.discourse_ai_enabled && settings.composer_ai_helper_enabled;

    // Parse each group id with an explicit radix; a bare .map(parseInt)
    // would pass the array index in as the radix and yield NaN.
    const allowedGroups = settings.ai_helper_allowed_groups
      .split("|")
      .map((id) => parseInt(id, 10));

    const canUseAssistant =
      user && user.groups.some((g) => allowedGroups.includes(g.id));

    if (helperEnabled && canUseAssistant) {
      withPluginApi("1.6.0", initializeComposerAIHelper);
    }
  },
};

View File

@ -0,0 +1,27 @@
.composer-ai-helper-modal {
  .combobox,
  .text-preview,
  .ai-helper-waiting-selection {
    margin: 10px 0 10px 0;
  }

  .text-preview {
    ins {
      background-color: var(--success-low);
      text-decoration: underline;
    }

    del {
      background-color: var(--danger-low);
      text-decoration: line-through;
    }

    .preview-area {
      height: 200px;
    }
  }

  .selection-hint {
    font-size: var(--font-down-2);
    margin-bottom: 20px;
  }
}

View File

@ -1,6 +1,14 @@
en:
  js:
    discourse-ai:
    discourse_ai:
      ai_helper:
        title: "Suggest changes using AI"
        description: "Choose one of the options below, and the AI will suggest a new version of the text."
        selection_hint: "Hint: You can also select a portion of the text before opening the helper to rewrite only that part."
        modes:
          translate: Translate to English
          generate_titles: Suggest topic titles
          proofreader: Proofread text
      reviewables:
        model_used: "Model used:"
        accuracy: "Accuracy:"

View File

@ -1,6 +1,6 @@
en:
  site_settings:
    ai_enabled: "Enable the discourse ai plugin."
    discourse_ai_enabled: "Enable the discourse AI plugin."
    ai_toxicity_enabled: "Enable the toxicity module."
    ai_toxicity_inference_service_api_endpoint: "URL where the API is running for the toxicity module"
    ai_toxicity_inference_service_api_key: "API key for the toxicity API"
@ -19,6 +19,23 @@ en:
    ai_sentiment_inference_service_api_endpoint: "URL where the API is running for the sentiment module"
    ai_sentiment_inference_service_api_key: "API key for the sentiment API"
    ai_sentiment_models: "Models to use for inference. Sentiment classifies posts on the positive/neutral/negative space. Emotion classifies on the anger/disgust/fear/joy/neutral/sadness/surprise space."
    ai_nsfw_detection_enabled: "Enable the NSFW module."
    ai_nsfw_inference_service_api_endpoint: "URL where the API is running for the NSFW module"
    ai_nsfw_inference_service_api_key: "API key for the NSFW API"
    ai_nsfw_flag_automatically: "Automatically flag NSFW posts that are above the configured thresholds."
    ai_nsfw_flag_threshold_general: "General threshold for an image to be considered NSFW."
    ai_nsfw_flag_threshold_drawings: "Threshold for a drawing to be considered NSFW."
    ai_nsfw_flag_threshold_hentai: "Threshold for an image classified as hentai to be considered NSFW."
    ai_nsfw_flag_threshold_porn: "Threshold for an image classified as porn to be considered NSFW."
    ai_nsfw_flag_threshold_sexy: "Threshold for an image classified as sexy to be considered NSFW."
    ai_nsfw_models: "Models to use for NSFW inference."
    composer_ai_helper_enabled: "Enable the composer's AI helper."
    ai_openai_api_key: "API key for the AI helper"
    ai_helper_allowed_groups: "Users in these groups will see the AI helper button in the composer."
    ai_helper_allowed_in_pm: "Enable the composer's AI helper in PMs."
  reviewables:
    reasons:
      flagged_by_toxicity: The AI plugin flagged this after classifying it as toxic.

config/routes.rb Normal file
View File

@ -0,0 +1,10 @@
# frozen_string_literal: true

DiscourseAi::Engine.routes.draw do
  # AI-helper routes
  scope module: :ai_helper, path: "/ai-helper", defaults: { format: :json } do
    post "suggest" => "assistant#suggest"
  end
end

Discourse::Application.routes.append { mount ::DiscourseAi::Engine, at: "discourse-ai" }
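
Since the engine is mounted at `discourse-ai`, the scope above yields a single JSON route; the generated mapping is roughly:

# POST /discourse-ai/ai-helper/suggest
#   => DiscourseAi::AiHelper::AssistantController#suggest (format: :json)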

View File

@ -87,3 +87,17 @@ plugins:
  ai_openai_api_key:
    default: ""
  composer_ai_helper_enabled:
    default: false
    client: true
  ai_helper_allowed_groups:
    client: true
    type: group_list
    list_type: compact
    default: "3|14" # 3: @staff, 14: @trust_level_4
    allow_any: false
    refresh: true
  ai_helper_allowed_in_pm:
    default: false
    client: true
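
The compact default maps to Discourse's automatic groups, matching the inline comment above (a sketch using core's constants):

Group::AUTO_GROUPS[:staff]          # => 3
Group::AUTO_GROUPS[:trust_level_4]  # => 14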

View File

@ -1,6 +1,6 @@
# frozen_string_literal: true

module DiscourseAi
module ::DiscourseAi
  class Engine < ::Rails::Engine
    engine_name PLUGIN_NAME
    isolate_namespace DiscourseAi

View File

@ -0,0 +1,14 @@
# frozen_string_literal: true

module DiscourseAi
  module AiHelper
    class EntryPoint
      def load_files
        require_relative "open_ai_prompt"
      end

      def inject_into(plugin)
        plugin.register_svg_icon("magic")
      end
    end
  end
end

View File

@ -0,0 +1,92 @@
# frozen_string_literal: true

module DiscourseAi
  module AiHelper
    class OpenAiPrompt
      TRANSLATE = "translate"
      GENERATE_TITLES = "generate_titles"
      PROOFREAD = "proofread"

      VALID_TYPES = [TRANSLATE, GENERATE_TITLES, PROOFREAD]

      def get_prompt_for(prompt_type)
        case prompt_type
        when TRANSLATE
          translate_prompt
        when GENERATE_TITLES
          generate_titles_prompt
        when PROOFREAD
          proofread_prompt
        end
      end

      def generate_and_send_prompt(prompt_type, text)
        result = {}

        prompt = [
          { role: "system", content: get_prompt_for(prompt_type) },
          { role: "user", content: text },
        ]

        result[:suggestions] = DiscourseAi::Inference::OpenAiCompletions
          .perform!(prompt)
          .dig(:choices)
          .to_a
          .flat_map { |choice| parse_content(prompt_type, choice.dig(:message, :content).to_s) }
          .compact_blank

        result[:diff] = generate_diff(text, result[:suggestions].first) if proofreading?(
          prompt_type,
        )

        result
      end

      private

      def proofreading?(prompt_type)
        prompt_type == PROOFREAD
      end

      def generate_diff(text, suggestion)
        cooked_text = PrettyText.cook(text)
        cooked_suggestion = PrettyText.cook(suggestion)

        DiscourseDiff.new(cooked_text, cooked_suggestion).inline_html
      end

      def parse_content(type, content)
        return "" if content.blank?
        return content.strip if type != GENERATE_TITLES

        # Titles come back as a numbered list; drop quotes and numbering, then split per line.
        content.gsub("\"", "").gsub(/\d./, "").split("\n").map(&:strip)
      end

      def translate_prompt
        <<~STRING
          I want you to act as an English translator, spelling corrector and improver. I will speak to you
          in any language and you will detect the language, translate it and answer in the corrected and
          improved version of my text, in English. I want you to replace my simplified A0-level words and
          sentences with more beautiful and elegant, upper-level English words and sentences.
          Keep the meaning the same, but make them more literary. I want you to only reply with the correction,
          the improvements and nothing else; do not write explanations.
        STRING
      end

      def generate_titles_prompt
        <<~STRING
          I want you to act as a title generator for written pieces. I will provide you with a text,
          and you will generate five attention-grabbing titles. Please keep each title concise and under 20 words,
          and ensure that the meaning is maintained. Replies should use the same language as the provided text.
        STRING
      end

      def proofread_prompt
        <<~STRING
          I want you to act as a proofreader. I will provide you with a text and I want you to review it for any spelling,
          grammar, or punctuation errors. Once you have finished reviewing the text, provide me with any necessary
          corrections or suggestions to improve the text.
        STRING
      end
    end
  end
end
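
Put together, a console-style sketch of what OpenAiPrompt returns per mode; the literal strings stand in for real completions and are not actual output:

helper = DiscourseAi::AiHelper::OpenAiPrompt.new

helper.generate_and_send_prompt("translate", "¡Pero, che!")
# => { suggestions: ["But, my friend!"] }

helper.generate_and_send_prompt("proofread", "a text with sum typos")
# => { suggestions: ["a text with some typos"], diff: "<inline HTML diff from DiscourseDiff>" }

helper.generate_and_send_prompt("generate_titles", "a longer piece of text")
# => { suggestions: ["First title", "Second title", "..."] } # numbering and quotes stripped by parse_content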

View File

@ -2,15 +2,13 @@
module ::DiscourseAi
  module Inference
    class OpenAICompletions
      def self.perform!(model, content, api_key)
    class OpenAiCompletions
      def self.perform!(content, model = "gpt-3.5-turbo")
        headers = {
          "Authorization" => "Bearer #{SiteSetting.ai_openai_api_key}",
          "Content-Type" => "application/json",
        }

        model ||= "gpt-3.5-turbo"

        response =
          Faraday.post(
            "https://api.openai.com/v1/chat/completions",

View File

@ -2,7 +2,7 @@
module ::DiscourseAi
  module Inference
    class OpenAIEmbeddings
    class OpenAiEmbeddings
      def self.perform!(content, model = nil)
        headers = {
          "Authorization" => "Bearer #{SiteSetting.ai_openai_api_key}",

View File

@ -9,6 +9,8 @@
enabled_site_setting :discourse_ai_enabled

register_asset "stylesheets/modules/ai-helper/common/ai-helper.scss"

module ::DiscourseAi
  PLUGIN_NAME = "discourse-ai"
end
@ -28,11 +30,13 @@ after_initialize do
require_relative "lib/modules/nsfw/entry_point"
require_relative "lib/modules/toxicity/entry_point"
require_relative "lib/modules/sentiment/entry_point"
require_relative "lib/modules/ai_helper/entry_point"
[
DiscourseAi::NSFW::EntryPoint.new,
DiscourseAi::Toxicity::EntryPoint.new,
DiscourseAi::Sentiment::EntryPoint.new,
DiscourseAi::AiHelper::EntryPoint.new,
].each do |a_module|
a_module.load_files
a_module.inject_into(self)

View File

@ -0,0 +1,64 @@
# frozen_string_literal: true

require_relative "../../../support/openai_completions_inference_stubs"

RSpec.describe DiscourseAi::AiHelper::OpenAiPrompt do
  describe "#generate_and_send_prompt" do
    context "when using the translate mode" do
      let(:mode) { described_class::TRANSLATE }

      before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

      it "sends the prompt to ChatGPT and returns the response" do
        response =
          subject.generate_and_send_prompt(mode, OpenAiCompletionsInferenceStubs.spanish_text)

        expect(response[:suggestions]).to contain_exactly(
          OpenAiCompletionsInferenceStubs.translated_response.strip,
        )
      end
    end

    context "when using the proofread mode" do
      let(:mode) { described_class::PROOFREAD }

      before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

      it "sends the prompt to ChatGPT and returns the response" do
        response =
          subject.generate_and_send_prompt(
            mode,
            OpenAiCompletionsInferenceStubs.translated_response,
          )

        expect(response[:suggestions]).to contain_exactly(
          OpenAiCompletionsInferenceStubs.proofread_response.strip,
        )
      end
    end

    context "when generating titles" do
      let(:mode) { described_class::GENERATE_TITLES }

      before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

      it "returns an array with each title" do
        expected =
          OpenAiCompletionsInferenceStubs
            .generated_titles
            .gsub("\"", "")
            .gsub(/\d./, "")
            .split("\n")
            .map(&:strip)

        response =
          subject.generate_and_send_prompt(
            mode,
            OpenAiCompletionsInferenceStubs.translated_response,
          )

        expect(response[:suggestions]).to contain_exactly(*expected)
      end
    end
  end
end

View File

@ -0,0 +1,68 @@
# frozen_string_literal: true

require_relative "../../support/openai_completions_inference_stubs"

RSpec.describe DiscourseAi::AiHelper::AssistantController do
  describe "#suggest" do
    let(:text) { OpenAiCompletionsInferenceStubs.translated_response }
    let(:mode) { DiscourseAi::AiHelper::OpenAiPrompt::PROOFREAD }

    context "when not logged in" do
      it "returns a 403 response" do
        post "/discourse-ai/ai-helper/suggest", params: { text: text, mode: mode }

        expect(response.status).to eq(403)
      end
    end

    context "when logged in as a user without enough privileges" do
      fab!(:user) { Fabricate(:newuser) }

      before do
        sign_in(user)
        SiteSetting.ai_helper_allowed_groups = Group::AUTO_GROUPS[:staff]
      end

      it "returns a 403 response" do
        post "/discourse-ai/ai-helper/suggest", params: { text: text, mode: mode }

        expect(response.status).to eq(403)
      end
    end

    context "when logged in as an allowed user" do
      fab!(:user) { Fabricate(:user) }

      before do
        sign_in(user)

        user.group_ids = [Group::AUTO_GROUPS[:trust_level_1]]
        SiteSetting.ai_helper_allowed_groups = Group::AUTO_GROUPS[:trust_level_1]
      end

      it "returns a 400 if the helper mode is invalid" do
        invalid_mode = "asd"

        post "/discourse-ai/ai-helper/suggest", params: { text: text, mode: invalid_mode }

        expect(response.status).to eq(400)
      end

      it "returns a 400 if the text is blank" do
        post "/discourse-ai/ai-helper/suggest", params: { mode: mode }

        expect(response.status).to eq(400)
      end

      it "returns a suggestion" do
        OpenAiCompletionsInferenceStubs.stub_prompt(mode)

        post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text }

        expect(response.status).to eq(200)
        expect(response.parsed_body["suggestions"].first).to eq(
          OpenAiCompletionsInferenceStubs.proofread_response.strip,
        )
      end
    end
  end
end

View File

@ -0,0 +1,96 @@
# frozen_string_literal: true

class OpenAiCompletionsInferenceStubs
  class << self
    def spanish_text
      <<~STRING
        Para que su horror sea perfecto, César, acosado al pie de la estatua por los impacientes puñales de sus amigos,
        descubre entre las caras y los aceros la de Marco Bruto, su protegido, acaso su hijo,
        y ya no se defiende y exclama: ¡Tú también, hijo mío! Shakespeare y Quevedo recogen el patético grito.
        Al destino le agradan las repeticiones, las variantes, las simetrías; diecinueve siglos después,
        en el sur de la provincia de Buenos Aires, un gaucho es agredido por otros gauchos y, al caer,
        reconoce a un ahijado suyo y le dice con mansa reconvención y lenta sorpresa (estas palabras hay que oírlas, no leerlas):
        ¡Pero, che! Lo matan y no sabe que muere para que se repita una escena.
      STRING
    end

    def translated_response
      <<~STRING
        "To perfect his horror, Caesar, surrounded at the base of the statue by the impatient daggers of his friends,
        discovers among the faces and blades that of Marcus Brutus, his protege, perhaps his son, and he no longer
        defends himself, but instead exclaims: 'You too, my son!' Shakespeare and Quevedo capture the pathetic cry.
        Destiny favors repetitions, variants, symmetries; nineteen centuries later, in the southern province of Buenos Aires,
        a gaucho is attacked by other gauchos and, as he falls, recognizes a godson of his and says with gentle rebuke and
        slow surprise (these words must be heard, not read): 'But, my friend!' He is killed and does not know that he
        dies so that a scene may be repeated."
      STRING
    end

    def generated_titles
      <<~STRING
        1. "The Life and Death of a Nameless Gaucho"
        2. "The Faith of Iron and Courage: A Gaucho's Legacy"
        3. "The Quiet Piece that Moves Literature: A Gaucho's Story"
        4. "The Unknown Hero: A Gaucho's Sacrifice for Country"
        5. "From Dust to Legacy: The Enduring Name of a Gaucho"
      STRING
    end

    def proofread_response
      <<~STRING
        "This excerpt explores the idea of repetition and symmetry in tragic events. The author highlights two instances
        where someone is betrayed by a close friend or protege, uttering a similar phrase of surprise and disappointment
        before their untimely death. The first example refers to Julius Caesar, who upon realizing that one of his own
        friends and proteges, Marcus Brutus, is among his assassins, exclaims \"You too, my son!\" The second example
        is of a gaucho in Buenos Aires, who recognizes his godson among his attackers and utters the words of rebuke
        and surprise, \"But, my friend!\" before he is killed. The author suggests that these tragedies occur so that
        a scene may be repeated, emphasizing the cyclical nature of history and the inevitability of certain events."
      STRING
    end

    def response(content)
      {
        id: "chatcmpl-6sZfAb30Rnv9Q7ufzFwvQsMpjZh8S",
        object: "chat.completion",
        created: 1_678_464_820,
        model: "gpt-3.5-turbo-0301",
        usage: {
          prompt_tokens: 337,
          completion_tokens: 162,
          total_tokens: 499,
        },
        choices: [
          { message: { role: "assistant", content: content }, finish_reason: "stop", index: 0 },
        ],
      }
    end

    def response_text_for(type)
      case type
      when DiscourseAi::AiHelper::OpenAiPrompt::TRANSLATE
        translated_response
      when DiscourseAi::AiHelper::OpenAiPrompt::PROOFREAD
        proofread_response
      when DiscourseAi::AiHelper::OpenAiPrompt::GENERATE_TITLES
        generated_titles
      end
    end

    def stub_prompt(type)
      prompt_builder = DiscourseAi::AiHelper::OpenAiPrompt.new
      text =
        type == DiscourseAi::AiHelper::OpenAiPrompt::TRANSLATE ? spanish_text : translated_response

      prompt = [
        { role: "system", content: prompt_builder.get_prompt_for(type) },
        { role: "user", content: text },
      ]

      WebMock
        .stub_request(:post, "https://api.openai.com/v1/chat/completions")
        .with(body: JSON.dump(model: "gpt-3.5-turbo", messages: prompt))
        .to_return(status: 200, body: JSON.dump(response(response_text_for(type))))
    end
  end
end

View File

@ -0,0 +1,86 @@
# frozen_string_literal: true

require_relative "../../support/openai_completions_inference_stubs"

RSpec.describe "AI Composer helper", type: :system, js: true do
  fab!(:user) { Fabricate(:admin) }

  before do
    Group.find_by(id: Group::AUTO_GROUPS[:admins]).add(user)
    SiteSetting.composer_ai_helper_enabled = true
    sign_in(user)
  end

  let(:composer) { PageObjects::Components::Composer.new }
  let(:ai_helper_modal) { PageObjects::Modals::AiHelper.new }

  context "when using the translation mode" do
    let(:mode) { DiscourseAi::AiHelper::OpenAiPrompt::TRANSLATE }

    before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

    it "replaces the composed message with AI generated content" do
      visit("/latest")
      page.find("#create-topic").click

      composer.fill_content(OpenAiCompletionsInferenceStubs.spanish_text)
      page.find(".composer-ai-helper").click

      expect(ai_helper_modal).to be_visible

      ai_helper_modal.select_helper_model(mode)
      ai_helper_modal.save_changes

      expect(composer.composer_input.value).to eq(
        OpenAiCompletionsInferenceStubs.translated_response.strip,
      )
    end
  end

  context "when using the proofreading mode" do
    let(:mode) { DiscourseAi::AiHelper::OpenAiPrompt::PROOFREAD }

    before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

    it "replaces the composed message with AI generated content" do
      visit("/latest")
      page.find("#create-topic").click

      composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
      page.find(".composer-ai-helper").click

      expect(ai_helper_modal).to be_visible

      ai_helper_modal.select_helper_model(mode)
      ai_helper_modal.save_changes

      expect(composer.composer_input.value).to eq(
        OpenAiCompletionsInferenceStubs.proofread_response.strip,
      )
    end
  end

  context "when selecting an AI generated title" do
    let(:mode) { DiscourseAi::AiHelper::OpenAiPrompt::GENERATE_TITLES }

    before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }

    it "replaces the topic title" do
      visit("/latest")
      page.find("#create-topic").click

      composer.fill_content(OpenAiCompletionsInferenceStubs.translated_response)
      page.find(".composer-ai-helper").click

      expect(ai_helper_modal).to be_visible

      ai_helper_modal.select_helper_model(mode)
      ai_helper_modal.select_title_suggestion(2)
      ai_helper_modal.save_changes

      expected_title = "The Quiet Piece that Moves Literature: A Gaucho's Story"

      expect(find("#reply-title").value).to eq(expected_title)
    end
  end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true

module PageObjects
  module Modals
    class AiHelper < PageObjects::Modals::Base
      def visible?
        page.has_css?(".composer-ai-helper-modal")
      end

      def select_helper_model(mode)
        find(".ai-helper-mode").click
        find(".select-kit-row[data-value=\"#{mode}\"]").click
      end

      def save_changes
        find(".modal-footer button.create").click
      end

      def select_title_suggestion(option_number)
        find("input#title-suggestion-#{option_number}").click
      end
    end
  end
end