REFACTOR: Store prompts in a dedicated table. (#14)

This change makes it easier to add new prompts to our AI helper. There is no UI for managing them yet, so for now new prompts have to be added through the Rails console.
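A minimal sketch of what that looks like (field names come from the completion_prompts schema added in this commit; the example prompt itself is hypothetical):

# From the Rails console. Only records with enabled: true are offered by the helper.
CompletionPrompt.create!(
  name: "summarize",                 # becomes the `mode` param and the translation key suffix
  translated_name: "Summarize text", # fallback label when no client translation exists
  prompt_type: :text,                # one of :text, :list, :diff
  value: "I want you to act as a summarizer. Condense the text I give you into one short paragraph.",
  enabled: true,
)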
Roman Rizzi 2023-03-17 15:14:19 -03:00 committed by GitHub
parent edfdc6dfae
commit 320ac6e84b
16 changed files with 258 additions and 101 deletions

View File

@@ -7,19 +7,27 @@ module DiscourseAi
requires_login
before_action :ensure_can_request_suggestions
def prompts
render json:
ActiveModel::ArraySerializer.new(
DiscourseAi::AiHelper::OpenAiPrompt.new.available_prompts,
root: false,
),
status: 200
end
def suggest
raise Discourse::InvalidParameters.new(:text) if params[:text].blank?
if !DiscourseAi::AiHelper::OpenAiPrompt::VALID_TYPES.include?(params[:mode])
raise Discourse::InvalidParameters.new(:mode)
end
prompt = CompletionPrompt.find_by(name: params[:mode])
raise Discourse::InvalidParameters.new(:mode) if !prompt || !prompt.enabled?
RateLimiter.new(current_user, "ai_assistant", 6, 3.minutes).performed!
hijack do
render json:
DiscourseAi::AiHelper::OpenAiPrompt.new.generate_and_send_prompt(
params[:mode],
prompt,
params[:text],
),
status: 200
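For reference, the two endpoints should respond with roughly these shapes (reconstructed from available_prompts and generate_and_send_prompt further down, not captured output):

# GET /discourse-ai/ai-helper/prompts
[
  { name: "translate", translated_name: "Translate to English", prompt_type: "text" },
  { name: "generate_titles", translated_name: "Suggest topic titles", prompt_type: "list" },
  { name: "proofread", translated_name: "Proofread text", prompt_type: "diff" },
]

# POST /discourse-ai/ai-helper/suggest with mode=proofread
# The diff key is only present for diff-type prompts.
{ type: "diff", suggestions: ["<corrected text>"], diff: "<inline HTML diff>" }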

View File

@@ -0,0 +1,23 @@
# frozen_string_literal: true
class CompletionPrompt < ActiveRecord::Base
enum :prompt_type, { text: 0, list: 1, diff: 2 }
end
# == Schema Information
#
# Table name: completion_prompts
#
# id :bigint not null, primary key
# name :string not null
# translated_name :string
# prompt_type :integer default("text"), not null
# value :text not null
# enabled :boolean default(TRUE), not null
# created_at :datetime not null
# updated_at :datetime not null
#
# Indexes
#
# index_completion_prompts_on_name (name) UNIQUE
#
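The enum declaration above is what the rest of the diff leans on: Rails generates a predicate helper for each prompt type, so callers can ask prompt.diff? instead of comparing strings. A short sketch of that standard enum behavior (assuming the seeded proofread prompt exists):

prompt = CompletionPrompt.find_by(name: "proofread")
prompt.prompt_type            # => "diff" (stored as integer 2, read back as the enum name)
prompt.diff?                  # => true; generate_and_send_prompt uses this to decide whether to build an HTML diff
prompt.list?                  # => false; parse_content only splits list-type completions into several suggestions
CompletionPrompt.prompt_types # => {"text"=>0, "list"=>1, "diff"=>2}; the seed file uses this mapping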

View File

@@ -33,3 +33,20 @@ class ModelAccuracy < ActiveRecord::Base
flags_agreed + flags_disagreed
end
end
# == Schema Information
#
# Table name: model_accuracies
#
# id :bigint not null, primary key
# model :string not null
# classification_type :string not null
# flags_agreed :integer default(0), not null
# flags_disagreed :integer default(0), not null
# created_at :datetime not null
# updated_at :datetime not null
#
# Indexes
#
# index_model_accuracies_on_model (model) UNIQUE
#

View File

@@ -147,3 +147,40 @@ class ReviewableAiChatMessage < Reviewable
end
end
end
# == Schema Information
#
# Table name: reviewables
#
# id :bigint not null, primary key
# type :string not null
# status :integer default("pending"), not null
# created_by_id :integer not null
# reviewable_by_moderator :boolean default(FALSE), not null
# reviewable_by_group_id :integer
# category_id :integer
# topic_id :integer
# score :float default(0.0), not null
# potential_spam :boolean default(FALSE), not null
# target_id :integer
# target_type :string
# target_created_by_id :integer
# payload :json
# version :integer default(0), not null
# latest_score :datetime
# created_at :datetime not null
# updated_at :datetime not null
# force_review :boolean default(FALSE), not null
# reject_reason :text
#
# Indexes
#
# idx_reviewables_score_desc_created_at_desc (score,created_at)
# index_reviewables_on_reviewable_by_group_id (reviewable_by_group_id)
# index_reviewables_on_status_and_created_at (status,created_at)
# index_reviewables_on_status_and_score (status,score)
# index_reviewables_on_status_and_type (status,type)
# index_reviewables_on_target_id_where_post_type_eq_post (target_id) WHERE ((target_type)::text = 'Post'::text)
# index_reviewables_on_topic_id_and_status_and_created_by_id (topic_id,status,created_by_id)
# index_reviewables_on_type_and_target_id (type,target_id) UNIQUE
#

View File

@@ -205,3 +205,40 @@ class ReviewableAiPost < Reviewable
end
end
end
# == Schema Information
#
# Table name: reviewables
#
# id :bigint not null, primary key
# type :string not null
# status :integer default("pending"), not null
# created_by_id :integer not null
# reviewable_by_moderator :boolean default(FALSE), not null
# reviewable_by_group_id :integer
# category_id :integer
# topic_id :integer
# score :float default(0.0), not null
# potential_spam :boolean default(FALSE), not null
# target_id :integer
# target_type :string
# target_created_by_id :integer
# payload :json
# version :integer default(0), not null
# latest_score :datetime
# created_at :datetime not null
# updated_at :datetime not null
# force_review :boolean default(FALSE), not null
# reject_reason :text
#
# Indexes
#
# idx_reviewables_score_desc_created_at_desc (score,created_at)
# index_reviewables_on_reviewable_by_group_id (reviewable_by_group_id)
# index_reviewables_on_status_and_created_at (status,created_at)
# index_reviewables_on_status_and_score (status,score)
# index_reviewables_on_status_and_type (status,type)
# index_reviewables_on_target_id_where_post_type_eq_post (target_id) WHERE ((target_type)::text = 'Post'::text)
# index_reviewables_on_topic_id_and_status_and_created_by_id (topic_id,status,created_by_id)
# index_reviewables_on_type_and_target_id (type,target_id) UNIQUE
#

View File

@@ -1,13 +1,12 @@
import Component from "@glimmer/component";
import { tracked } from "@glimmer/tracking";
import { action, computed } from "@ember/object";
import I18n from "I18n";
import { ajax } from "discourse/lib/ajax";
import { popupAjaxError } from "discourse/lib/ajax-error";
const TRANSLATE = "translate";
const GENERATE_TITLES = "generate_titles";
const PROOFREAD = "proofread";
const LIST = "list";
const TEXT = "text";
const DIFF = "diff";
export default class AiHelper extends Component {
@tracked selected = null;
@@ -21,20 +20,29 @@ export default class AiHelper extends Component {
@tracked proofreadDiff = null;
helperOptions = [
{
name: I18n.t("discourse_ai.ai_helper.modes.translate"),
value: TRANSLATE,
},
{
name: I18n.t("discourse_ai.ai_helper.modes.generate_titles"),
value: GENERATE_TITLES,
},
{
name: I18n.t("discourse_ai.ai_helper.modes.proofreader"),
value: PROOFREAD,
},
];
@tracked helperOptions = [];
promptTypes = {};
constructor() {
super(...arguments);
this.loadPrompts();
}
async loadPrompts() {
const prompts = await ajax("/discourse-ai/ai-helper/prompts");
this.promptTypes = prompts.reduce((memo, p) => {
memo[p.name] = p.prompt_type;
return memo;
}, {});
this.helperOptions = prompts.map((p) => {
return {
name: p.translated_name,
value: p.name,
};
});
}
get composedMessage() {
const editor = this.args.editor;
@@ -45,7 +53,7 @@
@computed("selected", "selectedTitle", "translatingText", "proofreadingText")
get canSave() {
return (
(this.selected === GENERATE_TITLES && this.selectedTitle) ||
(this.promptTypes[this.selected] === LIST && this.selectedTitle) ||
this.translatingText ||
this.proofreadingText
);
@@ -53,31 +61,33 @@
@computed("selected", "translatedSuggestion")
get translatingText() {
return this.selected === TRANSLATE && this.translatedSuggestion;
return (
this.promptTypes[this.selected] === TEXT && this.translatedSuggestion
);
}
@computed("selected", "proofReadSuggestion")
get proofreadingText() {
return this.selected === PROOFREAD && this.proofReadSuggestion;
return this.promptTypes[this.selected] === DIFF && this.proofReadSuggestion;
}
@computed("selected", "generatedTitlesSuggestions")
get selectingTopicTitle() {
return (
this.selected === GENERATE_TITLES &&
this.promptTypes[this.selected] === LIST &&
this.generatedTitlesSuggestions.length > 0
);
}
_updateSuggestedByAI(value, data) {
switch (value) {
case GENERATE_TITLES:
_updateSuggestedByAI(data) {
switch (data.type) {
case LIST:
this.generatedTitlesSuggestions = data.suggestions;
break;
case TRANSLATE:
case TEXT:
this.translatedSuggestion = data.suggestions[0];
break;
case PROOFREAD:
case DIFF:
this.proofReadSuggestion = data.suggestions[0];
this.proofreadDiff = data.diff;
break;
@@ -89,7 +99,7 @@ export default class AiHelper extends Component {
this.loading = true;
this.selected = value;
if (value === GENERATE_TITLES) {
if (value === LIST) {
this.selectedTitle = null;
}
@@ -101,7 +111,7 @@
data: { mode: this.selected, text: this.composedMessage },
})
.then((data) => {
this._updateSuggestedByAI(value, data);
this._updateSuggestedByAI(data);
})
.catch(popupAjaxError)
.finally(() => (this.loading = false));

View File

@@ -5,10 +5,6 @@ en:
title: "Suggest changes using AI"
description: "Choose one of the options below, and the AI will suggest you a new version of the text."
selection_hint: "Hint: You can also select a portion of the text before opening the helper to rewrite only that."
modes:
translate: Translate to English
generate_titles: Suggest topic titles
proofreader: Proofread text
reviewables:
model_used: "Model used:"
accuracy: "Accuracy:"

View File

@@ -50,3 +50,11 @@ en:
reasons:
flagged_by_toxicity: The AI plugin flagged this after classifying it as toxic.
flagged_by_nsfw: The AI plugin flagged this after classifying at least one of the attached images as NSFW.
discourse_ai:
ai_helper:
prompts:
translate: Translate to English
generate_titles: Suggest topic titles
proofread: Proofread text

View File

@@ -3,6 +3,7 @@
DiscourseAi::Engine.routes.draw do
# AI-helper routes
scope module: :ai_helper, path: "/ai-helper", defaults: { format: :json } do
get "prompts" => "assistant#prompts"
post "suggest" => "assistant#suggest"
end
end

View File

@@ -0,0 +1,36 @@
# frozen_string_literal: true
CompletionPrompt.seed do |cp|
cp.id = 1
cp.name = "translate"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.value = <<~STRING
I want you to act as an English translator, spelling corrector and improver. I will speak to you
in any language and you will detect the language, translate it and answer in the corrected and
improved version of my text, in English. I want you to replace my simplified A0-level words and
sentences with more beautiful and elegant, upper level English words and sentences.
Keep the meaning same, but make them more literary. I want you to only reply the correction,
the improvements and nothing else, do not write explanations.
STRING
end
CompletionPrompt.seed do |cp|
cp.id = 2
cp.name = "generate_titles"
cp.prompt_type = CompletionPrompt.prompt_types[:list]
cp.value = <<~STRING
I want you to act as a title generator for written pieces. I will provide you with a text,
and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
STRING
end
CompletionPrompt.seed do |cp|
cp.id = 3
cp.name = "proofread"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.value = <<~STRING
I want you act as a proofreader. I will provide you with a text and I want you to review them for any spelling,
grammar, or punctuation errors. Once you have finished reviewing the text, provide me with any necessary
corrections or suggestions for improve the text.
STRING
end
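Prompts are looked up by name at request time and available_prompts only returns rows with enabled: true, so a built-in prompt can be switched off from the console without editing this seed file (hypothetical example):

# Hide the title generator from the composer helper.
CompletionPrompt.find_by(name: "generate_titles").update!(enabled: false)
# The controller also rejects disabled prompts on POST /suggest via the prompt.enabled? check.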

View File

@@ -0,0 +1,15 @@
# frozen_string_literal: true
class CreateCompletionPromptTable < ActiveRecord::Migration[7.0]
def change
create_table :completion_prompts do |t|
t.string :name, null: false
t.string :translated_name
t.integer :prompt_type, null: false, default: 0
t.text :value, null: false
t.boolean :enabled, null: false, default: true
t.timestamps
end
add_index :completion_prompts, %i[name], unique: true
end
end

View File

@@ -7,6 +7,9 @@ module DiscourseAi
end
def inject_into(plugin)
plugin.register_seedfu_fixtures(
Rails.root.join("plugins", "discourse-ai", "db", "fixtures", "ai-helper"),
)
plugin.register_svg_icon("magic")
end
end

View File

@@ -8,45 +8,37 @@ module DiscourseAi
PROOFREAD = "proofread"
VALID_TYPES = [TRANSLATE, GENERATE_TITLES, PROOFREAD]
def get_prompt_for(prompt_type)
case prompt_type
when TRANSLATE
translate_prompt
when GENERATE_TITLES
generate_titles_prompt
when PROOFREAD
proofread_prompt
end
def available_prompts
CompletionPrompt
.where(enabled: true)
.map do |prompt|
translation =
I18n.t("discourse_ai.ai_helper.prompts.#{prompt.name}", default: nil) ||
prompt.translated_name
{ name: prompt.name, translated_name: translation, prompt_type: prompt.prompt_type }
end
end
def generate_and_send_prompt(prompt_type, text)
result = {}
def generate_and_send_prompt(prompt, text)
result = { type: prompt.prompt_type }
prompt = [
{ role: "system", content: get_prompt_for(prompt_type) },
{ role: "user", content: text },
]
ai_messages = [{ role: "system", content: prompt.value }, { role: "user", content: text }]
result[:suggestions] = DiscourseAi::Inference::OpenAiCompletions
.perform!(prompt)
.perform!(ai_messages)
.dig(:choices)
.to_a
.flat_map { |choice| parse_content(prompt_type, choice.dig(:message, :content).to_s) }
.flat_map { |choice| parse_content(prompt, choice.dig(:message, :content).to_s) }
.compact_blank
result[:diff] = generate_diff(text, result[:suggestions].first) if proofreading?(
prompt_type,
)
result[:diff] = generate_diff(text, result[:suggestions].first) if prompt.diff?
result
end
private
def proofreading?(prompt_type)
prompt_type == PROOFREAD
end
def generate_diff(text, suggestion)
cooked_text = PrettyText.cook(text)
cooked_suggestion = PrettyText.cook(suggestion)
@@ -54,39 +46,12 @@ module DiscourseAi
DiscourseDiff.new(cooked_text, cooked_suggestion).inline_html
end
def parse_content(type, content)
def parse_content(prompt, content)
return "" if content.blank?
return content.strip if type != GENERATE_TITLES
return content.strip if !prompt.list?
content.gsub("\"", "").gsub(/\d./, "").split("\n").map(&:strip)
end
def translate_prompt
<<~STRING
I want you to act as an English translator, spelling corrector and improver. I will speak to you
in any language and you will detect the language, translate it and answer in the corrected and
improved version of my text, in English. I want you to replace my simplified A0-level words and
sentences with more beautiful and elegant, upper level English words and sentences.
Keep the meaning same, but make them more literary. I want you to only reply the correction,
the improvements and nothing else, do not write explanations.
STRING
end
def generate_titles_prompt
<<~STRING
I want you to act as a title generator for written pieces. I will provide you with a text,
and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
STRING
end
def proofread_prompt
<<~STRING
I want you act as a proofreader. I will provide you with a text and I want you to review them for any spelling,
grammar, or punctuation errors. Once you have finished reviewing the text, provide me with any necessary
corrections or suggestions for improve the text.
STRING
end
end
end
end
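With the prompt lookup moved to the controller, the service can also be exercised directly from the console; a minimal sketch (assuming an OpenAI key is configured and the seeded prompts exist — the output shown is illustrative, not captured):

prompt = CompletionPrompt.find_by(name: "proofread")
result = DiscourseAi::AiHelper::OpenAiPrompt.new.generate_and_send_prompt(prompt, "This are a test")
result[:type]        # => "diff" (the prompt's enum name)
result[:suggestions] # => ["This is a test."], or whatever the completions API returns, parsed per prompt type
result[:diff]        # inline HTML diff of the text vs. the first suggestion; only set for diff-type prompts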

View File

@@ -3,15 +3,17 @@
require_relative "../../../support/openai_completions_inference_stubs"
RSpec.describe DiscourseAi::AiHelper::OpenAiPrompt do
let(:prompt) { CompletionPrompt.find_by(name: mode) }
describe "#generate_and_send_prompt" do
context "when using the translate mode" do
let(:mode) { described_class::TRANSLATE }
let(:mode) { "translate" }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
it "Sends the prompt to chatGPT and returns the response" do
response =
subject.generate_and_send_prompt(mode, OpenAiCompletionsInferenceStubs.spanish_text)
subject.generate_and_send_prompt(prompt, OpenAiCompletionsInferenceStubs.spanish_text)
expect(response[:suggestions]).to contain_exactly(
OpenAiCompletionsInferenceStubs.translated_response.strip,
@@ -20,14 +22,14 @@ RSpec.describe DiscourseAi::AiHelper::OpenAiPrompt do
end
context "when using the proofread mode" do
let(:mode) { described_class::PROOFREAD }
let(:mode) { "proofread" }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
it "Sends the prompt to chatGPT and returns the response" do
response =
subject.generate_and_send_prompt(
mode,
prompt,
OpenAiCompletionsInferenceStubs.translated_response,
)
@@ -38,7 +40,7 @@ RSpec.describe DiscourseAi::AiHelper::OpenAiPrompt do
end
context "when generating titles" do
let(:mode) { described_class::GENERATE_TITLES }
let(:mode) { "generate_titles" }
before { OpenAiCompletionsInferenceStubs.stub_prompt(mode) }
@@ -53,7 +55,7 @@ RSpec.describe DiscourseAi::AiHelper::OpenAiPrompt do
response =
subject.generate_and_send_prompt(
mode,
prompt,
OpenAiCompletionsInferenceStubs.translated_response,
)

View File

@@ -5,7 +5,7 @@ require_relative "../../support/openai_completions_inference_stubs"
RSpec.describe DiscourseAi::AiHelper::AssistantController do
describe "#suggest" do
let(:text) { OpenAiCompletionsInferenceStubs.translated_response }
let(:mode) { DiscourseAi::AiHelper::OpenAiPrompt::PROOFREAD }
let(:mode) { "proofread" }
context "when not logged in" do
it "returns a 403 response" do

View File

@@ -82,10 +82,9 @@ class OpenAiCompletionsInferenceStubs
prompt_builder = DiscourseAi::AiHelper::OpenAiPrompt.new
text =
type == DiscourseAi::AiHelper::OpenAiPrompt::TRANSLATE ? spanish_text : translated_response
prompt = [
{ role: "system", content: prompt_builder.get_prompt_for(type) },
{ role: "user", content: text },
]
used_prompt = CompletionPrompt.find_by(name: type)
prompt = [{ role: "system", content: used_prompt.value }, { role: "user", content: text }]
WebMock
.stub_request(:post, "https://api.openai.com/v1/chat/completions")