FEATURE: Prompts can consist of multiple messages. (#21)

A prompt with multiple messages leads to better results, as the AI can learn from the given examples. Alongside this change, we provide a better default proofreading prompt.
Roman Rizzi 2023-03-21 12:04:59 -03:00 committed by GitHub
parent 6bdbc0e32d
commit 39f7f1f29e
9 changed files with 138 additions and 17 deletions
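As the commit message notes, prompts now follow the OpenAI chat format: an array of role/content messages instead of a single text value, so a prompt can carry few-shot examples ahead of the real input. A minimal, purely illustrative sketch of that shape (the content below is invented and not part of this commit):

```ruby
# Illustrative only: a multi-message prompt in the OpenAI chat format.
# A system instruction, one worked example pair, then the real input last.
messages = [
  { role: "system", content: "You fix spelling. Reply with the corrected text only." },
  { role: "user", content: "teh quick brown fox" },      # few-shot example input
  { role: "assistant", content: "the quick brown fox" }, # expected output to imitate
  { role: "user", content: "a sentance with a typo" },   # the actual input goes last
]
```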


@@ -1,7 +1,38 @@
# frozen_string_literal: true
class CompletionPrompt < ActiveRecord::Base
  # TODO(roman): Remove sept 2023.
  self.ignored_columns = ["value"]

  VALID_ROLES = %w[system user assistant]

  enum :prompt_type, { text: 0, list: 1, diff: 2 }

  validates :messages, length: { maximum: 20 }
  validate :each_message_length
  validate :each_message_role

  def messages_with_user_input(user_input)
    self.messages << { role: "user", content: user_input }
  end

  private

  def each_message_length
    messages.each_with_index do |msg, idx|
      next if msg["content"].length <= 1000

      errors.add(:messages, I18n.t("errors.prompt_message_length", idx: idx + 1))
    end
  end

  def each_message_role
    messages.each_with_index do |msg, idx|
      next if VALID_ROLES.include?(msg["role"])

      errors.add(:messages, I18n.t("errors.invalid_prompt_role", idx: idx + 1))
    end
  end
end
# == Schema Information
@@ -12,10 +43,10 @@ end
# name :string not null
# translated_name :string
# prompt_type :integer default("text"), not null
# value :text not null
# enabled :boolean default(TRUE), not null
# created_at :datetime not null
# updated_at :datetime not null
# messages :jsonb not null
#
# Indexes
#
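To recap the model above: `messages_with_user_input` appends the end user's text as a trailing `user` message, and the validations cap a prompt at 20 messages, 1000 characters per message, and the `system`/`user`/`assistant` roles. A hypothetical console sketch (the prompt contents and input text here are invented):

```ruby
# Hypothetical rails console sketch; the message contents are made up.
prompt = CompletionPrompt.new(messages: [{ role: "system", content: "Answer briefly." }])

# The end user's text is appended as the trailing "user" message.
prompt.messages_with_user_input("What is Discourse?").last
# => { role: "user", content: "What is Discourse?" }

# An unknown role (or more than 20 messages, or a message over 1000 characters) fails validation.
CompletionPrompt.new(messages: [{ role: "wizard", content: "hi" }]).valid? # => false
```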


@@ -51,6 +51,9 @@ en:
      flagged_by_toxicity: The AI plugin flagged this after classifying it as toxic.
      flagged_by_nsfw: The AI plugin flagged this after classifying at least one of the attached images as NSFW.
  errors:
    prompt_message_length: The message %{idx} is over the 1000 character limit.
    invalid_prompt_role: The message %{idx} has an invalid role.
  discourse_ai:
    ai_helper:
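For reference, the model interpolates the message index into these new error strings, so a validation failure on the third message reads as shown below (a small illustrative call, not part of the diff):

```ruby
# The error keys defined above, with the index interpolated.
I18n.t("errors.prompt_message_length", idx: 3)
# => "The message 3 is over the 1000 character limit."
```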


@@ -3,35 +3,80 @@ CompletionPrompt.seed do |cp|
  cp.id = -1
  cp.name = "translate"
  cp.prompt_type = CompletionPrompt.prompt_types[:text]
  cp.value = <<~STRING
  cp.messages = [{ role: "system", content: <<~TEXT }]
    I want you to act as an English translator, spelling corrector and improver. I will speak to you
    in any language and you will detect the language, translate it and answer in the corrected and
    improved version of my text, in English. I want you to replace my simplified A0-level words and
    sentences with more beautiful and elegant, upper level English words and sentences.
    Keep the meaning same, but make them more literary. I want you to only reply the correction,
    the improvements and nothing else, do not write explanations.
  STRING
  TEXT
end

CompletionPrompt.seed do |cp|
  cp.id = -2
  cp.name = "generate_titles"
  cp.prompt_type = CompletionPrompt.prompt_types[:list]
  cp.value = <<~STRING
  cp.messages = [{ role: "system", content: <<~TEXT }]
    I want you to act as a title generator for written pieces. I will provide you with a text,
    and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
    and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
    I want you to only reply the list of options and nothing else, do not write explanations.
  STRING
  TEXT
end

CompletionPrompt.seed do |cp|
  cp.id = -3
  cp.name = "proofread"
  cp.prompt_type = CompletionPrompt.prompt_types[:diff]
  cp.value = <<~STRING
    I want you act as a proofreader. I will provide you with a text and I want you to review them for any spelling,
    grammar, or punctuation errors. Once you have finished reviewing the text, provide me with any necessary
    corrections or suggestions for improve the text.
  STRING
  cp.messages = [
    { role: "system", content: <<~TEXT },
      You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
      You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
      Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
      Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
    TEXT
    { role: "user", content: "![amazing car|100x100, 22%](upload://hapy.png)" },
    { role: "assistant", content: "![Amazing car|100x100, 22%](upload://hapy.png)" },
    { role: "user", content: <<~TEXT },
      Intensity 1:
      The rain in spain stays mainly in the plane.
    TEXT
    { role: "assistant", content: "The rain in Spain, stays mainly in the Plane." },
    { role: "user", content: "The rain in Spain, stays mainly in the Plane." },
    { role: "assistant", content: "The rain in Spain, stays mainly in the Plane." },
    { role: "user", content: <<~TEXT },
      Intensity 1:
      Hello,
      Sometimes the logo isn't changing automatically when color scheme changes.
      ![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
    TEXT
    { role: "assistant", content: <<~TEXT },
      Hello,
      Sometimes the logo does not change automatically when the color scheme changes.
      ![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
    TEXT
    { role: "user", content: <<~TEXT },
      Intensity 1:
      Any ideas what is wrong with this peace of cod?
      > This quot contains a typo
      ```ruby
      # this has speling mistakes
      testin.atypo = 11
      baad = "bad"
      ```
    TEXT
    { role: "assistant", content: <<~TEXT },
      Any ideas what is wrong with this piece of code?
      > This quot contains a typo
      ```ruby
      # This has spelling mistakes
      testing.a_typo = 11
      bad = "bad"
      ```
    TEXT
  ]
end
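The new seeded proofread prompt is a few-shot prompt: a system instruction plus worked user/assistant pairs (including the optional `Intensity N:` prefix) that show the model exactly what output is expected, with the person's actual text appended as the final user message at request time. A rough sketch of the resulting request body, using made-up user input:

```ruby
# Sketch only: what the proofread request roughly looks like once user text is appended.
proofread = CompletionPrompt.find_by(name: "proofread")
payload = {
  model: "gpt-3.5-turbo",
  messages: proofread.messages_with_user_input("Intensity 1:\nA sentnce with a typo."),
}
# payload[:messages] now ends with { role: "user", content: "Intensity 1:\nA sentnce with a typo." }
```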


@@ -0,0 +1,7 @@
# frozen_string_literal: true
class MultiMessageCompletionPrompts < ActiveRecord::Migration[7.0]
  def change
    add_column :completion_prompts, :messages, :jsonb, null: false
  end
end


@@ -0,0 +1,7 @@
# frozen_string_literal: true
class DropCompletionPromptValue < ActiveRecord::Migration[7.0]
  def change
    remove_column :completion_prompts, :value, :text
  end
end


@@ -23,10 +23,10 @@ module DiscourseAi
      def generate_and_send_prompt(prompt, text)
        result = { type: prompt.prompt_type }

        ai_messages = [{ role: "system", content: prompt.value }, { role: "user", content: text }]
        messages = prompt.messages_with_user_input(text)

        result[:suggestions] = DiscourseAi::Inference::OpenAiCompletions
          .perform!(ai_messages)
          .perform!(messages)
          .dig(:choices)
          .to_a
          .flat_map { |choice| parse_content(prompt, choice.dig(:message, :content).to_s) }


@@ -3,7 +3,7 @@
module ::DiscourseAi
  module Inference
    class OpenAiCompletions
      def self.perform!(content, model = "gpt-3.5-turbo")
      def self.perform!(messages, model = "gpt-3.5-turbo")
        headers = {
          "Authorization" => "Bearer #{SiteSetting.ai_openai_api_key}",
          "Content-Type" => "application/json",
@@ -14,7 +14,7 @@ module ::DiscourseAi
        response =
          Faraday.new(nil, connection_opts).post(
            "https://api.openai.com/v1/chat/completions",
            { model: model, messages: content }.to_json,
            { model: model, messages: messages }.to_json,
            headers,
          )
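Taken together with the helper change above, `perform!` now receives the already-assembled messages array and forwards it to the chat completions endpoint. A rough end-to-end sketch (assumes `ai_openai_api_key` is configured; the prompt name comes from the seeds and the input text is invented):

```ruby
# Sketch: build the messages for a seeded prompt and request completions.
prompt = CompletionPrompt.find_by(name: "generate_titles")
messages = prompt.messages_with_user_input("A post about brewing cold brew coffee at home")

response = DiscourseAi::Inference::OpenAiCompletions.perform!(messages)
suggestions = response.dig(:choices).to_a.map { |choice| choice.dig(:message, :content) }
```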


@@ -0,0 +1,29 @@
# frozen_string_literal: true
RSpec.describe CompletionPrompt do
  describe "validations" do
    context "when there are too many messages" do
      it "doesn't accept more than 20 messages" do
        prompt = described_class.new(messages: [{ role: "system", content: "a" }] * 21)

        expect(prompt.valid?).to eq(false)
      end
    end

    context "when the message is over the max length" do
      it "doesn't accept messages when the length is more than 1000 characters" do
        prompt = described_class.new(messages: [{ role: "system", content: "a" * 1001 }])

        expect(prompt.valid?).to eq(false)
      end
    end

    context "when the message has invalid roles" do
      it "doesn't accept messages when the role is invalid" do
        prompt = described_class.new(messages: [{ role: "invalid", content: "a" }])

        expect(prompt.valid?).to eq(false)
      end
    end
  end
end


@@ -83,12 +83,11 @@ class OpenAiCompletionsInferenceStubs
    text =
      type == DiscourseAi::AiHelper::OpenAiPrompt::TRANSLATE ? spanish_text : translated_response

    used_prompt = CompletionPrompt.find_by(name: type)
    prompt = [{ role: "system", content: used_prompt.value }, { role: "user", content: text }]
    prompt_messages = CompletionPrompt.find_by(name: type).messages_with_user_input(text)

    WebMock
      .stub_request(:post, "https://api.openai.com/v1/chat/completions")
      .with(body: JSON.dump(model: "gpt-3.5-turbo", messages: prompt))
      .with(body: { model: "gpt-3.5-turbo", messages: prompt_messages }.to_json)
      .to_return(status: 200, body: JSON.dump(response(response_text_for(type))))
  end
end