DEV: Fix various typos (#434)

Jarek Radosz 2024-01-19 12:51:26 +01:00 committed by GitHub
parent d4e23e0df6
commit 5802cd1a0c
28 changed files with 44 additions and 44 deletions


@@ -34,10 +34,10 @@ export default class extends Component {
 this.composerModel.targetRecipients &&
 this.currentUser.ai_enabled_chat_bots
 ) {
-let reciepients = this.composerModel.targetRecipients.split(",");
+let recipients = this.composerModel.targetRecipients.split(",");
 return this.currentUser.ai_enabled_chat_bots.any((bot) =>
-reciepients.any((username) => username === bot.username)
+recipients.any((username) => username === bot.username)
 );
 }
 return false;


@@ -4,7 +4,7 @@ class ConvertAiPersonasCommandsToJson < ActiveRecord::Migration[7.0]
 # this all may be a bit surprising, but interestingly this makes all our backend code
 # cross compatible
 # upgrading ["a", "b", "c"] to json simply works cause in both cases
-# rails will cast to a string array and all code simply expectes a string array
+# rails will cast to a string array and all code simply expects a string array
 #
 # this change was made so we can also start storing parameters with the commands
 execute <<~SQL
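
A minimal sketch of the cross-compatibility claim in the comment above (illustrative values only, hypothetical variable names; not the actual migration):

    # Both the legacy string-array representation and the json column
    # deserialize to the same Ruby array of strings, so callers that
    # expect a string array keep working after the migration.
    require "json"

    legacy_commands = ["a", "b", "c"]             # what the old column cast to
    json_commands = JSON.parse('["a", "b", "c"]') # what the json column casts to

    raise "not cross compatible" unless legacy_commands == json_commands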


@@ -71,7 +71,7 @@ module DiscourseAi
 tool_message = { type: :tool, id: tool_call_id, content: invocation_result_json }
 if tool.standalone?
-standalone_conext =
+standalone_context =
 context.dup.merge(
 conversation_context: [
 context[:conversation_context].last,
@@ -79,7 +79,7 @@ module DiscourseAi
 tool_message,
 ],
 )
-prompt = persona.craft_prompt(standalone_conext)
+prompt = persona.craft_prompt(standalone_context)
 else
 prompt.push(**tool_call_message)
 prompt.push(**tool_message)


@@ -22,7 +22,7 @@ module DiscourseAi
 - You can specify subject, medium (e.g. oil on canvas), artist (person who drew it or photographed it)
 - You can specify details about lighting or time of day.
 - You can specify a particular website you would like to emulate (artstation or deviantart)
-- You can specify additional details such as "beutiful, dystopian, futuristic, etc."
+- You can specify additional details such as "beautiful, dystopian, futuristic, etc."
 - Prompts should generally be 10-20 words long
 - Do not include any connector words such as "and" or "but" etc.
 - You are extremely creative, when given short non descriptive prompts from a user you add your own details


@@ -12,7 +12,7 @@ module DiscourseAi
 {
 name: "tables",
 description:
-"list of tables to load schema information for, comma seperated list eg: (users,posts))",
+"list of tables to load schema information for, comma separated list eg: (users,posts))",
 type: "string",
 required: true,
 },


@@ -16,7 +16,7 @@ module DiscourseAi
 {
 name: "search_query",
 description:
-"Specific keywords to search for, space seperated (correct bad spelling, remove connector words)",
+"Specific keywords to search for, space separated (correct bad spelling, remove connector words)",
 type: "string",
 },
 {


@@ -148,13 +148,13 @@ module DiscourseAi
 progress << "."
 progress_blk.call(progress)
-contatenation_prompt = {
+concatenation_prompt = {
 insts: "You are a helpful bot",
 input:
 "concatenated the disjoint summaries, creating a cohesive narrative:\n#{summaries.join("\n")}}",
 }
-llm.generate(contatenation_prompt, temperature: 0.6, max_tokens: 500, user: bot_user)
+llm.generate(concatenation_prompt, temperature: 0.6, max_tokens: 500, user: bot_user)
 else
 summaries.first
 end


@@ -10,7 +10,7 @@ module DiscourseAi
 attribution = "discourse_ai.ai_helper.painter.attribution.#{model}"
 if model == "stable_diffusion_xl"
-stable_diffusion_prompt = difussion_prompt(input, user)
+stable_diffusion_prompt = diffusion_prompt(input, user)
 return [] if stable_diffusion_prompt.blank?
 artifacts =
@@ -56,7 +56,7 @@ module DiscourseAi
 end
 end
-def difussion_prompt(text, user)
+def diffusion_prompt(text, user)
 prompt =
 DiscourseAi::Completions::Prompt.new(
 <<~TEXT.strip,


@@ -24,7 +24,7 @@ module DiscourseAi
 - Key statistics: Specify date range, call out important stats like number of new topics and posts
 - Overview: Briefly state trends within period.
-- Highlighted content: 5 paragaraphs highlighting important topics people should know about. If possible have each paragraph link to multiple related topics.
+- Highlighted content: 5 paragraphs highlighting important topics people should know about. If possible have each paragraph link to multiple related topics.
 - Key insights and trends linking to a selection of posts that back them
 TEXT
 end


@@ -3,7 +3,7 @@
 # A facade that abstracts multiple LLMs behind a single interface.
 #
 # Internally, it consists of the combination of a dialect and an endpoint.
-# After recieving a prompt using our generic format, it translates it to
+# After receiving a prompt using our generic format, it translates it to
 # the target model and routes the completion request through the correct gateway.
 #
 # Use the .proxy method to instantiate an object.
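
A hedged usage sketch of the facade described in the comment above, pieced together from calls that appear elsewhere in this commit (generate's keyword arguments come from the summarization hunk earlier in this diff; the model name passed to .proxy is an assumption, not confirmed by this commit):

    # Sketch only, not the documented API surface.
    llm = DiscourseAi::Completions::Llm.proxy("gpt-4") # model name is an assumption
    prompt = { insts: "You are a helpful bot", input: "Summarize this thread: ..." }
    llm.generate(prompt, temperature: 0.6, max_tokens: 500, user: Discourse.system_user)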


@@ -23,7 +23,7 @@ module ::DiscourseAi
 "Authorization" => "Bearer #{api_key}",
 }
-sdxl_allowed_dimentions = [
+sdxl_allowed_dimensions = [
 [1024, 1024],
 [1152, 896],
 [1216, 832],
@@ -37,7 +37,7 @@ module ::DiscourseAi
 if (!width && !height)
 if engine.include? "xl"
-width, height = sdxl_allowed_dimentions[0]
+width, height = sdxl_allowed_dimensions[0]
 else
 width, height = [512, 512]
 end


@@ -70,8 +70,8 @@ module DiscourseAi
 target_to_classify.uploads.to_a.select { |u| FileHelper.is_supported_image?(u.url) }
 end
-def opennsfw2_verdict?(clasification)
-clasification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general
+def opennsfw2_verdict?(classification)
+classification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general
 end
 def nsfw_detector_verdict?(classification)


@@ -25,13 +25,13 @@ module DiscourseAi
 plugin.register_summarization_strategy(Strategies::FoldContent.new(model))
 end
-truncable_models = [
+truncatable_models = [
 Models::Discourse.new("long-t5-tglobal-base-16384-book-summary", max_tokens: 16_384),
 Models::Discourse.new("bart-large-cnn-samsum", max_tokens: 1024),
 Models::Discourse.new("flan-t5-base-samsum", max_tokens: 512),
 ]
-truncable_models.each do |model|
+truncatable_models.each do |model|
 plugin.register_summarization_strategy(Strategies::TruncateContent.new(model))
 end
 end


@@ -87,7 +87,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do
 end
 context "with tools" do
-it "returns a function invoncation" do
+it "returns a function invocation" do
 compliance.streaming_mode_tools(anthropic_mock)
 end
 end


@@ -112,7 +112,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
 end
 context "with tools" do
-it "returns a function invoncation" do
+it "returns a function invocation" do
 compliance.streaming_mode_tools(bedrock_mock)
 end
 end


@@ -161,7 +161,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
 end
 context "with tools" do
-it "returns a function invoncation" do
+it "returns a function invocation" do
 compliance.streaming_mode_tools(bedrock_mock)
 end
 end


@@ -96,7 +96,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::HuggingFace do
 end
 context "with tools" do
-it "returns a function invoncation" do
+it "returns a function invocation" do
 compliance.streaming_mode_tools(hf_mock)
 end
 end


@@ -201,7 +201,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
 end
 context "with tools" do
-it "returns a function invoncation" do
+it "returns a function invocation" do
 compliance.streaming_mode_tools(open_ai_mock)
 end
 end


@@ -6,7 +6,7 @@ module DiscourseAi
 module Automation
 describe ReportRunner do
 fab!(:user)
-fab!(:reciever) { Fabricate(:user) }
+fab!(:receiver) { Fabricate(:user) }
 fab!(:post) { Fabricate(:post, user: user) }
 fab!(:group)
 fab!(:secure_category) { Fabricate(:private_category, group: group) }
@@ -46,7 +46,7 @@ module DiscourseAi
 DiscourseAi::Completions::Llm.with_prepared_responses(["magical report"]) do
 ReportRunner.run!(
 sender_username: user.username,
-receivers: [reciever.username],
+receivers: [receiver.username],
 title: "test report",
 model: "gpt-4",
 category_ids: nil,


@@ -80,7 +80,7 @@ describe DiscourseAi::Nsfw::Classification do
 let(:negative_verdict) { { "opennsfw2" => false } }
-it "returns false when NSFW flaggin is disabled" do
+it "returns false when NSFW flagging is disabled" do
 SiteSetting.ai_nsfw_flag_automatically = false
 should_flag = subject.should_flag_based_on?(positive_verdict)


@@ -41,7 +41,7 @@ describe Jobs::PostSentimentAnalysis do
 end
 end
-it "succesfully classifies the post" do
+it "successfully classifies the post" do
 expected_analysis = SiteSetting.ai_sentiment_models.split("|").length
 SentimentInferenceStubs.stub_classification(post)


@@ -22,7 +22,7 @@ describe DiscourseAi::Toxicity::ToxicityClassification do
 let(:toxic_verdict) { { SiteSetting.ai_toxicity_inference_service_api_model => true } }
-it "returns false when toxicity flaggin is disabled" do
+it "returns false when toxicity flagging is disabled" do
 SiteSetting.ai_toxicity_flag_automatically = false
 should_flag = subject.should_flag_based_on?(toxic_verdict)


@@ -196,7 +196,7 @@ RSpec.describe DiscourseAi::Admin::AiPersonasController do
 params: {
 ai_persona: {
 name: "bob",
-dscription: "the bob",
+description: "the bob",
 },
 }


@@ -3,7 +3,7 @@
 RSpec.describe DiscourseAi::AiHelper::AssistantController do
 describe "#suggest" do
 let(:text_to_proofread) { "The rain in spain stays mainly in the plane." }
-let(:proofreaded_text) { "The rain in Spain, stays mainly in the Plane." }
+let(:proofread_text) { "The rain in Spain, stays mainly in the Plane." }
 let(:mode) { CompletionPrompt::PROOFREAD }
 context "when not logged in" do
@@ -71,11 +71,11 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do
 expected_diff =
 "<div class=\"inline-diff\"><p>The rain in <ins>Spain</ins><ins>,</ins><ins> </ins><del>spain </del>stays mainly in the <ins>Plane</ins><del>plane</del>.</p></div>"
-DiscourseAi::Completions::Llm.with_prepared_responses([proofreaded_text]) do
+DiscourseAi::Completions::Llm.with_prepared_responses([proofread_text]) do
 post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text_to_proofread }
 expect(response.status).to eq(200)
-expect(response.parsed_body["suggestions"].first).to eq(proofreaded_text)
+expect(response.parsed_body["suggestions"].first).to eq(proofread_text)
 expect(response.parsed_body["diff"]).to eq(expected_diff)
 end
 end


@@ -5,7 +5,7 @@ describe DiscourseAi::Inference::StabilityGenerator do
 DiscourseAi::Inference::StabilityGenerator.perform!(prompt)
 end
-it "sets dimentions to 512x512 for non XL model" do
+it "sets dimensions to 512x512 for non XL model" do
 SiteSetting.ai_stability_engine = "stable-diffusion-v1-5"
 SiteSetting.ai_stability_api_url = "http://www.a.b.c"
 SiteSetting.ai_stability_api_key = "123"
@@ -25,7 +25,7 @@ describe DiscourseAi::Inference::StabilityGenerator do
 gen("a cow")
 end
-it "sets dimentions to 1024x1024 for XL model" do
+it "sets dimensions to 1024x1024 for XL model" do
 SiteSetting.ai_stability_engine = "stable-diffusion-xl-1024-v1-0"
 SiteSetting.ai_stability_api_url = "http://www.a.b.c"
 SiteSetting.ai_stability_api_key = "123"


@@ -37,7 +37,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
 expect(ai_helper_context_menu).to have_context_menu
 end
-it "does not show the context menu when selecting insuffient text" do
+it "does not show the context menu when selecting insufficient text" do
 visit("/latest")
 page.find("#create-topic").click
 composer.fill_content(input)


@@ -109,7 +109,7 @@ RSpec.describe "AI Post helper", type: :system, js: true do
 context "when suggesting tags with AI tag suggester" do
 before { SiteSetting.ai_embeddings_enabled = true }
-it "updatse the tag with the suggested tag" do
+it "update the tag with the suggested tag" do
 response =
 Tag
 .take(5)


@@ -39,7 +39,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
 end
 describe "when performing a search in the full page search page" do
-skip "TODO: Implement test after doing LLM abrstraction" do
+skip "TODO: Implement test after doing LLM abstraction" do
 it "performs AI search in the background and hides results by default" do
 visit("/search?expanded=true")
 search_page.type_in_search("apple pie")