diff --git a/assets/javascripts/discourse/connectors/after-d-editor/composer-open.js b/assets/javascripts/discourse/connectors/after-d-editor/composer-open.js index f13557ca..94aaaef4 100644 --- a/assets/javascripts/discourse/connectors/after-d-editor/composer-open.js +++ b/assets/javascripts/discourse/connectors/after-d-editor/composer-open.js @@ -34,10 +34,10 @@ export default class extends Component { this.composerModel.targetRecipients && this.currentUser.ai_enabled_chat_bots ) { - let reciepients = this.composerModel.targetRecipients.split(","); + let recipients = this.composerModel.targetRecipients.split(","); return this.currentUser.ai_enabled_chat_bots.any((bot) => - reciepients.any((username) => username === bot.username) + recipients.any((username) => username === bot.username) ); } return false; diff --git a/db/migrate/20231202013850_convert_ai_personas_commands_to_json.rb b/db/migrate/20231202013850_convert_ai_personas_commands_to_json.rb index 477817be..6894dd57 100644 --- a/db/migrate/20231202013850_convert_ai_personas_commands_to_json.rb +++ b/db/migrate/20231202013850_convert_ai_personas_commands_to_json.rb @@ -4,7 +4,7 @@ class ConvertAiPersonasCommandsToJson < ActiveRecord::Migration[7.0] # this all may be a bit surprising, but interestingly this makes all our backend code # cross compatible # upgrading ["a", "b", "c"] to json simply works cause in both cases - # rails will cast to a string array and all code simply expectes a string array + # rails will cast to a string array and all code simply expects a string array # # this change was made so we can also start storing parameters with the commands execute <<~SQL diff --git a/lib/ai_bot/bot.rb b/lib/ai_bot/bot.rb index b6d41c7a..26996bf5 100644 --- a/lib/ai_bot/bot.rb +++ b/lib/ai_bot/bot.rb @@ -71,7 +71,7 @@ module DiscourseAi tool_message = { type: :tool, id: tool_call_id, content: invocation_result_json } if tool.standalone? 
- standalone_conext = + standalone_context = context.dup.merge( conversation_context: [ context[:conversation_context].last, @@ -79,7 +79,7 @@ module DiscourseAi tool_message, ], ) - prompt = persona.craft_prompt(standalone_conext) + prompt = persona.craft_prompt(standalone_context) else prompt.push(**tool_call_message) prompt.push(**tool_message) diff --git a/lib/ai_bot/personas/artist.rb b/lib/ai_bot/personas/artist.rb index d16d74e4..03271424 100644 --- a/lib/ai_bot/personas/artist.rb +++ b/lib/ai_bot/personas/artist.rb @@ -22,7 +22,7 @@ module DiscourseAi - You can specify subject, medium (e.g. oil on canvas), artist (person who drew it or photographed it) - You can specify details about lighting or time of day. - You can specify a particular website you would like to emulate (artstation or deviantart) - - You can specify additional details such as "beutiful, dystopian, futuristic, etc." + - You can specify additional details such as "beautiful, dystopian, futuristic, etc." - Prompts should generally be 10-20 words long - Do not include any connector words such as "and" or "but" etc. 
- You are extremely creative, when given short non descriptive prompts from a user you add your own details diff --git a/lib/ai_bot/tools/db_schema.rb b/lib/ai_bot/tools/db_schema.rb index 4d19d35a..101ac4c3 100644 --- a/lib/ai_bot/tools/db_schema.rb +++ b/lib/ai_bot/tools/db_schema.rb @@ -12,7 +12,7 @@ module DiscourseAi { name: "tables", description: - "list of tables to load schema information for, comma seperated list eg: (users,posts))", + "list of tables to load schema information for, comma separated list eg: (users,posts))", type: "string", required: true, }, diff --git a/lib/ai_bot/tools/search.rb b/lib/ai_bot/tools/search.rb index 33b4f6d0..aa2b5e2d 100644 --- a/lib/ai_bot/tools/search.rb +++ b/lib/ai_bot/tools/search.rb @@ -16,7 +16,7 @@ module DiscourseAi { name: "search_query", description: - "Specific keywords to search for, space seperated (correct bad spelling, remove connector words)", + "Specific keywords to search for, space separated (correct bad spelling, remove connector words)", type: "string", }, { diff --git a/lib/ai_bot/tools/summarize.rb b/lib/ai_bot/tools/summarize.rb index 9a6b67fd..d8d8a472 100644 --- a/lib/ai_bot/tools/summarize.rb +++ b/lib/ai_bot/tools/summarize.rb @@ -148,13 +148,13 @@ module DiscourseAi progress << "." 
progress_blk.call(progress) - contatenation_prompt = { + concatenation_prompt = { insts: "You are a helpful bot", input: "concatenated the disjoint summaries, creating a cohesive narrative:\n#{summaries.join("\n")}}", } - llm.generate(contatenation_prompt, temperature: 0.6, max_tokens: 500, user: bot_user) + llm.generate(concatenation_prompt, temperature: 0.6, max_tokens: 500, user: bot_user) else summaries.first end diff --git a/lib/ai_helper/painter.rb b/lib/ai_helper/painter.rb index 110a38ac..c50d2de9 100644 --- a/lib/ai_helper/painter.rb +++ b/lib/ai_helper/painter.rb @@ -10,7 +10,7 @@ module DiscourseAi attribution = "discourse_ai.ai_helper.painter.attribution.#{model}" if model == "stable_diffusion_xl" - stable_diffusion_prompt = difussion_prompt(input, user) + stable_diffusion_prompt = diffusion_prompt(input, user) return [] if stable_diffusion_prompt.blank? artifacts = @@ -56,7 +56,7 @@ module DiscourseAi end end - def difussion_prompt(text, user) + def diffusion_prompt(text, user) prompt = DiscourseAi::Completions::Prompt.new( <<~TEXT.strip, diff --git a/lib/automation/report_runner.rb b/lib/automation/report_runner.rb index beb9c09b..375062b2 100644 --- a/lib/automation/report_runner.rb +++ b/lib/automation/report_runner.rb @@ -24,7 +24,7 @@ module DiscourseAi - Key statistics: Specify date range, call out important stats like number of new topics and posts - Overview: Briefly state trends within period. - - Highlighted content: 5 paragaraphs highlighting important topics people should know about. If possible have each paragraph link to multiple related topics. + - Highlighted content: 5 paragraphs highlighting important topics people should know about. If possible have each paragraph link to multiple related topics. 
- Key insights and trends linking to a selection of posts that back them TEXT end diff --git a/lib/completions/llm.rb b/lib/completions/llm.rb index f8df9554..846f57b5 100644 --- a/lib/completions/llm.rb +++ b/lib/completions/llm.rb @@ -3,7 +3,7 @@ # A facade that abstracts multiple LLMs behind a single interface. # # Internally, it consists of the combination of a dialect and an endpoint. -# After recieving a prompt using our generic format, it translates it to +# After receiving a prompt using our generic format, it translates it to # the target model and routes the completion request through the correct gateway. # # Use the .proxy method to instantiate an object. diff --git a/lib/inference/stability_generator.rb b/lib/inference/stability_generator.rb index 33447e1d..cdd1a7b1 100644 --- a/lib/inference/stability_generator.rb +++ b/lib/inference/stability_generator.rb @@ -23,7 +23,7 @@ module ::DiscourseAi "Authorization" => "Bearer #{api_key}", } - sdxl_allowed_dimentions = [ + sdxl_allowed_dimensions = [ [1024, 1024], [1152, 896], [1216, 832], @@ -37,7 +37,7 @@ module ::DiscourseAi if (!width && !height) if engine.include? 
"xl" - width, height = sdxl_allowed_dimentions[0] + width, height = sdxl_allowed_dimensions[0] else width, height = [512, 512] end diff --git a/lib/nsfw/classification.rb b/lib/nsfw/classification.rb index 5c36e566..c87ba8d1 100644 --- a/lib/nsfw/classification.rb +++ b/lib/nsfw/classification.rb @@ -70,8 +70,8 @@ module DiscourseAi target_to_classify.uploads.to_a.select { |u| FileHelper.is_supported_image?(u.url) } end - def opennsfw2_verdict?(clasification) - clasification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general + def opennsfw2_verdict?(classification) + classification.values.first.to_i >= SiteSetting.ai_nsfw_flag_threshold_general end def nsfw_detector_verdict?(classification) diff --git a/lib/summarization/entry_point.rb b/lib/summarization/entry_point.rb index 3f4176f2..ce6b7edf 100644 --- a/lib/summarization/entry_point.rb +++ b/lib/summarization/entry_point.rb @@ -25,13 +25,13 @@ module DiscourseAi plugin.register_summarization_strategy(Strategies::FoldContent.new(model)) end - truncable_models = [ + truncatable_models = [ Models::Discourse.new("long-t5-tglobal-base-16384-book-summary", max_tokens: 16_384), Models::Discourse.new("bart-large-cnn-samsum", max_tokens: 1024), Models::Discourse.new("flan-t5-base-samsum", max_tokens: 512), ] - truncable_models.each do |model| + truncatable_models.each do |model| plugin.register_summarization_strategy(Strategies::TruncateContent.new(model)) end end diff --git a/spec/lib/completions/endpoints/anthropic_spec.rb b/spec/lib/completions/endpoints/anthropic_spec.rb index 915547af..ccee7177 100644 --- a/spec/lib/completions/endpoints/anthropic_spec.rb +++ b/spec/lib/completions/endpoints/anthropic_spec.rb @@ -87,7 +87,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do end context "with tools" do - it "returns a function invoncation" do + it "returns a function invocation" do compliance.streaming_mode_tools(anthropic_mock) end end diff --git 
a/spec/lib/completions/endpoints/aws_bedrock_spec.rb b/spec/lib/completions/endpoints/aws_bedrock_spec.rb index 34271094..77fa3949 100644 --- a/spec/lib/completions/endpoints/aws_bedrock_spec.rb +++ b/spec/lib/completions/endpoints/aws_bedrock_spec.rb @@ -112,7 +112,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do end context "with tools" do - it "returns a function invoncation" do + it "returns a function invocation" do compliance.streaming_mode_tools(bedrock_mock) end end diff --git a/spec/lib/completions/endpoints/gemini_spec.rb b/spec/lib/completions/endpoints/gemini_spec.rb index 841dde50..65763845 100644 --- a/spec/lib/completions/endpoints/gemini_spec.rb +++ b/spec/lib/completions/endpoints/gemini_spec.rb @@ -161,7 +161,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do end context "with tools" do - it "returns a function invoncation" do + it "returns a function invocation" do compliance.streaming_mode_tools(bedrock_mock) end end diff --git a/spec/lib/completions/endpoints/hugging_face_spec.rb b/spec/lib/completions/endpoints/hugging_face_spec.rb index b68ecbe4..6247e42f 100644 --- a/spec/lib/completions/endpoints/hugging_face_spec.rb +++ b/spec/lib/completions/endpoints/hugging_face_spec.rb @@ -96,7 +96,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::HuggingFace do end context "with tools" do - it "returns a function invoncation" do + it "returns a function invocation" do compliance.streaming_mode_tools(hf_mock) end end diff --git a/spec/lib/completions/endpoints/open_ai_spec.rb b/spec/lib/completions/endpoints/open_ai_spec.rb index 5124dab1..1817b6e2 100644 --- a/spec/lib/completions/endpoints/open_ai_spec.rb +++ b/spec/lib/completions/endpoints/open_ai_spec.rb @@ -172,15 +172,15 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do # the row with test1 is invalid json raw_data = <<~TEXT.strip d|a|t|a|:| |{|"choices":[{"delta":{"content":"test,"}}]} - + data: 
{"choices":[{"delta":{"content":"test1,"}}] - + data: {"choices":[{"delta":|{"content":"test2,"}}]} - + data: {"choices":[{"delta":{"content":"test3,"}}]|} - + data: {"choices":[{|"|d|elta":{"content":"test4"}}]|} - + data: [D|ONE] TEXT @@ -201,7 +201,7 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do end context "with tools" do - it "returns a function invoncation" do + it "returns a function invocation" do compliance.streaming_mode_tools(open_ai_mock) end end diff --git a/spec/lib/modules/automation/report_runner_spec.rb b/spec/lib/modules/automation/report_runner_spec.rb index 19917f19..ca424bf2 100644 --- a/spec/lib/modules/automation/report_runner_spec.rb +++ b/spec/lib/modules/automation/report_runner_spec.rb @@ -6,7 +6,7 @@ module DiscourseAi module Automation describe ReportRunner do fab!(:user) - fab!(:reciever) { Fabricate(:user) } + fab!(:receiver) { Fabricate(:user) } fab!(:post) { Fabricate(:post, user: user) } fab!(:group) fab!(:secure_category) { Fabricate(:private_category, group: group) } @@ -46,7 +46,7 @@ module DiscourseAi DiscourseAi::Completions::Llm.with_prepared_responses(["magical report"]) do ReportRunner.run!( sender_username: user.username, - receivers: [reciever.username], + receivers: [receiver.username], title: "test report", model: "gpt-4", category_ids: nil, diff --git a/spec/lib/modules/nsfw/nsfw_classification_spec.rb b/spec/lib/modules/nsfw/nsfw_classification_spec.rb index 15de208e..e89fac44 100644 --- a/spec/lib/modules/nsfw/nsfw_classification_spec.rb +++ b/spec/lib/modules/nsfw/nsfw_classification_spec.rb @@ -80,7 +80,7 @@ describe DiscourseAi::Nsfw::Classification do let(:negative_verdict) { { "opennsfw2" => false } } - it "returns false when NSFW flaggin is disabled" do + it "returns false when NSFW flagging is disabled" do SiteSetting.ai_nsfw_flag_automatically = false should_flag = subject.should_flag_based_on?(positive_verdict) diff --git 
a/spec/lib/modules/sentiment/jobs/regular/post_sentiment_analysis_spec.rb b/spec/lib/modules/sentiment/jobs/regular/post_sentiment_analysis_spec.rb index 95fa63c8..4a4ffff9 100644 --- a/spec/lib/modules/sentiment/jobs/regular/post_sentiment_analysis_spec.rb +++ b/spec/lib/modules/sentiment/jobs/regular/post_sentiment_analysis_spec.rb @@ -41,7 +41,7 @@ describe Jobs::PostSentimentAnalysis do end end - it "succesfully classifies the post" do + it "successfully classifies the post" do expected_analysis = SiteSetting.ai_sentiment_models.split("|").length SentimentInferenceStubs.stub_classification(post) diff --git a/spec/lib/modules/toxicity/toxicity_classification_spec.rb b/spec/lib/modules/toxicity/toxicity_classification_spec.rb index 32db8fe8..7f0449d2 100644 --- a/spec/lib/modules/toxicity/toxicity_classification_spec.rb +++ b/spec/lib/modules/toxicity/toxicity_classification_spec.rb @@ -22,7 +22,7 @@ describe DiscourseAi::Toxicity::ToxicityClassification do let(:toxic_verdict) { { SiteSetting.ai_toxicity_inference_service_api_model => true } } - it "returns false when toxicity flaggin is disabled" do + it "returns false when toxicity flagging is disabled" do SiteSetting.ai_toxicity_flag_automatically = false should_flag = subject.should_flag_based_on?(toxic_verdict) diff --git a/spec/requests/admin/ai_personas_controller_spec.rb b/spec/requests/admin/ai_personas_controller_spec.rb index c7de6d63..d68bdeb6 100644 --- a/spec/requests/admin/ai_personas_controller_spec.rb +++ b/spec/requests/admin/ai_personas_controller_spec.rb @@ -196,7 +196,7 @@ RSpec.describe DiscourseAi::Admin::AiPersonasController do params: { ai_persona: { name: "bob", - dscription: "the bob", + description: "the bob", }, } diff --git a/spec/requests/ai_helper/assistant_controller_spec.rb b/spec/requests/ai_helper/assistant_controller_spec.rb index fd20b6ba..456fb6eb 100644 --- a/spec/requests/ai_helper/assistant_controller_spec.rb +++ b/spec/requests/ai_helper/assistant_controller_spec.rb @@ 
-3,7 +3,7 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do describe "#suggest" do let(:text_to_proofread) { "The rain in spain stays mainly in the plane." } - let(:proofreaded_text) { "The rain in Spain, stays mainly in the Plane." } + let(:proofread_text) { "The rain in Spain, stays mainly in the Plane." } let(:mode) { CompletionPrompt::PROOFREAD } context "when not logged in" do @@ -71,11 +71,11 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do expected_diff = "

The rain in Spain, spain stays mainly in the Planeplane.

" - DiscourseAi::Completions::Llm.with_prepared_responses([proofreaded_text]) do + DiscourseAi::Completions::Llm.with_prepared_responses([proofread_text]) do post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text_to_proofread } expect(response.status).to eq(200) - expect(response.parsed_body["suggestions"].first).to eq(proofreaded_text) + expect(response.parsed_body["suggestions"].first).to eq(proofread_text) expect(response.parsed_body["diff"]).to eq(expected_diff) end end diff --git a/spec/shared/inference/stability_generator_spec.rb b/spec/shared/inference/stability_generator_spec.rb index 0e6822dc..34f98ef3 100644 --- a/spec/shared/inference/stability_generator_spec.rb +++ b/spec/shared/inference/stability_generator_spec.rb @@ -5,7 +5,7 @@ describe DiscourseAi::Inference::StabilityGenerator do DiscourseAi::Inference::StabilityGenerator.perform!(prompt) end - it "sets dimentions to 512x512 for non XL model" do + it "sets dimensions to 512x512 for non XL model" do SiteSetting.ai_stability_engine = "stable-diffusion-v1-5" SiteSetting.ai_stability_api_url = "http://www.a.b.c" SiteSetting.ai_stability_api_key = "123" @@ -25,7 +25,7 @@ describe DiscourseAi::Inference::StabilityGenerator do gen("a cow") end - it "sets dimentions to 1024x1024 for XL model" do + it "sets dimensions to 1024x1024 for XL model" do SiteSetting.ai_stability_engine = "stable-diffusion-xl-1024-v1-0" SiteSetting.ai_stability_api_url = "http://www.a.b.c" SiteSetting.ai_stability_api_key = "123" diff --git a/spec/system/ai_helper/ai_composer_helper_spec.rb b/spec/system/ai_helper/ai_composer_helper_spec.rb index 0fce44d4..3b99f779 100644 --- a/spec/system/ai_helper/ai_composer_helper_spec.rb +++ b/spec/system/ai_helper/ai_composer_helper_spec.rb @@ -37,7 +37,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do expect(ai_helper_context_menu).to have_context_menu end - it "does not show the context menu when selecting insuffient text" do + it "does not show the 
context menu when selecting insufficient text" do     visit("/latest")     page.find("#create-topic").click     composer.fill_content(input) diff --git a/spec/system/ai_helper/ai_split_topic_suggestion_spec.rb b/spec/system/ai_helper/ai_split_topic_suggestion_spec.rb index 14be5b80..4efe437a 100644 --- a/spec/system/ai_helper/ai_split_topic_suggestion_spec.rb +++ b/spec/system/ai_helper/ai_split_topic_suggestion_spec.rb @@ -109,7 +109,7 @@ RSpec.describe "AI Post helper", type: :system, js: true do   context "when suggesting tags with AI tag suggester" do     before { SiteSetting.ai_embeddings_enabled = true }      - it "updatse the tag with the suggested tag" do + it "updates the tag with the suggested tag" do       response =         Tag           .take(5) diff --git a/spec/system/embeddings/semantic_search_spec.rb b/spec/system/embeddings/semantic_search_spec.rb index 4415bc0c..af5b5708 100644 --- a/spec/system/embeddings/semantic_search_spec.rb +++ b/spec/system/embeddings/semantic_search_spec.rb @@ -39,7 +39,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do   end    describe "when performing a search in the full page search page" do     - skip "TODO: Implement test after doing LLM abrstraction" do + skip "TODO: Implement test after doing LLM abstraction" do       it "performs AI search in the background and hides results by default" do         visit("/search?expanded=true")         search_page.type_in_search("apple pie")