en :
discourse_automation :
ai :
flag_types :
review : "Add post to review queue"
spam : "Flag as spam and hide post"
spam_silence : "Flag as spam, hide post and silence user"
scriptables :
llm_tool_triage :
title : Triage posts using AI Tool
description : "Triage posts using custom logic in an AI tool"
llm_persona_triage :
title : Triage posts using AI Persona
description : "Respond to posts using a specific AI persona"
llm_triage :
title : Triage posts using AI
description : "Triage posts using a large language model"
flagged_post : |
<div>Response from the model:</div>
<p>%%LLM_RESPONSE%%</p>
<b>Triggered by the <a href="%{base_path}/admin/plugins/discourse-automation/%%AUTOMATION_ID%%">%%AUTOMATION_NAME%%</a> rule.</b>
llm_report :
title : Periodic report using AI
description : "Periodic report based on a large language model"
site_settings :
discourse_ai_enabled : "Enable the Discourse AI plugin."
ai_artifact_security : "The AI artifact system generates IFRAMEs with runnable code. Strict mode disables sharing and forces an extra click to run code. Lax mode allows sharing of artifacts and runs code directly. Disabled mode disables the artifact system."
ai_toxicity_enabled : "Enable the toxicity module."
ai_toxicity_inference_service_api_endpoint : "URL where the API is running for the toxicity module"
ai_toxicity_inference_service_api_key : "API key for the toxicity API"
ai_toxicity_inference_service_api_model : "Model to use for inference. Multilingual model works with Italian, French, Russian, Portuguese, Spanish and Turkish."
ai_toxicity_flag_automatically : "Automatically flag posts / chat messages that are above the configured thresholds."
ai_toxicity_flag_threshold_toxicity : "Toxicity: a rude, disrespectful, or unreasonable comment that is somewhat likely to make you leave a discussion or give up on sharing your perspective"
ai_toxicity_flag_threshold_severe_toxicity : "Severe Toxicity: a very hateful, aggressive, or disrespectful comment that is very likely to make you leave a discussion or give up on sharing your perspective"
ai_toxicity_flag_threshold_obscene : "Obscene"
ai_toxicity_flag_threshold_identity_attack : "Identity Attack"
ai_toxicity_flag_threshold_insult : "Insult"
ai_toxicity_flag_threshold_threat : "Threat"
ai_toxicity_flag_threshold_sexual_explicit : "Sexual Explicit"
ai_toxicity_groups_bypass : "Users in these groups will not have their posts classified by the toxicity module."
ai_sentiment_enabled : "Enable the sentiment module."
ai_sentiment_inference_service_api_endpoint : "URL where the API is running for the sentiment module"
ai_sentiment_inference_service_api_key : "API key for the sentiment API"
ai_sentiment_models : "Models to use for inference. Sentiment classifies posts in the positive/neutral/negative space. Emotion classifies in the anger/disgust/fear/joy/neutral/sadness/surprise space."
ai_nsfw_detection_enabled : "Enable the NSFW module."
ai_nsfw_inference_service_api_endpoint : "URL where the API is running for the NSFW module"
ai_nsfw_inference_service_api_key : "API key for the NSFW API"
ai_nsfw_flag_automatically : "Automatically flag NSFW posts that are above the configured thresholds."
ai_nsfw_flag_threshold_general : "General Threshold for an image to be considered NSFW."
ai_nsfw_flag_threshold_drawings : "Threshold for a drawing to be considered NSFW."
ai_nsfw_flag_threshold_hentai : "Threshold for an image classified as hentai to be considered NSFW."
ai_nsfw_flag_threshold_porn : "Threshold for an image classified as porn to be considered NSFW."
ai_nsfw_flag_threshold_sexy : "Threshold for an image classified as sexy to be considered NSFW."
ai_nsfw_models : "Models to use for NSFW inference."
ai_openai_api_key : "API key for the OpenAI API. ONLY used for DALL-E. For GPT, use the LLM config tab"
ai_helper_enabled : "Enable the AI helper."
composer_ai_helper_allowed_groups : "Users in these groups will see the AI helper button in the composer."
ai_helper_allowed_in_pm : "Enable the composer's AI helper in PMs."
ai_helper_model : "Model to use for the AI helper."
ai_helper_custom_prompts_allowed_groups : "Users in these groups will see the custom prompt option in the AI helper."
ai_helper_automatic_chat_thread_title_delay : "Delay in minutes before the AI helper automatically sets the chat thread title."
ai_helper_automatic_chat_thread_title : "Automatically set chat thread titles based on thread contents."
ai_helper_illustrate_post_model : "Model to use for the composer AI helper's illustrate post feature"
ai_helper_enabled_features : "Select the features to enable in the AI helper."
post_ai_helper_allowed_groups : "User groups allowed to access AI Helper features in posts"
ai_helper_image_caption_model : "Select the model to use for generating image captions"
ai_auto_image_caption_allowed_groups : "Users in these groups can toggle automatic image captioning."
ai_embeddings_selected_model : "Use the selected model for generating embeddings."
ai_embeddings_generate_for_pms : "Generate embeddings for personal messages."
ai_embeddings_semantic_related_topics_enabled : "Use Semantic Search for related topics."
ai_embeddings_semantic_related_topics : "Maximum number of topics to show in the related topics section."
ai_embeddings_backfill_batch_size : "Number of embeddings to backfill every 15 minutes."
ai_embeddings_semantic_search_enabled : "Enable full-page semantic search."
ai_embeddings_semantic_quick_search_enabled : "Enable semantic search option in search menu popup."
ai_embeddings_semantic_related_include_closed_topics : "Include closed topics in semantic search results"
ai_embeddings_semantic_search_hyde_model : "Model used to expand keywords to get better results during a semantic search"
ai_embeddings_per_post_enabled : Generate embeddings for each post
ai_summarization_enabled : "Enable the topic summarization module."
ai_summarization_model : "Model to use for summarization."
ai_custom_summarization_allowed_groups : "Groups allowed to create new summaries."
ai_pm_summarization_allowed_groups : "Groups allowed to create and view summaries in PMs."
ai_summary_gists_enabled : "Generate brief summaries of latest replies in topics automatically."
ai_summary_gists_allowed_groups : "Groups allowed to see gists in the hot topics list."
ai_summary_backfill_maximum_topics_per_hour : "Number of topic summaries to backfill per hour."
ai_bot_enabled : "Enable the AI Bot module."
ai_bot_enable_chat_warning : "Display a warning when PM chat is initiated. Can be overridden by editing the translation string: discourse_ai.ai_bot.pm_warning"
ai_bot_allowed_groups : "When the GPT Bot has access to the PM, it will reply to members of these groups."
ai_bot_debugging_allowed_groups : "Allow these groups to see a debug button on posts which displays the raw AI request and response"
ai_bot_public_sharing_allowed_groups : "Allow these groups to share AI personal messages with the public via a unique publicly available link. Note: if your site requires login, shares will also require login."
ai_bot_add_to_header : "Display a button in the header to start a PM with an AI Bot"
ai_bot_github_access_token : "GitHub access token for use with GitHub AI tools (required for search support)"
ai_stability_api_key : "API key for the stability.ai API"
ai_stability_engine : "Image generation engine to use for the stability.ai API"
ai_stability_api_url : "URL for the stability.ai API"
ai_google_custom_search_api_key : "API key for the Google Custom Search API. See: https://developers.google.com/custom-search"
ai_google_custom_search_cx : "CX for the Google Custom Search API"
reviewables :
reasons :
flagged_by_toxicity : The AI plugin flagged this after classifying it as toxic.
flagged_by_nsfw : The AI plugin flagged this after classifying at least one of the attached images as NSFW.
reports :
sentiment_analysis :
title : "Sentiment analysis"
description : "This report provides sentiment analysis for posts, grouped by category, with positive, negative, and neutral scores for each post and category."
overall_sentiment :
title : "Overall sentiment"
description : 'The chart compares the number of posts classified as either positive or negative. Posts are counted when their positive or negative score exceeds the configured threshold, so neutral posts are not shown. Personal messages (PMs) are also excluded. Classified with "cardiffnlp/twitter-roberta-base-sentiment-latest".'
xaxis : "Positive(%)"
yaxis : "Date"
emotion_admiration :
title : 🤩 Admiration
description : "Posts classified with the emotion admiration via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_amusement :
title : 😄 Amusement
description : "Posts classified with the emotion amusement via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_anger :
title : 😠 Anger
description : "Posts classified with the emotion anger via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_annoyance :
title : 😒 Annoyance
description : "Posts classified with the emotion annoyance via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_approval :
title : 👍 Approval
description : "Posts classified with the emotion approval via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_caring :
title : 🤗 Caring
description : "Posts classified with the emotion caring via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_confusion :
title : 😕 Confusion
description : "Posts classified with the emotion confusion via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_curiosity :
title : 🤔 Curiosity
description : "Posts classified with the emotion curiosity via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_desire :
title : 😍 Desire
description : "Posts classified with the emotion desire via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_disappointment :
title : 😞 Disappointment
description : "Posts classified with the emotion disappointment via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_disapproval :
title : 👎 Disapproval
description : "Posts classified with the emotion disapproval via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_disgust :
title : 🤢 Disgust
description : "Posts classified with the emotion disgust via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_embarrassment :
title : 😳 Embarrassment
description : "Posts classified with the emotion embarrassment via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_excitement :
title : 🤪 Excitement
description : "Posts classified with the emotion excitement via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_fear :
title : 😨 Fear
description : "Posts classified with the emotion fear via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_gratitude :
title : 🙏 Gratitude
description : "Posts classified with the emotion gratitude via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_grief :
title : 😢 Grief
description : "Posts classified with the emotion grief via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_joy :
title : 😊 Joy
description : "Posts classified with the emotion joy via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_love :
title : ❤️ Love
description : "Posts classified with the emotion love via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_nervousness :
title : 😰 Nervousness
description : "Posts classified with the emotion nervousness via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_neutral :
title : 😐 Neutral
description : "Posts classified with the emotion neutral via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_optimism :
title : 🌟 Optimism
description : "Posts classified with the emotion optimism via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_pride :
title : 🦁 Pride
description : "Posts classified with the emotion pride via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_realization :
title : 💡 Realization
description : "Posts classified with the emotion realization via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_relief :
title : 😌 Relief
description : "Posts classified with the emotion relief via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_remorse :
title : 😔 Remorse
description : "Posts classified with the emotion remorse via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_sadness :
title : 😭 Sadness
description : "Posts classified with the emotion sadness via AI, using the model 'SamLowe/roberta-base-go_emotions'."
emotion_surprise :
title : 😲 Surprise
description : "Posts classified with the emotion surprise via AI, using the model 'SamLowe/roberta-base-go_emotions'."
discourse_ai :
ai_artifact :
link : "Show Artifact in new tab"
view_source : "View Source"
view_changes : "View Changes"
change_description : "Change Description"
unknown_model : "Unknown AI model"
tools :
custom_name : "%{name} (custom)"
presets :
browse_web_jina :
name : "Browse web (jina.ai)"
exchange_rate :
name : "Exchange rate"
stock_quote :
name : "Stock quote (AlphaVantage)"
image_generation :
name : "Flux image generator (Together.ai)"
empty_tool :
name : "Start from blank..."
name :
characters : "must only include numbers, letters, and underscores"
ai_helper :
errors :
completion_request_failed : "Something went wrong while trying to provide suggestions. Please try again."
prompts :
translate : Translate to %{language}
generate_titles : Suggest topic titles
proofread : Proofread text
markdown_table : Generate Markdown table
custom_prompt : "Custom Prompt"
explain : "Explain"
illustrate_post : "Illustrate Post"
replace_dates : "Smart dates"
painter :
attribution :
stable_diffusion_xl : "Image by Stable Diffusion XL"
dall_e_3 : "Image by DALL-E 3"
image_caption :
attribution : "Captioned by AI"
share_ai :
read_more : "Read full transcript"
onebox_title : "AI Conversation with %{llm_name}"
formatted_excerpt : "AI Conversation with %{llm_name}:\n %{excerpt}"
title : "%{title} - AI Conversation - %{site_name}"
errors :
not_allowed : "You are not allowed to share this topic"
other_people_in_pm : "Personal messages with other humans cannot be shared publicly"
other_content_in_pm : "Personal messages containing posts from other people cannot be shared publicly"
failed_to_share : "Failed to share the conversation"
conversation_deleted : "Conversation share deleted successfully"
spam_detection :
flag_reason : "Flagged as spam by <a href='%{url}'>Discourse AI</a>"
silence_reason : "User silenced automatically by <a href='%{url}'>Discourse AI</a>"
invalid_error_type : "Invalid error type provided"
unexpected : "An unexpected error occurred"
bot_user_update_failed : "Failed to update the spam scanning bot user"
ai_bot :
reply_error : "Sorry, it looks like our system encountered an unexpected issue while trying to reply.\n\n[details='Error details']\n%{details}\n[/details]"
default_pm_prefix : "[Untitled AI bot PM]"
thinking : "Thinking..."
personas :
default_llm_required : "Default LLM model is required prior to enabling Chat"
cannot_delete_system_persona : "System personas cannot be deleted, please disable them instead"
cannot_edit_system_persona : "System personas can only be renamed; you may not edit their tools or system prompt. Instead, disable the persona and make a copy."
cannot_have_duplicate_tools : "Cannot have duplicate tools"
github_helper :
name : "GitHub Helper"
description : "AI Bot specialized in assisting with GitHub-related tasks and questions"
general :
name : Forum Helper
description : "General purpose AI Bot capable of performing various tasks"
artist :
name : Artist
description : "AI Bot specialized in generating images"
sql_helper :
name : SQL Helper
description : "AI Bot specialized in helping craft SQL queries on this Discourse instance"
settings_explorer :
name : Settings Explorer
description : "AI Bot specialized in helping explore Discourse site settings"
researcher :
name : Researcher
description : "AI Bot with Google access that can research information for you"
creative :
name : Creative
description : "AI Bot with no external integrations specialized in creative tasks"
dall_e3 :
name : "DALL-E 3"
description : "AI Bot specialized in generating images using DALL-E 3"
discourse_helper :
name : "Discourse Helper"
description : "AI Bot specialized in helping with Discourse-related tasks"
web_artifact_creator :
name : "Web Artifact Creator"
description : "AI Bot specialized in creating interactive web artifacts"
topic_not_found : "Summary unavailable, topic not found!"
summarizing : "Summarizing topic"
searching : "Searching for: '%{query}'"
tool_options :
create_artifact :
creator_llm :
name : "LLM"
description : "Language model to use for artifact creation"
update_artifact :
editor_llm :
name : "LLM"
description : "Language model to use for artifact edits"
update_algorithm :
name : "Update Algorithm"
description : "Ask LLM to fully replace, or use diff to update"
do_not_echo_artifact :
name : "Do Not Echo Artifact"
description : "Will limit costs however effectiveness of Artifact updates will be reduced"
2024-10-23 16:55:10 +11:00
google :
base_query :
name : "Base Search Query"
description : "Base query to use when searching. Examples: 'site:example.com' will only include results from example.com, before:2022-01-01 will only includes results from 2021 and earlier. This text is prepended to the search query."
2024-06-19 15:49:36 +10:00
read :
read_private :
name : "Read Private"
description : "Allow access to all topics user has access to (by default only public topics are included)"
2023-12-08 08:42:56 +11:00
search :
search_private :
name : "Search Private"
description : "Include all topics the user has access to in search results (by default only public topics are included)"
max_results :
name : "Maximum number of results"
description : "Maximum number of results to include in the search - if empty default rules will be used and count will be scaled depending on model used. Highest value is 100."
2023-12-08 08:42:56 +11:00
base_query :
name : "Base Search Query"
description : "Base query to use when searching. Example: '#urgent' will prepend '#urgent' to the search query and only include topics with the urgent category or tag."
tool_summary :
read_artifact : "Read a web artifact"
update_artifact : "Update a web artifact"
create_artifact : "Create a web artifact"
web_browser : "Browse Web"
github_search_files : "GitHub search files"
github_search_code : "GitHub code search"
github_file_content : "GitHub file content"
github_pull_request_diff : "GitHub pull request diff"
random_picker : "Random Picker"
categories : "List categories"
search : "Search"
tags : "List tags"
time : "Time"
summarize : "Summarize"
image : "Generate image"
google : "Search Google"
read : "Read topic"
setting_context : "Look up site setting context"
schema : "Look up database schema"
search_settings : "Search site settings"
dall_e : "Generate image"
search_meta_discourse : "Search Meta Discourse"
javascript_evaluator : "Evaluate JavaScript"
tool_help :
read_artifact : "Read a web artifact using the AI Bot"
update_artifact : "Update a web artifact using the AI Bot"
create_artifact : "Create a web artifact using the AI Bot"
web_browser : "Browse a web page using the AI Bot"
github_search_code : "Search for code in a GitHub repository"
github_search_files : "Search for files in a GitHub repository"
github_file_content : "Retrieve content of files from a GitHub repository"
github_pull_request_diff : "Retrieve a GitHub pull request diff"
random_picker : "Pick a random number or a random element of a list"
categories : "List all publicly visible categories on the forum"
search : "Search all public topics on the forum"
tags : "List all tags on the forum"
time : "Find time in various time zones"
summary : "Summarize a topic"
image : "Generate image using Stable Diffusion"
google : "Search Google for a query"
read : "Read public topic on the forum"
setting_context : "Look up site setting context"
schema : "Look up database schema"
search_settings : "Search site settings"
dall_e : "Generate image using DALL-E 3"
search_meta_discourse : "Search Meta Discourse"
javascript_evaluator : "Evaluate JavaScript"
tool_description :
read_artifact : "Read a web artifact using the AI Bot"
update_artifact : "Updated a web artifact using the AI Bot"
DEV: artifact system update (#1096)
### Why
This pull request fundamentally restructures how AI bots create and update web artifacts to address critical limitations in the previous approach:
1. **Improved Artifact Context for LLMs**: Previously, artifact creation and update tools included the *entire* artifact source code directly in the tool arguments. This overloaded the Language Model (LLM) with raw code, making it difficult for the LLM to maintain a clear understanding of the artifact's current state when applying changes. The LLM would struggle to differentiate between the base artifact and the requested modifications, leading to confusion and less effective updates.
2. **Reduced Token Usage and History Bloat**: Including the full artifact source code in every tool interaction was extremely token-inefficient. As conversations progressed, this redundant code in the history consumed a significant number of tokens unnecessarily. This not only increased costs but also diluted the context for the LLM with less relevant historical information.
3. **Enabling Updates for Large Artifacts**: The lack of a practical diff or targeted update mechanism made it nearly impossible to efficiently update larger web artifacts. Sending the entire source code for every minor change was both computationally expensive and prone to errors, effectively blocking the use of AI bots for meaningful modifications of complex artifacts.
**This pull request addresses these core issues by**:
* Introducing methods for the AI bot to explicitly *read* and understand the current state of an artifact.
* Implementing efficient update strategies that send *targeted* changes rather than the entire artifact source code.
* Providing options to control the level of artifact context included in LLM prompts, optimizing token usage.
### What
The main changes implemented in this PR to resolve the above issues are:
1. **`Read Artifact` Tool for Contextual Awareness**:
- A new `read_artifact` tool is introduced, enabling AI bots to fetch and process the current content of a web artifact from a given URL (local or external).
- This provides the LLM with a clear and up-to-date representation of the artifact's HTML, CSS, and JavaScript, improving its understanding of the base to be modified.
- By cloning local artifacts, it allows the bot to work with a fresh copy, further enhancing context and control.
2. **Refactored `Update Artifact` Tool with Efficient Strategies**:
- The `update_artifact` tool is redesigned to employ more efficient update strategies, minimizing token usage and improving update precision:
- **`diff` strategy**: Utilizes a search-and-replace diff algorithm to apply only the necessary, targeted changes to the artifact's code. This significantly reduces the amount of code sent to the LLM and focuses its attention on the specific modifications.
- **`full` strategy**: Provides the option to replace the entire content sections (HTML, CSS, JavaScript) when a complete rewrite is required.
- Tool options enhance the control over the update process:
- `editor_llm`: Allows selection of a specific LLM for artifact updates, potentially optimizing for code editing tasks.
- `update_algorithm`: Enables choosing between `diff` and `full` update strategies based on the nature of the required changes.
- `do_not_echo_artifact`: Defaults to true, and by *not* echoing the artifact in prompts, it further reduces token consumption in scenarios where the LLM might not need the full artifact context for every update step (though effectiveness might be slightly reduced in certain update scenarios).
3. **System and General Persona Tool Option Visibility and Customization**:
- Tool options, including those for system personas, are made visible and editable in the admin UI. This allows administrators to fine-tune the behavior of all personas and their tools, including setting specific LLMs or update algorithms. This was previously limited or hidden for system personas.
4. **Centralized and Improved Content Security Policy (CSP) Management**:
- The CSP for AI artifacts is consolidated and made more maintainable through the `ALLOWED_CDN_SOURCES` constant. This improves code organization and makes future updates to the allowed CDN list easier, while maintaining the existing security posture (a minimal sketch appears after the closing paragraph below).
5. **Codebase Improvements**:
- Refactoring of diff utilities, introduction of strategy classes, enhanced error handling, new locales, and comprehensive testing all contribute to a more robust, efficient, and maintainable artifact management system.
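To make the `diff` strategy concrete, here is a minimal sketch of applying a single search-and-replace block to one section of an artifact. The block format and the class/method names are illustrative assumptions, not the plugin's actual implementation.

```ruby
# Minimal sketch, assuming the editor LLM emits blocks of the form
# <<<<<<< SEARCH / ======= / >>>>>>> REPLACE. All names here are illustrative.
class ArtifactDiffSketch
  SEARCH_MARKER  = "<<<<<<< SEARCH"
  DIVIDER_MARKER = "======="
  REPLACE_MARKER = ">>>>>>> REPLACE"

  # Applies one search/replace block to a source section (HTML, CSS or JS)
  # and returns the updated source. Raises if the block is malformed or the
  # search text cannot be found.
  def self.apply(source, block)
    lines = block.lines.map(&:chomp)
    search_idx  = lines.index(SEARCH_MARKER)
    divider_idx = lines.index(DIVIDER_MARKER)
    replace_idx = lines.index(REPLACE_MARKER)
    raise ArgumentError, "malformed diff block" unless search_idx && divider_idx && replace_idx

    search_text  = lines[(search_idx + 1)...divider_idx].join("\n")
    replace_text = lines[(divider_idx + 1)...replace_idx].join("\n")
    raise ArgumentError, "search text not found in artifact" unless source.include?(search_text)

    # Only the changed region travels through the tool call; the rest of the
    # artifact never has to be echoed back to the LLM.
    source.sub(search_text, replace_text)
  end
end

css = "body { color: red; }\n"
block = <<~BLOCK
  <<<<<<< SEARCH
  color: red;
  =======
  color: blue;
  >>>>>>> REPLACE
BLOCK
ArtifactDiffSketch.apply(css, block) # => "body { color: blue; }\n"
```

Under this reading, a `full` update would simply replace the whole section string instead of calling `sub` on a targeted fragment.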
By addressing the issues of LLM context confusion, token inefficiency, and the limitations of updating large artifacts, this pull request significantly improves the practicality and effectiveness of AI bots in managing web artifacts within Discourse.
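On point 4, the general shape of the consolidation might look like the sketch below. `ALLOWED_CDN_SOURCES` is the constant named in this PR; the specific hosts and the helper method are assumptions for illustration only.

```ruby
# Sketch only: a single source of truth for CDN hosts allowed inside
# artifact IFRAMEs. The host list and helper name are illustrative.
module AiArtifactSecuritySketch
  ALLOWED_CDN_SOURCES = %w[
    https://cdn.jsdelivr.net
    https://unpkg.com
  ].freeze

  # Builds the script-src directive from the constant, so adding or removing
  # a CDN later is a one-line change in exactly one place.
  def self.script_src_directive
    "script-src 'self' #{ALLOWED_CDN_SOURCES.join(' ')}"
  end
end
```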
2025-02-04 16:27:27 +11:00
create_artifact : "Created a web artifact: %{name} - %{specification}"
2024-03-28 16:01:58 +11:00
web_browser : "Reading <a href='%{url}'>%{url}</a>"
2024-05-30 06:33:50 +10:00
github_search_files : "Searched for '%{keywords}' in %{repo}/%{branch}"
2024-03-08 06:37:23 +11:00
github_search_code : "Searched for '%{query}' in %{repo}"
github_pull_request_diff : "<a href='%{url}'>%{repo} %{pull_id}</a>"
github_file_content : "Retrieved content of %{file_paths} from %{repo_name}@%{branch}"
2024-02-15 16:37:59 +11:00
random_picker : "Picking from %{options}, picked: %{result}"
2023-08-09 16:01:48 +10:00
read : "Reading: <a href='%{url}'>%{title}</a>"
2023-05-20 17:45:54 +10:00
time : "Time in %{timezone} is %{time}"
summarize : "Summarized <a href='%{url}'>%{title}</a>"
2023-11-24 18:08:08 +11:00
dall_e : "%{prompt}"
2023-06-20 15:44:03 +10:00
image : "%{prompt}"
2023-05-20 17:45:54 +10:00
categories :
one : "Found %{count} category"
other : "Found %{count} categories"
tags :
one : "Found %{count} tag"
other : "Found %{count} tags"
search :
one : "Found %{count} <a href='%{url}'>result</a> for '%{query}'"
2024-02-19 14:52:12 +11:00
other : "Found %{count} <a href='%{url}'>results</a> for '%{query}'"
search_meta_discourse :
one : "Found %{count} <a href='%{url}'>result</a> for '%{query}'"
2023-05-20 17:45:54 +10:00
other : "Found %{count} <a href='%{url}'>results</a> for '%{query}'"
google :
one : "Found %{count} <a href='%{url}'>result</a> for '%{query}'"
other : "Found %{count} <a href='%{url}'>results</a> for '%{query}'"
2023-08-29 10:43:58 +10:00
setting_context : "Reading context for: %{setting_name}"
2023-08-30 16:15:03 +10:00
schema : "%{tables}"
2023-09-01 11:48:51 +10:00
search_settings :
one : "Found %{count} result for '%{query}'"
other : "Found %{count} results for '%{query}'"
2023-06-13 14:32:26 -03:00
summarization :
2023-06-20 15:44:03 +10:00
configuration_hint :
2023-06-13 14:32:26 -03:00
one : "Configure the `%{setting}` setting first."
other : "Configure these settings first: %{settings}"
2024-07-02 08:51:59 -07:00
chat :
no_targets : "There were no messages during the selected period."
FEATURE: UI to update ai personas on admin page (#290)
Introduces a UI to manage customizable personas (admin only feature)
Part of the change was some extensive internal refactoring:
- AIBot now has a persona set in the constructor; once set, it never changes
- Command now takes in bot as a constructor param, so it has the correct persona and is not generating AIBot objects on the fly
- Added a .prettierignore file; due to the way ALE is configured in nvim it is a prerequisite for prettier to work
- Adds a bunch of validations on the AIPersona model; system personas (artist/creative etc...) are all seeded. We now ensure name uniqueness and only allow certain properties to be touched for system personas (a rough sketch follows this list)
- (JS note) the client-side design takes advantage of nested routes: the parent route for personas gets all the personas via this.store.findAll("ai-persona"), then child routes simply reach into this model to find a particular persona
- (JS note) data is sideloaded into the ai-persona model via the meta property supplied from the controller, resultSetMeta
- This removes ai_bot_enabled_personas and ai_bot_enabled_chat_commands; both are now controlled from the UI on a per-persona basis
- Fixes a long-standing bug in token accounting ... we were doing to_json.length instead of to_json.to_s.length
- Amended it so {commands} are always inserted at the end unconditionally; there is no need to add it to the template of the system message, as that just confuses things
- Adds a concept of required_commands to stock personas; these are commands that must be configured for the stock persona to show up
- Refactored tests so we stop requiring inference_stubs; it was very confusing to need it. Added it to plugin.rb for now, which at least is clearer
- Migrates the persona selector to gjs
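As a rough illustration of the AIPersona validations described above, assuming an ActiveRecord-style model; the `system` column and the editable-field list are guesses, not the actual schema.

```ruby
# Sketch only: a `system` boolean column is assumed to mark seeded personas,
# with a small whitelist of columns that stay editable on them.
class AIPersona < ActiveRecord::Base
  validates :name, presence: true, uniqueness: true

  # Only check locked fields on already-persisted system personas, so seeding
  # the stock personas in the first place is unaffected.
  validate :system_persona_locked_fields, if: -> { system? && persisted? }

  private

  # Seeded system personas (artist, creative, ...) only allow a few columns
  # to change; any other modification is rejected with a validation error.
  def system_persona_locked_fields
    editable = %w[enabled priority allowed_group_ids updated_at]
    locked_changes = changed - editable
    if locked_changes.any?
      errors.add(:base, "cannot change #{locked_changes.join(', ')} on a system persona")
    end
  end
end
```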
---------
Co-authored-by: Joffrey JAFFEUX <j.jaffeux@gmail.com>
Co-authored-by: Martin Brennan <martin@discourse.org>
2023-11-21 16:56:43 +11:00
2023-11-08 10:50:37 -03:00
sentiment :
reports :
2024-12-12 02:13:18 +09:00
overall_sentiment : "Overall sentiment (Positive - Negative)"
2023-11-08 10:50:37 -03:00
post_emotion :
2024-08-02 14:23:29 -07:00
sadness : "Sadness 😢"
surprise : "Surprise 😱"
neutral : "Neutral 😐"
fear : "Fear 😨"
anger : "Anger 😡"
joy : "Joy 😀"
disgust : "Disgust 🤢"
2025-02-20 09:14:10 -08:00
sentiment_analysis :
positive : "Positive"
negative : "Negative"
neutral : "Neutral"
2024-01-29 16:04:25 -03:00
llm :
configuration :
disable_module_first : "You have to disable %{setting} first."
2024-12-12 09:17:25 +11:00
set_llm_first : "Set %{setting} first"
2024-01-29 16:04:25 -03:00
model_unreachable : "We couldn't get a response from this model. Check your settings first."
2024-12-12 09:17:25 +11:00
invalid_seeded_model : "You can't use this model with this feature"
must_select_model : "You must select an LLM first"
2024-01-29 16:04:25 -03:00
endpoints :
not_configured : "%{display_name} (not configured)"
configuration_hint :
one : "Make sure the `%{settings}` setting was configured."
other : "Make sure these settings were configured: %{settings}"
2024-05-29 11:04:47 +10:00
2024-05-27 16:44:08 -03:00
delete_failed :
one : "We couldn't delete this model because %{settings} is using it. Update the setting and try again."
other : "We couldn't delete this model because %{settings} are using it. Update the settings and try again."
2024-08-28 15:57:58 -03:00
cannot_edit_builtin : "You can't edit a built-in model."
2024-02-01 16:54:09 -03:00
embeddings :
2025-01-21 12:23:19 -03:00
delete_failed : "This model is currently in use. Update the `ai embeddings selected model` first."
cannot_edit_builtin : "You can't edit a built-in model."
2024-02-01 16:54:09 -03:00
configuration :
disable_embeddings : "You have to disable 'ai embeddings enabled' first."
2025-01-30 14:16:56 -03:00
invalid_config : "You selected an invalid option."
2025-01-21 12:23:19 -03:00
choose_model : "Set 'ai embeddings selected model' first."
2024-08-07 16:08:56 -03:00
llm_models :
missing_provider_param : "%{param} can't be blank"
2024-12-02 12:26:54 -08:00
bedrock_invalid_url : "Please complete all the fields to use this model."
2024-10-30 10:28:20 +11:00
errors :
2025-01-14 15:54:09 +11:00
quota_exceeded : "You have exceeded the quota for this model. Please try again in %{relative_time}."
quota_required : "You must specify maximum tokens or usages for this model"
2024-10-30 10:28:20 +11:00
no_query_specified : The query parameter is required; please specify it.
no_user_for_persona : The persona specified does not have a user associated with it.
persona_not_found : The persona specified does not exist. Check the persona_name or persona_id params.
no_user_specified : The username or the user_unique_id parameter is required; please specify it.
user_not_found : The user specified does not exist. Check the username param.
persona_disabled : The persona specified is disabled. Check the persona_name or persona_id params.
no_default_llm : The persona must have a default_llm defined.
user_not_allowed : The user is not allowed to participate in the topic.
prompt_message_length : The message %{idx} is over the 1000 character limit.
2024-12-17 08:00:05 +09:00
dashboard :
problem :
2025-03-05 11:31:23 +08:00
ai_llm_status : "The LLM %{model_name} is encountering issues. Please check the <a href='%{url}'>model's configuration page</a>."