en: admin_js: admin: api: scopes: descriptions: discourse_ai: search: "Allows AI search" stream_completion: "Allows streaming AI persona completions" site_settings: categories: discourse_ai: "Discourse AI" dashboard: emotion: title: "Emotion" description: "The table lists a count of posts classified with a determined emotion. Classified with the model 'SamLowe/roberta-base-go_emotions'." js: discourse_automation: scriptables: llm_report: fields: sender: label: "Sender" description: "The user that will send the report" receivers: label: "Receivers" description: "The users that will receive the report (emails will be sent direct emails, usernames will be sent a PM)" topic_id: label: "Topic ID" description: "The topic ID to post the report to" title: label: "Title" description: "The title of the report" days: label: "Days" description: "The timespan of the report" offset: label: "Offset" description: "When testing you may want to run the report historically, use offset to start the report in an earlier date" instructions: label: "Instructions" description: "The instructions provided to the large language model" sample_size: label: "Sample Size" description: "The number of posts to sample for the report" tokens_per_post: label: "Tokens per post" description: "The number of LLM tokens to use per post" model: label: "Model" description: "LLM to use for report generation" categories: label: "Categories" description: "Filter topics only to these categories" tags: label: "Tags" description: "Filter topics only to these tags" exclude_tags: label: "Exclude Tags" description: "Exclude topics with these tags" exclude_categories: label: "Exclude Categories" description: "Exclude topics with these categories" allow_secure_categories: label: "Allow secure categories" description: "Allow the report to be generated for topics in secure categories" suppress_notifications: label: "Suppress Notifications" description: "Suppress notifications the report may generate by transforming to 
content. This will remap mentions and internal links." debug_mode: label: "Debug Mode" description: "Enable debug mode to see the raw input and output of the LLM" priority_group: label: "Priority Group" description: "Prioritize content from this group in the report" temperature: label: "Temperature" description: "Temperature to use for the LLM. Increase to increase randomness (0 to use model default)" top_p: label: "Top P" description: "Top P to use for the LLM, increase to increase randomness (0 to use model default)" llm_triage: fields: system_prompt: label: "System Prompt" description: "The prompt that will be used to triage, be sure for it to reply with a single word you can use to trigger the action" max_post_tokens: label: "Max Post Tokens" description: "The maximum number of tokens to scan using LLM triage" search_for_text: label: "Search for text" description: "If the following text appears in the LLM reply, apply these actions" category: label: "Category" description: "Category to apply to the topic" tags: label: "Tags" description: "Tags to apply to the topic" canned_reply: label: "Reply" description: "Raw text of the canned reply to post on the topic" canned_reply_user: label: "Reply User" description: "Username of the user to post the canned reply" hide_topic: label: "Hide topic" description: "Make topic not visible to the public if triggered" flag_type: label: "Flag type" description: "Type of flag to apply to the post (spam or simply raise for review)" flag_post: label: "Flag post" description: "Flags post (either as spam or for review)" include_personal_messages: label: "Include personal messages" description: "Also scan and triage personal messages" model: label: "Model" description: "Language model used for triage" discourse_ai: title: "AI" modals: select_option: "Select an option..." 
usage: short_title: "Usage" summary: "Summary" total_tokens: "Total tokens" tokens_over_time: "Tokens over time" features_breakdown: "Usage per feature" feature: "Feature" usage_count: "Usage count" model: "Model" models_breakdown: "Usage per model" users_breakdown: "Usage per user" all_features: "All features" all_models: "All models" username: "Username" total_requests: "Total requests" request_tokens: "Request tokens" response_tokens: "Response tokens" cached_tokens: "Cached tokens" ai_persona: tool_strategies: all: "Apply to all replies" replies: one: "Apply to first reply only" other: "Apply to first %{count} replies" back: "Back" name: "Name" edit: "Edit" description: "Description" no_llm_selected: "No language model selected" max_context_posts: "Max context posts" max_context_posts_help: "The maximum number of posts to use as context for the AI when responding to a user. (empty for default)" vision_enabled: Vision enabled vision_enabled_help: If enabled, the AI will attempt to understand images users post in the topic, depends on the model being used supporting vision. Supported by latest models from Anthropic, Google, and OpenAI. vision_max_pixels: Supported image size vision_max_pixel_sizes: low: Low quality - cheapest (256x256) medium: Medium quality (512x512) high: High quality - slowest (1024x1024) tool_details: Show tool details tool_details_help: Will show end users details on which tools the language model has triggered. mentionable: Allow mentions mentionable_help: If enabled, users in allowed groups can mention this user in posts, the AI will respond as this persona. user: User create_user: Create user create_user_help: You can optionally attach a user to this persona. If you do, the AI will use this user to respond to requests. default_llm: Default language model default_llm_help: The default language model to use for this persona. Required if you wish to mention persona on public posts. 
question_consolidator_llm: Language Model for Question Consolidator question_consolidator_llm_help: The language model to use for the question consolidator, you may choose a less powerful model to save costs. system_prompt: System prompt forced_tool_strategy: Forced tool strategy allow_chat_direct_messages: "Allow chat direct messages" allow_chat_direct_messages_help: "If enabled, users in allowed groups can send direct messages to this persona." allow_chat_channel_mentions: "Allow chat channel mentions" allow_chat_channel_mentions_help: "If enabled, users in allowed groups can mention this persona in chat channels." allow_personal_messages: "Allow personal messages" allow_personal_messages_help: "If enabled, users in allowed groups can send personal messages to this persona." allow_topic_mentions: "Allow topic mentions" allow_topic_mentions_help: "If enabled, users in allowed groups can mention this persona in topics." force_default_llm: "Always use default language model" save: "Save" saved: "Persona saved" enabled: "Enabled?" tools: "Enabled tools" forced_tools: "Forced tools" allowed_groups: "Allowed groups" confirm_delete: "Are you sure you want to delete this persona?" new: "New persona" no_personas: "You have not created any personas yet" title: "Personas" short_title: "Personas" delete: "Delete" temperature: "Temperature" temperature_help: "Temperature to use for the LLM. Increase to increase creativity (leave empty to use model default, generally a value from 0.0 to 2.0)" top_p: "Top P" top_p_help: "Top P to use for the LLM, increase to increase randomness (leave empty to use model default, generally a value from 0.0 to 1.0)" priority: "Priority" priority_help: "Priority personas are displayed to users at the top of the persona list. If multiple personas have priority, they will be sorted alphabetically." 
tool_options: "Tool options" rag_conversation_chunks: "Search conversation chunks" rag_conversation_chunks_help: "The number of chunks to use for the RAG model searches. Increase to increase the amount of context the AI can use." persona_description: "Personas are a powerful feature that allows you to customize the behavior of the AI engine in your Discourse forum. They act as a 'system message' that guides the AI's responses and interactions, helping to create a more personalized and engaging user experience." rag: options: rag_chunk_tokens: "Upload chunk tokens" rag_chunk_tokens_help: "The number of tokens to use for each chunk in the RAG model. Increase to increase the amount of context the AI can use. (changing will re-index all uploads)" rag_chunk_overlap_tokens: "Upload chunk overlap tokens" rag_chunk_overlap_tokens_help: "The number of tokens to overlap between chunks in the RAG model. (changing will re-index all uploads)" show_indexing_options: "Show upload options" hide_indexing_options: "Hide upload options" uploads: title: "Uploads" description: "Uploaded files should be formatted as plaintext (.txt) or markdown (.md)." button: "Add files" filter: "Filter uploads" indexed: "Indexed" indexing: "Indexing" uploaded: "Ready to be indexed" uploading: "Uploading..." remove: "Remove upload" tools: back: "Back" short_title: "Tools" no_tools: "You have not created any tools yet" name: "Name" subheader_description: "Tools extend the capabilities of AI bots with user-defined JavaScript functions." 
new: "New tool" name_help: "The unique name of the tool as used by the language model" description: "Description" description_help: "A clear description of the tool's purpose for the language model" summary: "Summary" summary_help: "Summary of the tool's purpose to be displayed to end users" script: "Script" parameters: "Parameters" save: "Save" add_parameter: "Add Parameter" parameter_required: "Required" parameter_enum: "Enum" parameter_name: "Parameter name" parameter_description: "Parameter description" enum_value: "Enum value" add_enum_value: "Add enum value" edit: "Edit" test: "Run test" delete: "Delete" saved: "Tool saved" presets: "Select a preset..." confirm_delete: "Are you sure you want to delete this tool?" next: title: "Next" test_modal: title: "Test AI tool" run: "Run test" result: "Test result" llms: short_title: "LLMs" no_llms: "No LLMs yet" new: "New model" display_name: "Name" name: "Model id" provider: "Provider" tokenizer: "Tokenizer" max_prompt_tokens: "Number of tokens for the prompt" url: "URL of the service hosting the model" api_key: "API Key of the service hosting the model" enabled_chat_bot: "Allow AI bot selector" vision_enabled: "Vision enabled" ai_bot_user: "AI bot User" save: "Save" edit: "Edit" saved: "LLM model saved" back: "Back" confirm_delete: Are you sure you want to delete this model? delete: Delete seeded_warning: "This model is pre-configured on your site and cannot be edited." usage: ai_bot: "AI bot" ai_helper: "Helper" ai_persona: "Persona (%{persona})" ai_summarization: "Summarize" ai_embeddings_semantic_search: "AI search" in_use_warning: one: "This model is currently used by %{settings}. If misconfigured, the feature won't work as expected." other: "This model is currently used by the following: %{settings}. If misconfigured, features won't work as expected. 
" model_description: none: "General settings that work for most language models" anthropic-claude-3-5-sonnet: "Anthropic's most intelligent model" anthropic-claude-3-5-haiku: "Fast and cost-effective" anthropic-claude-3-opus: "Excels at writing and complex tasks" google-gemini-1-5-pro: "Mid-sized multimodal model capable of a wide range of tasks" google-gemini-1-5-flash: "Lightweight, fast, and cost-efficient with multimodal reasoning" open_ai-gpt-4-turbo: "Previous generation high-intelligence model" open_ai-gpt-4o: "High intelligence model for complex, multi-step tasks" open_ai-gpt-4o-mini: "Affordable and fast small model for lightweight tasks" open_ai-o1-mini: "Cost-efficient reasoning model" open_ai-o1-preview: "OpenAI's most capable reasoning model" samba_nova-Meta-Llama-3-1-8B-Instruct: "Efficient lightweight multilingual model" samba_nova-Meta-Llama-3-1-70B-Instruct: "Powerful multipurpose model" mistral-mistral-large-latest: "Mistral's most powerful model" mistral-pixtral-large-latest: "Mistral's most powerful vision capable model" configured: title: "Configured LLMs" preconfigured_llms: "Select your LLM" preconfigured: title_no_llms: "Select a template to get started" title: "Unconfigured LLM templates" description: "LLMs (Large Language Models) are AI tools optimized for tasks like summarizing content, generating reports, automating customer interactions, and facilitating forum moderation and insights." fake: "Manual configuration" button: "Set up" next: title: "Next" tests: title: "Run test" running: "Running test..." success: "Success!" failure: "Trying to contact the model returned this error: %{error}" hints: max_prompt_tokens: "Max number of tokens for the prompt. As a rule of thumb, this should be 50% of the model's context window." name: "We include this in the API call to specify which model we'll use" vision_enabled: "If enabled, the AI will attempt to understand images. It depends on the model being used supporting vision. 
Supported by latest models from Anthropic, Google, and OpenAI." enabled_chat_bot: "If enabled, users can select this model when creating PMs with the AI bot" providers: aws_bedrock: "AWS Bedrock" anthropic: "Anthropic" vllm: "vLLM" hugging_face: "Hugging Face" cohere: "Cohere" open_ai: "OpenAI" google: "Google" azure: "Azure" ollama: "Ollama" CDCK: "CDCK" samba_nova: "SambaNova" mistral: "Mistral" fake: "Custom" provider_fields: access_key_id: "AWS Bedrock access key ID" region: "AWS Bedrock region" organization: "Optional OpenAI organization ID" disable_system_prompt: "Disable system message in prompts" enable_native_tool: "Enable native tool support" disable_native_tools: "Disable native tool support (use XML based tools)" related_topics: title: "Related topics" pill: "Related" ai_helper: title: "Suggest changes using AI" description: "Choose one of the options below, and the AI will suggest you a new version of the text." selection_hint: "Hint: You can also select a portion of the text before opening the helper to rewrite only that." suggest: "Suggest with AI" suggest_errors: too_many_tags: one: "You can only have up to %{count} tag" other: "You can only have up to %{count} tags" no_suggestions: "No suggestions available" missing_content: "Please enter some content to generate suggestions." context_menu: trigger: "Ask AI" loading: "AI is generating" cancel: "Cancel" regen: "Try Again" confirm: "Confirm" discard: "Discard" changes: "Suggested edits" custom_prompt: title: "Custom prompt" placeholder: "Enter a custom prompt..." submit: "Send prompt" translate_prompt: "Translate to %{language}" post_options_menu: trigger: "Ask AI" title: "Ask AI" loading: "AI is generating" close: "Close" copy: "Copy" copied: "Copied!" 
cancel: "Cancel" insert_footnote: "Add footnote" footnote_credits: "Explanation by AI" fast_edit: suggest_button: "Suggest edit" thumbnail_suggestions: title: "Suggested thumbnails" select: "Select" selected: "Selected" image_caption: button_label: "Caption with AI" generating: "Generating caption..." credits: "Captioned by AI" save_caption: "Save" automatic_caption_setting: "Enable auto caption" automatic_caption_loading: "Captioning images..." automatic_caption_dialog: prompt: "This post contains non-captioned images. Would you like to enable automatic captions on image uploads? (This can be changed in your preferences later)" confirm: "Enable" cancel: "Don't ask again" no_content_error: "Add content first to perform AI actions on it" reviewables: model_used: "Model used:" accuracy: "Accuracy:" embeddings: semantic_search: "Topics (Semantic)" semantic_search_loading: "Searching for more results using AI" semantic_search_results: toggle: "Showing %{count} results found using AI" toggle_hidden: "Hiding %{count} results found using AI" none: "Sorry, our AI search found no matching topics" new: "Press 'search' to begin looking for new results with AI" ai_generated_result: "Search result found using AI" quick_search: suffix: "in all topics and posts with AI" ai_artifact: expand_view_label: "Expand view" collapse_view_label: "Exit Fullscreen (ESC)" click_to_run_label: "Run Artifact" ai_bot: pm_warning: "AI chatbot messages are monitored regularly by moderators." 
cancel_streaming: "Stop reply" default_pm_prefix: "[Untitled AI bot PM]" shortcut_title: "Start a PM with an AI bot" share: "Copy AI conversation" conversation_shared: "Conversation copied" debug_ai: "View raw AI request and response" debug_ai_modal: title: "View AI interaction" copy_request: "Copy request" copy_response: "Copy response" request_tokens: "Request tokens:" response_tokens: "Response tokens:" request: "Request" response: "Response" next_log: "Next" previous_log: "Previous" share_full_topic_modal: title: "Share conversation publicly" share: "Share and copy link" update: "Update and copy link" delete: "Delete share" share_ai_conversation: name: "Share AI conversation" title: "Share this AI conversation publicly" ai_label: "AI" ai_title: "Conversation with AI" share_modal: title: "Copy AI conversation" copy: "Copy" context: "Interactions to share:" share_tip: "Alternatively, you can share the entire conversation" bot_names: fake: "Fake Test Bot" claude-3-opus: "Claude 3 Opus" claude-3-sonnet: "Claude 3 Sonnet" claude-3-haiku: "Claude 3 Haiku" cohere-command-r-plus: "Cohere Command R Plus" gpt-4: "GPT-4" gpt-4-turbo: "GPT-4 Turbo" gpt-4o: "GPT-4 Omni" gpt-3: 5-turbo: "GPT-3.5" claude-2: "Claude 2" gemini-1: 5-pro: "Gemini" mixtral-8x7B-Instruct-V0: "1": "Mixtral-8x7B V0.1" sentiments: dashboard: title: "Sentiment" summarization: chat: title: "Summarize messages" description: "Select an option below to summarize the conversation sent during the desired timeframe." summarize: "Summarize" since: one: "Last hour" other: "Last %{count} hours" topic: title: "Topic summary" close: "Close summary panel" topic_list_layout: button: compact: "Compact" expanded: "Expanded" expanded_description: "with AI summaries" review: types: reviewable_ai_post: title: "AI-Flagged post" reviewable_ai_chat_message: title: "AI-Flagged chat message"