discourse-ai/config/locales/server.en.yml

en:
  site_settings:
    discourse_ai_enabled: "Enable the Discourse AI plugin."
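    # Toxicity module settings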
    ai_toxicity_enabled: "Enable the toxicity module."
    ai_toxicity_inference_service_api_endpoint: "URL where the API for the toxicity module is running."
    ai_toxicity_inference_service_api_key: "API key for the toxicity API."
    ai_toxicity_inference_service_api_model: "Model to use for inference. The multilingual model works with Italian, French, Russian, Portuguese, Spanish, and Turkish."
    ai_toxicity_flag_automatically: "Automatically flag posts/chat messages that exceed the configured thresholds."
    ai_toxicity_flag_threshold_toxicity: "Toxicity: a rude, disrespectful, or unreasonable comment that is somewhat likely to make you leave a discussion or give up on sharing your perspective."
    ai_toxicity_flag_threshold_severe_toxicity: "Severe Toxicity: a very hateful, aggressive, or disrespectful comment that is very likely to make you leave a discussion or give up on sharing your perspective."
    ai_toxicity_flag_threshold_obscene: "Obscene"
    ai_toxicity_flag_threshold_identity_attack: "Identity Attack"
    ai_toxicity_flag_threshold_insult: "Insult"
    ai_toxicity_flag_threshold_threat: "Threat"
    ai_toxicity_flag_threshold_sexual_explicit: "Sexually Explicit"
    ai_toxicity_groups_bypass: "Users in these groups will not have their posts classified by the toxicity module."
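    # Sentiment module settings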
    ai_sentiment_enabled: "Enable the sentiment module."
    ai_sentiment_inference_service_api_endpoint: "URL where the API for the sentiment module is running."
    ai_sentiment_inference_service_api_key: "API key for the sentiment API."
    ai_sentiment_models: "Models to use for inference. Sentiment classifies posts in the positive/neutral/negative space. Emotion classifies them in the anger/disgust/fear/joy/neutral/sadness/surprise space."
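    # NSFW (image classification) module settings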
    ai_nsfw_detection_enabled: "Enable the NSFW module."
    ai_nsfw_inference_service_api_endpoint: "URL where the API for the NSFW module is running."
    ai_nsfw_inference_service_api_key: "API key for the NSFW API."
    ai_nsfw_flag_automatically: "Automatically flag NSFW posts that exceed the configured thresholds."
    ai_nsfw_flag_threshold_general: "General threshold for an image to be considered NSFW."
    ai_nsfw_flag_threshold_drawings: "Threshold for a drawing to be considered NSFW."
    ai_nsfw_flag_threshold_hentai: "Threshold for an image classified as hentai to be considered NSFW."
    ai_nsfw_flag_threshold_porn: "Threshold for an image classified as porn to be considered NSFW."
    ai_nsfw_flag_threshold_sexy: "Threshold for an image classified as sexy to be considered NSFW."
    ai_nsfw_models: "Models to use for NSFW inference."
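    # OpenAI API and composer AI helper settings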
    ai_openai_api_key: "API key for the OpenAI API."
    composer_ai_helper_enabled: "Enable the composer's AI helper."
    ai_helper_allowed_groups: "Users in these groups will see the AI helper button in the composer."
    ai_helper_allowed_in_pm: "Enable the composer's AI helper in PMs."
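    # Embeddings module settings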
    ai_embeddings_enabled: "Enable the embeddings module."
    ai_embeddings_discourse_service_api_endpoint: "URL where the API for the embeddings module is running."
    ai_embeddings_discourse_service_api_key: "API key for the embeddings API."
    ai_embeddings_models: "Discourse will generate embeddings for each of the models enabled here."
    ai_embeddings_semantic_suggested_model: "Model to use for suggested topics."
    ai_embeddings_generate_for_pms: "Generate embeddings for personal messages."
    ai_embeddings_semantic_suggested_topics_anons_enabled: "Use semantic search for suggested topics for anonymous users."
    ai_embeddings_pg_connection_string: "PostgreSQL connection string for the embeddings module. Needs the pgvector extension enabled and a series of tables created. See docs for more info."
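    # Illustrative only (assumed standard libpq URI format for the setting above):
    #   postgres://user:password@localhost:5432/discourse_embeddings
  # Copy shown on review queue items flagged by the AI modules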
  reviewables:
    reasons:
      flagged_by_toxicity: The AI plugin flagged this after classifying it as toxic.
      flagged_by_nsfw: The AI plugin flagged this after classifying at least one of the attached images as NSFW.