en:
  site_settings:
    ai_enabled: "Enable the Discourse AI plugin."
    ai_toxicity_enabled: "Enable the toxicity module."
    ai_toxicity_inference_service_api_endpoint: "URL where the API is running for the toxicity module"
    ai_toxicity_inference_service_api_key: "API key for the toxicity API"
    ai_toxicity_inference_service_api_model: "Model to use for inference. The multilingual model works with Italian, French, Russian, Portuguese, Spanish, and Turkish."
    ai_toxicity_flag_automatically: "Automatically flag posts / chat messages that are above the configured thresholds."
    ai_toxicity_flag_threshold_toxicity: "Toxicity: a rude, disrespectful, or unreasonable comment that is somewhat likely to make you leave a discussion or give up on sharing your perspective."
    ai_toxicity_flag_threshold_severe_toxicity: "Severe Toxicity: a very hateful, aggressive, or disrespectful comment that is very likely to make you leave a discussion or give up on sharing your perspective."
    ai_toxicity_flag_threshold_obscene: "Obscene"
    ai_toxicity_flag_threshold_identity_attack: "Identity Attack"
    ai_toxicity_flag_threshold_insult: "Insult"
    ai_toxicity_flag_threshold_threat: "Threat"
    ai_toxicity_flag_threshold_sexual_explicit: "Sexually Explicit"
    ai_toxicity_groups_bypass: "Users in these groups will not have their posts classified by the toxicity module."
    ai_sentiment_enabled: "Enable the sentiment module."
    ai_sentiment_inference_service_api_endpoint: "URL where the API is running for the sentiment module"
    ai_sentiment_inference_service_api_key: "API key for the sentiment API"
    ai_sentiment_models: "Models to use for inference. Sentiment classifies posts on the positive/neutral/negative space. Emotion classifies them on the anger/disgust/fear/joy/neutral/sadness/surprise space."
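# ---------------------------------------------------------------------------
# A minimal companion sketch, not part of this locale file: in a Discourse
# plugin, each key under `site_settings:` above supplies the admin-UI
# description for a setting of the same name declared in the plugin's
# config/settings.yml. The entries and default values below are hypothetical
# placeholders for illustration only, not the plugin's actual definitions.
#
# plugins:
#   ai_enabled:
#     default: false
#     client: true
#   ai_toxicity_enabled:
#     default: false
#   ai_toxicity_flag_automatically:
#     default: false
#   ai_toxicity_flag_threshold_toxicity:
#     default: 80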