Sam 47f5da7e42
FEATURE: Add AI-powered spam detection for new user posts (#1004)
This introduces a comprehensive spam detection system that uses an LLM
to automatically identify and flag potential spam posts. The system is
designed to be both powerful and configurable while minimizing false
positives.

Key Features:
* Automatically scans the first 3 posts from new users (TL0/TL1)
* Creates dedicated AI flagging user to distinguish from system flags
* Tracks false positives/negatives for quality monitoring
* Supports custom instructions to fine-tune detection
* Includes test interface for trying detection on any post

Technical Implementation:
* New database tables:
  - ai_spam_logs: Stores scan history and results
  - ai_moderation_settings: Stores LLM config and custom instructions
* Rate limiting and safeguards (see the sketch after this list):
  - Minimum 10-minute delay between rescans
  - Only scans significant edits (>10 char difference)
  - Maximum 3 scans per post
  - 24-hour maximum age for scannable posts
* Admin UI features:
  - Real-time testing capabilities
  - 7-day statistics dashboard
  - Configurable LLM model selection
  - Custom instruction support
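
Taken together, the rescan safeguards reduce to a small eligibility check.
A rough sketch of the rules above (JavaScript for illustration only; the
actual checks live in the plugin's server-side code, and all names here
are hypothetical):

    const MAX_SCANS_PER_POST = 3;
    const RESCAN_DELAY_MS = 10 * 60 * 1000; // 10-minute gap between rescans
    const MIN_EDIT_DELTA = 10; // only rescan "significant" edits (>10 chars)
    const MAX_POST_AGE_MS = 24 * 60 * 60 * 1000; // posts older than a day are skipped

    function shouldScan(post, lastScan, now = Date.now()) {
      if (post.scanCount >= MAX_SCANS_PER_POST) return false;
      if (now - post.createdAt > MAX_POST_AGE_MS) return false;
      if (lastScan) {
        if (now - lastScan.scannedAt < RESCAN_DELAY_MS) return false;
        if (Math.abs(post.raw.length - lastScan.rawLength) <= MIN_EDIT_DELTA) return false;
      }
      return true;
    }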

Security and Performance:
* Respects trust levels - only scans TL0/TL1 users
* Skips private messages entirely
* Stops scanning users after 3 successful public posts
* Includes comprehensive test coverage
* Maintains audit log of all scan attempts


---------

Co-authored-by: Keegan George <kgeorge13@gmail.com>
Co-authored-by: Martin Brennan <martin@discourse.org>
2024-12-12 09:17:25 +11:00

import Component from "@glimmer/component";
import { tracked } from "@glimmer/tracking";
import { fn } from "@ember/helper";
import { on } from "@ember/modifier";
import { action } from "@ember/object";
import { LinkTo } from "@ember/routing";
import { service } from "@ember/service";
import DButton from "discourse/components/d-button";
import DToggleSwitch from "discourse/components/d-toggle-switch";
import DTooltip from "discourse/components/d-tooltip";
import withEventValue from "discourse/helpers/with-event-value";
import { ajax } from "discourse/lib/ajax";
import { popupAjaxError } from "discourse/lib/ajax-error";
import i18n from "discourse-common/helpers/i18n";
import getURL from "discourse-common/lib/get-url";
import AdminConfigAreaCard from "admin/components/admin-config-area-card";
import AdminPageSubheader from "admin/components/admin-page-subheader";
import ComboBox from "select-kit/components/combo-box";
import SpamTestModal from "./modal/spam-test-modal";
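
// Admin-facing component for the AI spam scanner: wires the enable
// toggle, LLM picker, custom instructions, and the 7-day stats card to
// the plugin's ai-spam admin endpoint.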
export default class AiSpam extends Component {
  @service siteSettings;
  @service toasts;
  @service modal;

  @tracked stats = {
    scanned_count: 0,
    spam_detected: 0,
    false_positives: 0,
    false_negatives: 0,
    daily_data: [],
  };
  @tracked isEnabled = false;
  @tracked selectedLLM = null;
  @tracked customInstructions = "";

  constructor() {
    super(...arguments);
    this.initializeFromModel();
  }
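
  // Seeds local state from the serialized model. When no LLM is
  // configured yet, preselects the first available one and remembers
  // that it was auto-selected so toggleEnabled can persist the choice.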
  @action
  initializeFromModel() {
    const model = this.args.model;
    this.isEnabled = model.is_enabled;

    if (model.llm_id) {
      this.selectedLLM = "custom:" + model.llm_id;
    } else if (this.availableLLMs.length) {
      this.selectedLLM = this.availableLLMs[0].id;
      this.autoSelectedLLM = true;
    }

    this.customInstructions = model.custom_instructions;
    this.stats = model.stats;
  }
  get availableLLMs() {
    return this.args.model?.available_llms || [];
  }
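
  // Optimistic toggle: flip the switch immediately, persist via PUT,
  // and roll back if the request fails. Also persists the auto-selected
  // LLM the first time the feature is enabled.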
  @action
  async toggleEnabled() {
    this.isEnabled = !this.isEnabled;
    const data = { is_enabled: this.isEnabled };
    if (this.autoSelectedLLM) {
      data.llm_model_id = this.llmId;
    }

    try {
      const response = await ajax("/admin/plugins/discourse-ai/ai-spam.json", {
        type: "PUT",
        data,
      });
      this.autoSelectedLLM = false;
      this.isEnabled = response.is_enabled;
    } catch (error) {
      this.isEnabled = !this.isEnabled;
      popupAjaxError(error);
    }
  }
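
  // selectedLLM is stored as "custom:<id>"; the server expects the bare id.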
  get llmId() {
    return this.selectedLLM.toString().split(":")[1];
  }

  @action
  updateLLM(value) {
    this.selectedLLM = value;
  }
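
  // Persists the LLM choice and custom instructions, confirming with a toast.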
  @action
  async save() {
    try {
      await ajax("/admin/plugins/discourse-ai/ai-spam.json", {
        type: "PUT",
        data: {
          llm_model_id: this.llmId,
          custom_instructions: this.customInstructions,
        },
      });
      this.toasts.success({
        data: { message: i18n("discourse_ai.spam.settings_saved") },
        duration: 2000,
      });
    } catch (error) {
      popupAjaxError(error);
    }
  }
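
  // Opens the modal that runs the current configuration against an
  // arbitrary post without flagging it.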
  @action
  showTestModal() {
    this.modal.show(SpamTestModal, {
      model: {
        customInstructions: this.customInstructions,
        llmId: this.llmId,
      },
    });
  }
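
  // Shapes the stats into label/value pairs for the dashboard. The
  // "spam detected" metric links to the review queue filtered by the
  // dedicated AI flagging user when one is configured.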
  get metrics() {
    const detected = {
      label: "discourse_ai.spam.spam_detected",
      value: this.stats.spam_detected,
    };
    if (this.args.model.flagging_username) {
      detected.href = getURL(
        "/review?flagged_by=" + this.args.model.flagging_username
      );
    }

    return [
      {
        label: "discourse_ai.spam.scanned_count",
        value: this.stats.scanned_count,
      },
      detected,
      {
        label: "discourse_ai.spam.false_positives",
        value: this.stats.false_positives,
      },
      {
        label: "discourse_ai.spam.false_negatives",
        value: this.stats.false_negatives,
      },
    ];
  }

  <template>
    <div class="ai-spam">
      <section class="ai-spam__settings">
        <AdminPageSubheader
          @titleLabel="discourse_ai.spam.title"
          @descriptionLabel="discourse_ai.spam.spam_description"
        />

        <div class="control-group ai-spam__enabled">
          <DToggleSwitch
            class="ai-spam__toggle"
            @state={{this.isEnabled}}
            @label="discourse_ai.spam.enable"
            {{on "click" this.toggleEnabled}}
          />
          <DTooltip
            @icon="circle-question"
            @content={{i18n "discourse_ai.spam.spam_tip"}}
          />
        </div>

        <div class="ai-spam__llm">
          <label class="ai-spam__llm-label">{{i18n
              "discourse_ai.spam.select_llm"
            }}</label>
          {{#if this.availableLLMs.length}}
            <ComboBox
              @value={{this.selectedLLM}}
              @content={{this.availableLLMs}}
              @onChange={{this.updateLLM}}
              class="ai-spam__llm-selector"
            />
          {{else}}
            <span class="ai-spam__llm-placeholder">
              <LinkTo @route="adminPlugins.show.discourse-ai-llms.index">
                {{i18n "discourse_ai.spam.no_llms"}}
              </LinkTo>
            </span>
          {{/if}}
        </div>

        <div class="ai-spam__instructions">
          <label class="ai-spam__instructions-label">
            {{i18n "discourse_ai.spam.custom_instructions"}}
            <DTooltip
              @icon="circle-question"
              @content={{i18n "discourse_ai.spam.custom_instructions_help"}}
            />
          </label>
          <textarea
            class="ai-spam__instructions-input"
            placeholder={{i18n
              "discourse_ai.spam.custom_instructions_placeholder"
            }}
            {{on "input" (withEventValue (fn (mut this.customInstructions)))}}
          >{{this.customInstructions}}</textarea>
          <DButton
            @action={{this.save}}
            @label="discourse_ai.spam.save_button"
            class="ai-spam__instructions-save btn-primary"
          />
          <DButton
            @action={{this.showTestModal}}
            @label="discourse_ai.spam.test_button"
            class="btn-default"
          />
        </div>
      </section>

      <AdminConfigAreaCard
        @heading="discourse_ai.spam.last_seven_days"
        class="ai-spam__stats"
      >
        <:content>
          <div class="ai-spam__metrics">
            {{#each this.metrics as |metric|}}
              <div class="ai-spam__metrics-item">
                <span class="ai-spam__metrics-label">{{i18n metric.label}}</span>
                {{#if metric.href}}
                  <a href={{metric.href}} class="ai-spam__metrics-value">
                    {{metric.value}}
                  </a>
                {{else}}
                  <span class="ai-spam__metrics-value">{{metric.value}}</span>
                {{/if}}
              </div>
            {{/each}}
          </div>
        </:content>
      </AdminConfigAreaCard>
    </div>
  </template>
}
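
For reference, the payloads this component PUTs to
/admin/plugins/discourse-ai/ai-spam.json can be read off the code above;
the values shown here are illustrative:

    // from toggleEnabled (llm_model_id only when the LLM was auto-selected)
    { "is_enabled": true, "llm_model_id": "1" }

    // from save
    { "llm_model_id": "1", "custom_instructions": "Be stricter with link-only posts." }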