mirror of
https://github.com/discourse/discourse-ai.git
synced 2025-03-07 01:39:54 +00:00
This PR introduces several enhancements and refactorings to the AI Persona and RAG (Retrieval-Augmented Generation) functionalities within the discourse-ai plugin. Here's a breakdown of the changes: **1. LLM Model Association for RAG and Personas:** - **New Database Columns:** Adds `rag_llm_model_id` to both `ai_personas` and `ai_tools` tables. This allows specifying a dedicated LLM for RAG indexing, separate from the persona's primary LLM. Adds `default_llm_id` and `question_consolidator_llm_id` to `ai_personas`. - **Migration:** Includes a migration (`20250210032345_migrate_persona_to_llm_model_id.rb`) to populate the new `default_llm_id` and `question_consolidator_llm_id` columns in `ai_personas` based on the existing `default_llm` and `question_consolidator_llm` string columns, and a post migration to remove the latter. - **Model Changes:** The `AiPersona` and `AiTool` models now `belong_to` an `LlmModel` via `rag_llm_model_id`. The `LlmModel.proxy` method now accepts an `LlmModel` instance instead of just an identifier. `AiPersona` now has `default_llm_id` and `question_consolidator_llm_id` attributes. - **UI Updates:** The AI Persona and AI Tool editors in the admin panel now allow selecting an LLM for RAG indexing (if PDF/image support is enabled). The RAG options component displays an LLM selector. - **Serialization:** The serializers (`AiCustomToolSerializer`, `AiCustomToolListSerializer`, `LocalizedAiPersonaSerializer`) have been updated to include the new `rag_llm_model_id`, `default_llm_id` and `question_consolidator_llm_id` attributes. **2. PDF and Image Support for RAG:** - **Site Setting:** Introduces a new hidden site setting, `ai_rag_pdf_images_enabled`, to control whether PDF and image files can be indexed for RAG. This defaults to `false`. - **File Upload Validation:** The `RagDocumentFragmentsController` now checks the `ai_rag_pdf_images_enabled` setting and allows PDF, PNG, JPG, and JPEG files if enabled. 
Error handling is included for cases where PDF/image indexing is attempted with the setting disabled. - **PDF Processing:** Adds a new utility class, `DiscourseAi::Utils::PdfToImages`, which uses ImageMagick (`magick`) to convert PDF pages into individual PNG images. A maximum PDF size and conversion timeout are enforced. - **Image Processing:** A new utility class, `DiscourseAi::Utils::ImageToText`, is included to handle OCR for the images and PDFs. - **RAG Digestion Job:** The `DigestRagUpload` job now handles PDF and image uploads. It uses `PdfToImages` and `ImageToText` to extract text and create document fragments. - **UI Updates:** The RAG uploader component now accepts PDF and image file types if `ai_rag_pdf_images_enabled` is true. The UI text is adjusted to indicate supported file types. **3. Refactoring and Improvements:** - **LLM Enumeration:** The `DiscourseAi::Configuration::LlmEnumerator` now provides a `values_for_serialization` method, which returns a simplified array of LLM data (id, name, vision_enabled) suitable for use in serializers. This avoids exposing unnecessary details to the frontend. - **AI Helper:** The `AiHelper::Assistant` now takes optional `helper_llm` and `image_caption_llm` parameters in its constructor, allowing for greater flexibility. - **Bot and Persona Updates:** Several updates were made across the codebase, migrating the string-based LLM association to the new model-based association. - **Audit Logs:** The `DiscourseAi::Completions::Endpoints::Base` now formats raw request payloads as pretty JSON for easier auditing. - **Eval Script:** An evaluation script is included. **4. Testing:** - The PR introduces a new eval system for LLMs; this allows us to test how functionality works across various LLM providers. This lives in `/evals`
138 lines
3.7 KiB
JavaScript
138 lines
3.7 KiB
JavaScript
import { module, test } from "qunit";
|
|
import AiPersona from "discourse/plugins/discourse-ai/discourse/admin/models/ai-persona";
|
|
|
|
module("Discourse AI | Unit | Model | ai-persona", function () {
  test("init properties", function (assert) {
    // Tools may be declared as bare names or as [name, options] tuples.
    const attrs = {
      tools: [
        ["ToolName", { option1: "value1", option2: "value2" }],
        "ToolName2",
        "ToolName3",
      ],
    };

    const persona = AiPersona.create(attrs);

    // Tool names are flattened to a plain list; per-tool options
    // remain addressable through getToolOption().
    assert.deepEqual(persona.tools, ["ToolName", "ToolName2", "ToolName3"]);
    assert.equal(persona.getToolOption("ToolName", "option1").value, "value1");
    assert.equal(persona.getToolOption("ToolName", "option2").value, "value2");
  });

  test("update properties", function (assert) {
    const attrs = {
      id: 1,
      name: "Test",
      tools: ["ToolName"],
      allowed_group_ids: [12],
      system: false,
      enabled: true,
      system_prompt: "System Prompt",
      priority: false,
      description: "Description",
      top_p: 0.8,
      temperature: 0.7,
      default_llm_id: 1,
      force_default_llm: false,
      user: null,
      user_id: null,
      max_context_posts: 5,
      vision_enabled: true,
      vision_max_pixels: 100,
      rag_uploads: [],
      rag_chunk_tokens: 374,
      rag_chunk_overlap_tokens: 10,
      rag_conversation_chunks: 10,
      rag_llm_model_id: 1,
      question_consolidator_llm_id: 2,
      allow_chat: false,
      tool_details: true,
      forced_tool_count: -1,
      allow_personal_messages: true,
      allow_topic_mentions: true,
      allow_chat_channel_mentions: true,
      allow_chat_direct_messages: true,
    };

    const persona = AiPersona.create({ ...attrs });
    persona.getToolOption("ToolName", "option1").value = "value1";

    const payload = persona.updateProperties();

    // The save payload remaps tools into [name, options, forced] tuples.
    attrs.tools = [["ToolName", { option1: "value1" }, false]];

    assert.deepEqual(payload, attrs);
  });

  test("create properties", function (assert) {
    const attrs = {
      id: 1,
      name: "Test",
      tools: ["ToolName"],
      allowed_group_ids: [12],
      system: false,
      enabled: true,
      system_prompt: "System Prompt",
      priority: false,
      description: "Description",
      top_p: 0.8,
      temperature: 0.7,
      user: null,
      user_id: null,
      default_llm_id: 1,
      max_context_posts: 5,
      vision_enabled: true,
      vision_max_pixels: 100,
      rag_uploads: [],
      rag_chunk_tokens: 374,
      rag_chunk_overlap_tokens: 10,
      rag_conversation_chunks: 10,
      question_consolidator_llm_id: 2,
      allow_chat: false,
      tool_details: true,
      forced_tool_count: -1,
      allow_personal_messages: true,
      allow_topic_mentions: true,
      allow_chat_channel_mentions: true,
      allow_chat_direct_messages: true,
      force_default_llm: false,
      rag_llm_model_id: 1,
    };

    const persona = AiPersona.create({ ...attrs });
    persona.getToolOption("ToolName", "option1").value = "value1";

    const payload = persona.createProperties();

    // Same tool remapping applies to the create payload.
    attrs.tools = [["ToolName", { option1: "value1" }, false]];

    assert.deepEqual(payload, attrs);
  });

  test("working copy", function (assert) {
    const persona = AiPersona.create({
      name: "Test",
      tools: ["ToolName"],
    });
    persona.getToolOption("ToolName", "option1").value = "value1";

    const copy = persona.workingCopy();

    // The working copy carries over the name, tool list, and tool options.
    assert.equal(copy.name, "Test");
    assert.equal(copy.getToolOption("ToolName", "option1").value, "value1");
    assert.deepEqual(copy.tools, ["ToolName"]);
  });
});
|