mirror of
				https://github.com/discourse/discourse-ai.git
				synced 2025-10-31 06:28:48 +00:00 
			
		
		
		
	* FIX: guide GPT 3.5 better. This limits search results to 10 because we were blowing the whole token budget on search results; additionally, it includes a quick exchange at the start of a session to try to guide GPT 3.5 to follow instructions. Sadly, GPT 3.5 drifts off very quickly, but this does improve things a bit. It also attempts to correct some issues with Anthropic, though it is still surprisingly hard to ground. * add status:public — this is a bit of a hack, but it ensures that we can search for any filter provided * fix specs
		
			
				
	
	
		
			67 lines
		
	
	
		
			2.4 KiB
		
	
	
	
		
			Ruby
		
	
	
	
	
	
			
		
		
	
	
			67 lines
		
	
	
		
			2.4 KiB
		
	
	
	
		
			Ruby
		
	
	
	
	
	
# frozen_string_literal: true

RSpec.describe DiscourseAi::AiBot::OpenAiBot do
  describe "#bot_prompt_with_topic_context" do
    fab!(:topic) { Fabricate(:topic) }

    # The bot under test, built around the seeded GPT-4 bot account.
    subject(:bot) { described_class.new(bot_user) }

    # Canonical raw body for the Nth post in the topic.
    def post_body(post_number)
      "This is post #{post_number}"
    end

    # The GPT-4 bot account installed by the plugin's entry point.
    def bot_user
      User.find(DiscourseAi::AiBot::EntryPoint::GPT4_ID)
    end

    context "when the topic has one post" do
      fab!(:post_1) { Fabricate(:post, topic: topic, raw: post_body(1), post_number: 1) }

      it "includes it in the prompt" do
        messages = bot.bot_prompt_with_topic_context(post_1)

        last_message = messages.last

        expect(last_message[:role]).to eq("user")
        expect(last_message[:content]).to eq("#{post_1.user.username}: #{post_body(1)}")
      end
    end

    context "when prompt gets very long" do
      fab!(:post_1) { Fabricate(:post, topic: topic, raw: "test " * 6000, post_number: 1) }

      it "trims the prompt" do
        messages = bot.bot_prompt_with_topic_context(post_1)

        expect(messages[-2][:role]).to eq("assistant")
        expect(messages[-1][:role]).to eq("user")
        # Trimming also has to account for the system message, which we can't
        # see from here — so for now just assert that the user content ended
        # up shorter than the raw post.
        expect(messages[-1][:content].length).to be < post_1.raw.length
      end
    end

    context "when the topic has multiple posts" do
      fab!(:post_1) { Fabricate(:post, topic: topic, raw: post_body(1), post_number: 1) }
      fab!(:post_2) do
        Fabricate(:post, topic: topic, user: bot_user, raw: post_body(2), post_number: 2)
      end
      fab!(:post_3) { Fabricate(:post, topic: topic, raw: post_body(3), post_number: 3) }

      it "includes them in the prompt respecting the post number order" do
        messages = bot.bot_prompt_with_topic_context(post_3)

        # Index from the end of the array because grounding prompts may be
        # prepended ahead of the topic context.
        expect(messages[-3][:role]).to eq("user")
        expect(messages[-3][:content]).to eq("#{post_1.username}: #{post_body(1)}")

        expect(messages[-2][:role]).to eq("assistant")
        expect(messages[-2][:content]).to eq(post_body(2))

        expect(messages[-1][:role]).to eq("user")
        expect(messages[-1][:content]).to eq("#{post_3.username}: #{post_body(3)}")
      end
    end
  end
end