# frozen_string_literal: true

RSpec.describe DiscourseAi::Summarization::Strategies::FoldContent do
  describe "#summarize" do
    subject(:strategy) { described_class.new(model) }

    let(:summarize_text) { "This is a text" }

    let(:model_tokens) do
      # Make sure each content fits in a single chunk.
      # 700 is the number of tokens reserved for the prompt.
      # The extra tokens account for the formatted-content wrapper the strategy
      # builds around each item ("(<id> <poster> said: <text> ").
      700 + DiscourseAi::Tokenizer::OpenAiTokenizer.size("(1 asd said: This is a text ") + 3
    end

    let(:model) do
      DiscourseAi::Summarization::Models::OpenAi.new("fake:fake", max_tokens: model_tokens)
    end

    let(:content) { { contents: [{ poster: "asd", id: 1, text: summarize_text }] } }

    let(:single_summary) { "this is a single summary" }
    let(:concatenated_summary) { "this is a concatenated summary" }

    let(:user) { User.new }

    context "when the content to summarize fits in a single call" do
      it "does one call to summarize content" do
        result =
          DiscourseAi::Completions::Llm.with_prepared_responses([single_summary]) do |spy|
            strategy.summarize(content, user).tap { expect(spy.completions).to eq(1) }
          end

        expect(result[:summary]).to eq(single_summary)
      end
    end

    context "when the content to summarize doesn't fit in a single call" do
      it "summarizes each chunk and then concatenates them" do
        content[:contents] << { poster: "asd2", id: 2, text: summarize_text }

        # Two chunk summaries plus one final concatenation pass = 3 completions.
        result =
          DiscourseAi::Completions::Llm.with_prepared_responses(
            [single_summary, single_summary, concatenated_summary],
          ) { |spy| strategy.summarize(content, user).tap { expect(spy.completions).to eq(3) } }

        expect(result[:summary]).to eq(concatenated_summary)
      end

      it "keeps splitting into chunks until the content fits into a single call to create a cohesive narrative" do
        content[:contents] << { poster: "asd2", id: 2, text: summarize_text }
        # A response at max length forces the strategy to fold the intermediate
        # summaries again before producing the final one.
        max_length_response = "(1 asd said: This is a text "
        chunk_of_chunks = "I'm smol"

        # 2 chunk summaries + 2 fold passes + 1 final concatenation = 5 completions.
        result =
          DiscourseAi::Completions::Llm.with_prepared_responses(
            [
              max_length_response,
              max_length_response,
              chunk_of_chunks,
              chunk_of_chunks,
              concatenated_summary,
            ],
          ) { |spy| strategy.summarize(content, user).tap { expect(spy.completions).to eq(5) } }

        expect(result[:summary]).to eq(concatenated_summary)
      end
    end
  end
end