mirror of
				https://github.com/discourse/discourse-ai.git
				synced 2025-10-31 14:38:37 +00:00 
			
		
		
		
	Claude 1 costs the same as Claude 2 but is less capable. Use Claude 2 in all spots ... This also fixes streaming so it uses the far more efficient streaming protocol.
		
			
				
	
	
		
			69 lines
		
	
	
		
			1.6 KiB
		
	
	
	
		
			Ruby
		
	
	
	
	
	
			
		
		
	
	
			69 lines
		
	
	
		
			1.6 KiB
		
	
	
	
		
			Ruby
		
	
	
	
	
	
# frozen_string_literal: true

module DiscourseAi
  module AiBot
    # Bot implementation backed by Anthropic's Claude 2 completion API.
    # Builds Human/Assistant-style prompts, streams completion deltas,
    # and delegates tokenization to the Anthropic tokenizer.
    class AnthropicBot < Bot
      # Only the Claude 2 bot user is served by this implementation.
      def self.can_reply_as?(bot_user)
        bot_user.id == DiscourseAi::AiBot::EntryPoint::CLAUDE_V2_ID
      end

      # Claude expects a flat transcript terminated by an empty
      # "Assistant:" turn that it then completes.
      def bot_prompt_with_topic_context(post)
        "#{super(post).join("\n\n")}\n\nAssistant:"
      end

      # Character budget for the assembled prompt.
      # See https://console.anthropic.com/docs/prompt-design#what-is-a-prompt
      def prompt_limit
        50_000
      end

      # Same transcript shape as the reply prompt: join the turns and
      # leave a trailing "Assistant:" turn for the model to fill in.
      def title_prompt(post)
        "#{super(post).join("\n\n")}\n\nAssistant:"
      end

      # Extract the incremental completion text from one streamed chunk.
      def get_delta(partial, context)
        partial[:completion]
      end

      private

      # Intentionally a no-op: Claude has no proper function-calling
      # support. It could be simulated, but the model is currently too
      # hard to steer for that to be reliable.
      def populate_functions(partial, function)
      end

      # Render a single transcript turn. The system/function keywords are
      # accepted for interface parity with other bots but are unused here.
      def build_message(poster_username, content, system: false, function: nil)
        role =
          if poster_username == bot_user.username
            "Assistant"
          else
            "Human"
          end

        "#{role}: #{content}"
      end

      def model_for
        "claude-2"
      end

      # Ask Claude for a topic title; lower max_tokens and a slightly
      # higher temperature than regular replies.
      def get_updated_title(prompt)
        response =
          DiscourseAi::Inference::AnthropicCompletions.perform!(
            prompt,
            model_for,
            temperature: 0.7,
            max_tokens: 40,
          )
        response.dig(:completion)
      end

      # Submit the main completion request, forwarding the streaming block.
      # prefer_low_cost is ignored: there is no cheaper Claude variant worth
      # using (Claude 1 costs the same as Claude 2).
      def submit_prompt(prompt, prefer_low_cost: false, &blk)
        DiscourseAi::Inference::AnthropicCompletions.perform!(
          prompt,
          model_for,
          temperature: 0.4,
          max_tokens: 3000,
          &blk
        )
      end

      def tokenize(text)
        DiscourseAi::Tokenizer::AnthropicTokenizer.tokenize(text)
      end
    end
  end
end