2017-05-26 16:26:18 -04:00
|
|
|
require_relative 'database'
|
|
|
|
require 'json'
|
|
|
|
require 'yaml'
|
|
|
|
|
|
|
|
module ImportScripts::Mbox
|
|
|
|
class Indexer
|
|
|
|
# @param database [ImportScripts::Mbox::Database] storage backend for indexed data
# @param settings [ImportScripts::Mbox::Settings] import configuration (data_dir, split_regex, ...)
def initialize(database, settings)
  @database = database
  @settings = settings
  # Cached: regex used to split multi-message mbox files into single mails.
  @split_regex = @settings.split_regex
end
|
|
|
|
|
|
|
|
# Entry point: indexes every category directory below the configured
# data_dir, then resolves reply relationships and users.
def execute
  category_dirs = Dir
    .glob(File.join(@settings.data_dir, '*'))
    .select { |path| File.directory?(path) }
    .sort

  category_dirs.each do |dir|
    puts "indexing files in #{dir}"
    category = index_category(dir)
    index_emails(dir, category[:name])
  end

  puts '', 'indexing replies and users'

  if @settings.group_messages_by_subject
    # Threading by subject: order first, then link mails sharing a subject.
    @database.sort_emails_by_subject
    @database.update_in_reply_to_by_email_subject
  else
    # Threading by headers: link first, then order by date and reply depth.
    @database.update_in_reply_to_of_emails
    @database.sort_emails_by_date_and_reply_level
  end

  @database.fill_users_from_emails
end
|
|
|
|
|
|
|
|
private
|
|
|
|
|
|
|
|
# Per-category metadata file that may accompany an mbox archive.
METADATA_FILENAME = 'metadata.yml'.freeze

# File suffixes that must never be indexed as mbox data (index/status files
# written by mailing list software). Note: 'status.yml' is a full filename,
# not an extension. Frozen to prevent accidental mutation of a constant.
IGNORED_FILE_EXTENSIONS = %w[.dbindex .dbnames .digest .subjects status.yml].freeze
|
2017-05-26 16:26:18 -04:00
|
|
|
|
|
|
|
# Reads the optional metadata.yml of a category directory, stores the
# category in the database and returns it as a Hash (:name, :description).
# Falls back to the directory basename when no name is configured.
def index_category(directory)
  metadata_file = File.join(directory, METADATA_FILENAME)

  metadata =
    if File.exist?(metadata_file)
      # workaround for YML files that contain classname in file header
      # NOTE(review): YAML.load on archive-supplied files — consider
      # YAML.safe_load if metadata can come from untrusted sources.
      cleaned_yaml = File.read(metadata_file).sub(/^--- !.*$/, '---')
      YAML.load(cleaned_yaml)
    else
      {}
    end

  category = {
    name: metadata['name'].presence || File.basename(directory),
    description: metadata['description']
  }

  @database.insert_category(category)
  category
end
|
|
|
|
|
|
|
|
# Parses every message found in a directory and stores it in the database
# together with its reply relationships. A failure to index one message is
# logged to STDERR and does not abort the remaining messages.
def index_emails(directory, category_name)
  all_messages(directory, category_name) do |receiver, filename, opts|
    begin
      msg_id = receiver.message_id
      parsed_email = receiver.mail
      from_email, from_display_name = receiver.parse_from_field(parsed_email)
      body, elided, format = receiver.select_body
      reply_message_ids = extract_reply_message_ids(parsed_email)

      email = {
        msg_id: msg_id,
        from_email: from_email,
        from_name: from_display_name,
        subject: extract_subject(receiver, category_name),
        email_date: timestamp(parsed_email.date),
        raw_message: receiver.raw_email,
        body: body,
        elided: elided,
        format: format,
        attachment_count: receiver.attachments.count,
        charset: parsed_email.charset&.downcase,
        category: category_name,
        filename: File.basename(filename),
        first_line_number: opts[:first_line_number],
        last_line_number: opts[:last_line_number],
        index_duration: (monotonic_time - opts[:start_time]).round(4)
      }

      # Email and its reply links are stored atomically.
      @database.transaction do |db|
        db.insert_email(email)
        db.insert_replies(msg_id, reply_message_ids) unless reply_message_ids.empty?
      end
    rescue StandardError => e
      # FIX: the filename interpolation was garbled ("#(unknown)") — restore
      # it so failures can actually be located in the source archive.
      if opts[:first_line_number] && opts[:last_line_number]
        STDERR.puts "Failed to index message in #{filename} at lines #{opts[:first_line_number]}-#{opts[:last_line_number]}"
      else
        STDERR.puts "Failed to index message in #{filename}"
      end

      STDERR.puts e.message
      STDERR.puts e.backtrace.inspect
    end
  end
end
|
|
|
|
|
|
|
|
# Returns a Hash mapping base filename -> stored checksum for every file
# already imported into the given category.
def imported_file_checksums(category_name)
  checksums = {}
  @database.fetch_imported_files(category_name).each do |row|
    basename = File.basename(row['filename'])
    checksums[basename] = row['checksum']
  end
  checksums
end
|
|
|
|
|
|
|
|
# Iterates over the indexable files of a directory and yields
# (receiver, filename, opts) per message. When a split_regex is configured,
# each file is treated as an mbox archive and split into single mails;
# otherwise the whole file is one message. Files that are hidden, ignored,
# or already fully indexed (unchanged checksum) are skipped; every processed
# file is marked as indexed afterwards.
def all_messages(directory, category_name)
  checksums = imported_file_checksums(category_name)

  Dir.foreach(directory) do |filename|
    filename = File.join(directory, filename)
    next if ignored_file?(filename, checksums)

    # FIX: the filename interpolation was garbled ("#(unknown)") — restore it.
    puts "indexing #{filename}"

    if @split_regex.present?
      each_mail(filename) do |raw_message, first_line_number, last_line_number|
        opts = {
          first_line_number: first_line_number,
          last_line_number: last_line_number,
          start_time: monotonic_time
        }
        receiver = read_mail_from_string(raw_message)
        yield receiver, filename, opts if receiver.present?
      end
    else
      opts = { start_time: monotonic_time }
      receiver = read_mail_from_file(filename)
      yield receiver, filename, opts if receiver.present?
    end

    mark_as_fully_indexed(category_name, filename)
  end
end
|
|
|
|
|
|
|
|
# Records a file as fully imported by storing its SHA256 checksum, so an
# unchanged file is skipped on the next run (see fully_indexed?).
def mark_as_fully_indexed(category_name, filename)
  @database.insert_imported_file({
    category: category_name,
    filename: File.basename(filename),
    checksum: calc_checksum(filename)
  })
end
|
|
|
|
|
|
|
|
# Splits an mbox file into individual messages using @split_regex and
# yields (raw_message, first_line_number, last_line_number) per message.
# The separator line itself is not included in the yielded message.
def each_mail(filename)
  message = ''
  first_line = 1
  current_line = 0

  each_line(filename) do |raw_line|
    sanitized = raw_line.scrub # replace invalid byte sequences

    if sanitized =~ @split_regex
      # A separator starts a new message; flush the previous one (if any).
      if current_line > 0
        yield message, first_line, current_line
        message = ''
        first_line = current_line + 1
      end
    else
      message << sanitized
    end

    current_line += 1
  end

  # Trailing message that was not followed by another separator.
  yield message, first_line, current_line if message.present?
end
|
|
|
|
|
|
|
|
# Yields each line of the file; transparently decompresses *.gz files.
# NOTE(review): relies on Zlib being loaded elsewhere — this file does not
# require 'zlib' itself; confirm via the require chain (e.g. 'database').
def each_line(filename)
  file = File.open(filename, 'r')
  reader = filename.end_with?('.gz') ? Zlib::GzipReader.new(file) : file

  reader.each_line { |line| yield line }
ensure
  # Closing the underlying file also releases a wrapping GzipReader's fd.
  file&.close
end
|
|
|
|
|
|
|
|
# Reads a whole file and parses its content as one mail message.
def read_mail_from_file(filename)
  read_mail_from_string(File.read(filename))
end
|
|
|
|
|
|
|
|
# Wraps a raw message in an Email::Receiver; returns nil for blank input.
def read_mail_from_string(raw_message)
  return if raw_message.blank?

  Email::Receiver.new(raw_message, convert_plaintext: true, skip_trimming: false)
end
|
|
|
|
|
|
|
|
# Extracts the message ids this mail replies to, capped at 20 parents to
# bound work on pathological References headers.
def extract_reply_message_ids(mail)
  max_ids = 20
  Email::Receiver.extract_reply_message_ids(mail, max_message_id_count: max_ids)
end
|
|
|
|
|
|
|
|
# Returns the cleaned subject of a mail with the mailing list tag
# (e.g. "[listname]", case-insensitive) removed, or nil if blank.
def extract_subject(receiver, list_name)
  subject = receiver.subject
  return nil if subject.blank?

  # TODO: make the list name (or maybe multiple names) configurable
  # Strip mailing list name from subject
  without_tag = subject.gsub(/\[#{Regexp.escape(list_name)}\]/i, '').strip

  clean_subject(without_tag)
end
|
|
|
|
|
|
|
|
# TODO: refactor and move prefixes to settings
# Recursively removes reply ("Re:", "R:", "RIF:") and forward ("Fwd:", "I:")
# prefixes plus surrounding whitespace from a subject. Recursion handles
# stacked mixed-language prefixes ("R: Fwd: Re: ...").
def clean_subject(subject)
  original_length = subject.length

  # Strip Reply prefix from title (Standard and localized)
  subject = subject.gsub(/^Re: */i, '')
  subject = subject.gsub(/^R: */i, '') #Italian
  subject = subject.gsub(/^RIF: */i, '') #Italian

  # Strip Forward prefix from title (Standard and localized)
  subject = subject.gsub(/^Fwd: */i, '')
  subject = subject.gsub(/^I: */i, '') #Italian

  # FIX: String#strip is non-destructive and its result was discarded,
  # leaving surrounding whitespace on the subject. Assign the result.
  subject = subject.strip

  # In case of mixed localized prefixes there could be many of them
  # if the mail client didn't strip the localized ones
  if original_length > subject.length
    clean_subject(subject)
  else
    subject
  end
end
|
|
|
|
|
2018-01-17 11:03:36 -05:00
|
|
|
# True when the file must not be indexed: hidden files, the metadata file,
# known index/status files, or files whose checksum shows they are already
# fully indexed.
def ignored_file?(path, checksums)
  filename = File.basename(path)

  filename.start_with?('.') ||
    filename == METADATA_FILENAME ||
    # FIX: compare by suffix instead of File.extname — the ignore list also
    # contains a full filename ('status.yml'), which File.extname (-> '.yml')
    # could never match, so status.yml files were wrongly indexed.
    IGNORED_FILE_EXTENSIONS.any? { |suffix| filename.end_with?(suffix) } ||
    fully_indexed?(path, filename, checksums)
end
|
|
|
|
|
2018-01-17 11:03:36 -05:00
|
|
|
# A file is fully indexed when a checksum was stored for it and its content
# has not changed since (checksum still matches).
def fully_indexed?(path, filename, checksums)
  stored_checksum = checksums[filename]
  return false unless stored_checksum.present?

  calc_checksum(path) == stored_checksum
end
|
|
|
|
|
|
|
|
# SHA256 hex digest of the file content; used to detect changed files.
def calc_checksum(filename)
  digest = Digest::SHA256.file(filename)
  digest.hexdigest
end
|
2018-01-17 06:03:57 -05:00
|
|
|
|
|
|
|
# Monotonic clock reading in fractional seconds — immune to wall-clock
# adjustments, used only for measuring indexing durations.
def monotonic_time
  Process.clock_gettime(Process::CLOCK_MONOTONIC)
end
|
2018-08-23 03:46:25 -04:00
|
|
|
|
|
|
|
# Converts a mail Date header value into a Unix timestamp (integer seconds);
# passes nil through unchanged.
def timestamp(datetime)
  return nil unless datetime

  Time.zone.at(datetime).to_i
end
|
2017-05-26 16:26:18 -04:00
|
|
|
end
|
|
|
|
end
|