2019-05-02 18:17:27 -04:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2017-07-21 15:29:04 -04:00
|
|
|
module RetrieveTitle
|
2018-01-28 23:36:52 -05:00
|
|
|
CRAWL_TIMEOUT = 1
|
2017-07-21 15:29:04 -04:00
|
|
|
|
2022-05-23 06:52:06 -04:00
|
|
|
def self.crawl(url, max_redirects: nil, initial_https_redirect_ignore_limit: false)
|
|
|
|
fetch_title(
|
|
|
|
url,
|
|
|
|
max_redirects: max_redirects,
|
|
|
|
initial_https_redirect_ignore_limit: initial_https_redirect_ignore_limit
|
|
|
|
)
|
2022-06-09 15:30:22 -04:00
|
|
|
rescue Net::ReadTimeout
|
|
|
|
# do nothing for Net::ReadTimeout errors
|
2017-07-21 15:29:04 -04:00
|
|
|
end
|
|
|
|
|
2021-01-04 14:32:08 -05:00
|
|
|
def self.extract_title(html, encoding = nil)
|
2017-07-21 15:29:04 -04:00
|
|
|
title = nil
|
2021-09-03 03:45:58 -04:00
|
|
|
if html =~ /<title>/ && html !~ /<\/title>/
|
|
|
|
return nil
|
|
|
|
end
|
2021-01-04 14:32:08 -05:00
|
|
|
if doc = Nokogiri::HTML5(html, nil, encoding)
|
2017-07-21 15:29:04 -04:00
|
|
|
|
2017-08-02 14:27:21 -04:00
|
|
|
title = doc.at('title')&.inner_text
|
|
|
|
|
2017-09-28 09:29:50 -04:00
|
|
|
# A horrible hack - YouTube uses `document.title` to populate the title
|
|
|
|
# for some reason. For any other site than YouTube this wouldn't be worth it.
|
|
|
|
if title == "YouTube" && html =~ /document\.title *= *"(.*)";/
|
|
|
|
title = Regexp.last_match[1].sub(/ - YouTube$/, '')
|
|
|
|
end
|
|
|
|
|
2017-08-02 14:27:21 -04:00
|
|
|
if !title && node = doc.at('meta[property="og:title"]')
|
2017-07-21 15:29:04 -04:00
|
|
|
title = node['content']
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
if title.present?
|
|
|
|
title.gsub!(/\n/, ' ')
|
|
|
|
title.gsub!(/ +/, ' ')
|
|
|
|
title.strip!
|
|
|
|
return title
|
|
|
|
end
|
|
|
|
nil
|
|
|
|
end
|
|
|
|
|
|
|
|
private
|
|
|
|
|
|
|
|
def self.max_chunk_size(uri)
|
2022-02-09 16:53:27 -05:00
|
|
|
# Exception for sites that leave the title until very late.
|
|
|
|
return 500 if uri.host =~ /(^|\.)amazon\.(com|ca|co\.uk|es|fr|de|it|com\.au|com\.br|cn|in|co\.jp|com\.mx)$/
|
|
|
|
return 300 if uri.host =~ /(^|\.)youtube\.com$/ || uri.host =~ /(^|\.)youtu\.be$/
|
|
|
|
return 50 if uri.host =~ /(^|\.)github\.com$/
|
2017-07-21 15:29:04 -04:00
|
|
|
|
2021-09-03 03:45:58 -04:00
|
|
|
# default is 20k
|
|
|
|
20
|
2017-07-21 15:29:04 -04:00
|
|
|
end
|
2018-01-28 23:36:52 -05:00
|
|
|
|
|
|
|
  # Fetch the beginning of a HTML document at a url.
  #
  # Streams the response in chunks, attempting title extraction after each
  # chunk, and stops (via `throw :done`) as soon as a title is found or the
  # per-host download budget is exceeded.
  # NOTE(review): assumes FinalDestination#get runs its block inside a
  # `catch(:done)` so the throws below terminate the download — confirm.
  def self.fetch_title(url, max_redirects: nil, initial_https_redirect_ignore_limit: false)
    fd = FinalDestination.new(
      url,
      timeout: CRAWL_TIMEOUT,
      stop_at_blocked_pages: true,
      max_redirects: max_redirects,
      initial_https_redirect_ignore_limit: initial_https_redirect_ignore_limit
    )

    current = nil   # accumulated response body so far
    title = nil
    encoding = nil  # charset sniffed from the content-type header, if valid

    fd.get do |_response, chunk, uri|
      # Redirect responses carry no useful body — skip them entirely.
      unless Net::HTTPRedirection === _response
        # A blank uri means there is nothing sensible left to fetch.
        throw :done if uri.blank?

        # Append this chunk to what we have downloaded so far.
        if current
          current << chunk
        else
          current = chunk
        end

        # Sniff the charset from the content-type header once; only keep it
        # if Ruby actually knows an encoding by that name.
        if !encoding && content_type = _response['content-type']&.strip&.downcase
          if content_type =~ /charset="?([a-z0-9_-]+)"?/
            encoding = Regexp.last_match(1)
            if !Encoding.list.map(&:name).map(&:downcase).include?(encoding)
              encoding = nil
            end
          end
        end

        # max_chunk_size is expressed in kilobytes; convert to bytes.
        max_size = max_chunk_size(uri) * 1024
        title = extract_title(current, encoding)
        # Stop as soon as we have a title, or once we exceed the budget.
        throw :done if title || max_size < current.length
      end
    end
    title
  end
|
2017-07-21 15:29:04 -04:00
|
|
|
end
|