# frozen_string_literal: true

require 'method_profiler'

# This module allows us to hijack a request and send the response to the client from the deferred job queue
# For cases where we are making remote calls like onebox or proxying files and so on, this helps
# free up a unicorn worker while the remote IO is happening
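#
# A minimal usage sketch (hypothetical controller action; the action name, URL
# and use of Net::HTTP are illustrative only, not part of this module):
#
#   def proxy
#     hijack(info: "proxy example") do
#       body = Net::HTTP.get(URI("https://example.com/resource"))
#       render plain: body
#     end
#   end
#
# The block is later instance_eval'd on a fresh instance of the controller from
# the deferred queue, so it should not rely on instance variables that were set
# outside of it.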
module Hijack

  def hijack(info: nil, &blk)
    controller_class = self.class

    if hijack = request.env['rack.hijack']

      request.env['discourse.request_tracker.skip'] = true
      request_tracker = request.env['discourse.request_tracker']

      # in the past unicorn would recycle env, this is no longer the case
      env = request.env

      # rack may clean up tempfiles unless we trick it and take control
      tempfiles = env[Rack::RACK_TEMPFILES]
      env[Rack::RACK_TEMPFILES] = nil
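      # (the tempfiles are closed and unlinked manually in the ensure block
      # further down, via tempfiles&.each(&:close!))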
      request_copy = ActionDispatch::Request.new(env)

      transfer_timings = MethodProfiler.transfer

      io = hijack.call
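      # (calling the rack.hijack proc performs the full hijack and returns the raw
      # socket IO, so from here on we are responsible for writing the entire HTTP
      # response ourselves)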
      # duplicate headers so other middleware does not mess with it
      # on the way down the stack
      original_headers = response.headers.dup

      Scheduler::Defer.later("hijack #{params["controller"]} #{params["action"]} #{info}") do

        MethodProfiler.start(transfer_timings)
        begin
          Thread.current[Logster::Logger::LOGSTER_ENV] = env
          # do this first to confirm we have a working connection
          # before doing any work
          io.write "HTTP/1.1 "
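          # (only the protocol prefix goes out at this point; the status code,
          # headers and body are written further down once the block has run)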
          # this trick avoids double render, also avoids any litter that the controller hooks
          # place on the response
          instance = controller_class.new
          response = ActionDispatch::Response.new
          instance.response = response
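          # (blk is evaluated against this fresh instance below, so nothing is
          # rendered twice on the controller that renders the 418 sentinel at
          # the end of this method)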
          instance.request = request_copy
          original_headers&.each do |k, v|
            instance.response.headers[k] = v
          end

          view_start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          begin
            instance.instance_eval(&blk)
          rescue => e
            # TODO we need to reuse our exception handling in ApplicationController
            Discourse.warn_exception(e, message: "Failed to process hijacked response correctly", env: env)
          end
          view_runtime = Process.clock_gettime(Process::CLOCK_MONOTONIC) - view_start

          unless instance.response_body || response.committed?
            instance.status = 500
          end

          response.commit!

          body = response.body

          headers = response.headers
          # add cors if needed
          if cors_origins = env[Discourse::Cors::ORIGINS_ENV]
            Discourse::Cors.apply_headers(cors_origins, env, headers)
          end

          headers['Content-Type'] ||= response.content_type || "text/plain"
          headers['Content-Length'] = body.bytesize
          headers['Connection'] = "close"
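          # (keep-alive is not supported on the hijacked socket, it is closed in
          # the ensure block below)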
          if env[Auth::DefaultCurrentUserProvider::BAD_TOKEN]
            headers['Discourse-Logged-Out'] = '1'
          end

          status_string = Rack::Utils::HTTP_STATUS_CODES[response.status.to_i] || "Unknown"
          io.write "#{response.status} #{status_string}\r\n"

          timings = MethodProfiler.stop
          if timings && duration = timings[:total_duration]
            headers["X-Runtime"] = "#{"%0.6f" % duration}"
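            # (duration is in seconds, matching the format Rack::Runtime uses for
            # X-Runtime)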
          end

          headers.each do |name, val|
            io.write "#{name}: #{val}\r\n"
          end

          io.write "\r\n"
          io.write body
        rescue Errno::EPIPE, IOError
          # happens if client terminated before we responded, ignore
          io = nil
        ensure

          if Rails.configuration.try(:lograge).try(:enabled)
            if timings
              db_runtime = 0
              if timings[:sql]
                db_runtime = timings[:sql][:duration]
              end

              subscriber = Lograge::LogSubscribers::ActionController.new
              payload = ActiveSupport::HashWithIndifferentAccess.new(
                controller: self.class.name,
                action: action_name,
                params: request.filtered_parameters,
                headers: request.headers,
                format: request.format.ref,
                method: request.request_method,
                path: request.fullpath,
                view_runtime: view_runtime * 1000.0,
                db_runtime: db_runtime * 1000.0,
                timings: timings,
                status: response.status
              )

              event = ActiveSupport::Notifications::Event.new("hijack", Time.now, Time.now + timings[:total_duration], "", payload)
              subscriber.process_action(event)
            end
          end

          MethodProfiler.clear
          Thread.current[Logster::Logger::LOGSTER_ENV] = nil

          io.close if io rescue nil

          if request_tracker
            status = response.status rescue 500
            request_tracker.log_request_info(env, [status, headers || {}, []], timings)
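            # (an empty body array is passed here since the real body has already
            # been written directly to the hijacked socket)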
          end

          tempfiles&.each(&:close!)
        end
      end

      # not leaked out to the client, we use 418 ... I am a teapot to denote that the request was hijacked
      render plain: "", status: 418
    else
      blk.call
    end
  end
end