# frozen_string_literal: true
# See http://unicorn.bogomips.org/Unicorn/Configurator.html
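
# If LOGSTASH_UNICORN_URI is set to a non-empty value, unicorn's logger is
# replaced with DiscourseLogstashLogger so its output is shipped to Logstash.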
if (ENV["LOGSTASH_UNICORN_URI"] || "").length > 0
  require_relative '../lib/discourse_logstash_logger'
  require_relative '../lib/unicorn_logstash_patch'
  logger DiscourseLogstashLogger.logger(uri: ENV['LOGSTASH_UNICORN_URI'], type: :unicorn)
end

discourse_path = File.expand_path(File.expand_path(File.dirname(__FILE__)) + "/../")

# tune down if not enough ram
worker_processes (ENV["UNICORN_WORKERS"] || 3).to_i

working_directory discourse_path

# listen "#{discourse_path}/tmp/sockets/unicorn.sock"
listen ENV["UNICORN_LISTENER"] || "#{(ENV["UNICORN_BIND_ALL"] ? "" : "127.0.0.1:")}#{(ENV["UNICORN_PORT"] || 3000).to_i}"
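
# Illustrative only, assuming the stock `unicorn` launcher (Discourse's own
# scripts may wrap this differently):
#   UNICORN_WORKERS=8 UNICORN_PORT=3000 bundle exec unicorn -c config/unicorn.conf.rb
# runs 8 workers listening on 127.0.0.1:3000; set UNICORN_BIND_ALL to listen on
# all interfaces, or UNICORN_LISTENER to pick an explicit address or socket.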

if !File.exist?("#{discourse_path}/tmp/pids")
  FileUtils.mkdir_p("#{discourse_path}/tmp/pids")
end

# feel free to point this anywhere accessible on the filesystem
pid (ENV["UNICORN_PID_PATH"] || "#{discourse_path}/tmp/pids/unicorn.pid")

if ENV["RAILS_ENV"] == "development" || !ENV["RAILS_ENV"]
  logger Logger.new($stdout)

  # we want a longer timeout in dev because the first request can be really slow
  timeout (ENV["UNICORN_TIMEOUT"] && ENV["UNICORN_TIMEOUT"].to_i || 60)
else
  # By default, the Unicorn logger will write to stderr.
  # Additionally, some applications/frameworks log to stderr or stdout,
  # so prevent them from going to /dev/null when daemonized here:
  stderr_path "#{discourse_path}/log/unicorn.stderr.log"
  stdout_path "#{discourse_path}/log/unicorn.stdout.log"

  # nuke workers after 30 seconds instead of 60 seconds (the default)
  timeout 30
end

# important for Ruby 2.0
preload_app true

# Enable this flag to have unicorn test client connections by writing the
# beginning of the HTTP headers before calling the application. This
# prevents calling the application for connections that have disconnected
# while queued. This is only guaranteed to detect clients on the same
# host unicorn runs on, and unlikely to detect disconnects even on a
# fast LAN.
check_client_connection false

initialized = false
before_fork do |server, worker|
  unless initialized
    Discourse.preload_rails!

    # V8 does not support forking, make sure all contexts are disposed
    ObjectSpace.each_object(MiniRacer::Context) { |c| c.dispose }

    # get rid of rubbish so we don't share it
    # longer term we will use compact! here
    GC.start
    GC.start
    GC.start

    initialized = true
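
    # Watchdog: when UNICORN_SUPERVISOR_PID is set, poll /proc for that pid
    # and terminate this master once the supervising process goes away.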
    supervisor = ENV['UNICORN_SUPERVISOR_PID'].to_i
    if supervisor > 0
      Thread.new do
        while true
          unless File.exist?("/proc/#{supervisor}")
            puts "Kill self, supervisor is gone"
            Process.kill "TERM", Process.pid
          end
          sleep 2
        end
      end
    end
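
    # UNICORN_SIDEKIQS > 0 tells the master to fork and supervise that many
    # Sidekiq demon processes; SIGTSTP stops them, and USR1 is re-issued to
    # them while still chaining unicorn's default handler.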
    sidekiqs = ENV['UNICORN_SIDEKIQS'].to_i
    if sidekiqs > 0
      puts "Starting up #{sidekiqs} supervised sidekiqs"

      require 'demon/sidekiq'
      Demon::Sidekiq.after_fork do
        DiscourseEvent.trigger(:sidekiq_fork_started)
      end

      Demon::Sidekiq.start(sidekiqs)

      Signal.trap("SIGTSTP") do
        STDERR.puts "#{Time.now}: Issuing stop to sidekiq"
        Demon::Sidekiq.stop
      end

      # Trap USR1, so we can re-issue to sidekiq workers
      # but chain the default unicorn implementation as well
      old_handler = Signal.trap("USR1") do
        Demon::Sidekiq.kill("USR1")
        old_handler.call
      end
    end

    if ENV['DISCOURSE_ENABLE_EMAIL_SYNC_DEMON'] == 'true'
      puts "Starting up EmailSync demon"
      Demon::EmailSync.start

      Signal.trap("SIGTSTP") do
        STDERR.puts "#{Time.now}: Issuing stop to EmailSync"
        Demon::EmailSync.stop
      end
    end
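
    # The unicorn master calls master_sleep on every pass through its main
    # loop, so we patch it to also watch over the Sidekiq / EmailSync demons:
    # ensure they are running, and restart them when heartbeats stall or
    # memory use grows past the configured limits.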
    class ::Unicorn::HttpServer
      alias :master_sleep_orig :master_sleep
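
      # Largest resident set size, in bytes, across all running Sidekiq
      # processes; `ps` reports RSS in kilobytes, hence the * 1024.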
      def max_sidekiq_rss
        rss = `ps -eo rss,args | grep sidekiq | grep -v grep | awk '{print $1}'`
          .split("\n")
          .map(&:to_i)
          .max

        rss ||= 0

        rss * 1024
      end

      def max_allowed_sidekiq_rss
        [ENV['UNICORN_SIDEKIQ_MAX_RSS'].to_i, 500].max.megabytes
      end
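
      # Last resort after a restart attempt: KILL any Sidekiq process whose
      # memory is still above the allowed maximum.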
      def force_kill_rogue_sidekiq
        info = `ps -eo pid,rss,args | grep sidekiq | grep -v grep | awk '{print $1,$2}'`
        info.split("\n").each do |row|
          pid, mem = row.split(" ").map(&:to_i)
          if pid > 0 && (mem * 1024) > max_allowed_sidekiq_rss
            Rails.logger.warn "Detected rogue Sidekiq pid #{pid} mem #{mem * 1024}, killing"
            Process.kill("KILL", pid) rescue nil
          end
        end
      end
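
      # Every 30 minutes, verify that the Sidekiq heartbeat job is still
      # reporting in and that memory use is within bounds; restart the
      # Sidekiq demons when either check fails.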
      def check_sidekiq_heartbeat
        @sidekiq_heartbeat_interval ||= 30.minutes
        @sidekiq_next_heartbeat_check ||= Time.now.to_i + @sidekiq_heartbeat_interval

        if @sidekiq_next_heartbeat_check < Time.now.to_i
          last_heartbeat = Jobs::RunHeartbeat.last_heartbeat
          restart = false

          sidekiq_rss = max_sidekiq_rss
          if sidekiq_rss > max_allowed_sidekiq_rss
            Rails.logger.warn("Sidekiq is consuming too much memory (using: %0.2fM) for '%s', restarting" % [(sidekiq_rss.to_f / 1.megabyte), ENV["DISCOURSE_HOSTNAME"]])
            restart = true
          end

          if last_heartbeat < Time.now.to_i - @sidekiq_heartbeat_interval
            STDERR.puts "Sidekiq heartbeat test failed, restarting"
            Rails.logger.warn "Sidekiq heartbeat test failed, restarting"
            restart = true
          end

          @sidekiq_next_heartbeat_check = Time.now.to_i + @sidekiq_heartbeat_interval

          if restart
            Demon::Sidekiq.restart
            sleep 10
            force_kill_rogue_sidekiq
          end

          Discourse.redis.close
        end
      end
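
      # The EmailSync demon gets the same treatment: its RSS is measured via
      # `ps` and its heartbeat is read back from Redis.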
      def max_email_sync_rss
        return 0 if Demon::EmailSync.demons.empty?

        email_sync_pids = Demon::EmailSync.demons.map { |uid, demon| demon.pid }
        return 0 if email_sync_pids.empty?

        rss = `ps -eo pid,rss,args | grep '#{email_sync_pids.join('|')}' | grep -v grep | awk '{print $2}'`
          .split("\n")
          .map(&:to_i)
          .max

        (rss || 0) * 1024
      end

      def max_allowed_email_sync_rss
        [ENV['UNICORN_EMAIL_SYNC_MAX_RSS'].to_i, 500].max.megabytes
      end

      def check_email_sync_heartbeat
        # Skip first check to let process warm up
        @email_sync_next_heartbeat_check ||= (Time.now + Demon::EmailSync::HEARTBEAT_INTERVAL).to_i

        return if @email_sync_next_heartbeat_check > Time.now.to_i
        @email_sync_next_heartbeat_check = (Time.now + Demon::EmailSync::HEARTBEAT_INTERVAL).to_i

        restart = false

        # Restart process if it does not respond anymore
        last_heartbeat_ago = Time.now.to_i - Discourse.redis.get(Demon::EmailSync::HEARTBEAT_KEY).to_i
        if last_heartbeat_ago > Demon::EmailSync::HEARTBEAT_INTERVAL.to_i
          STDERR.puts("EmailSync heartbeat test failed (last heartbeat was #{last_heartbeat_ago}s ago), restarting")
          restart = true
        end

        # Restart process if memory usage is too high
        email_sync_rss = max_email_sync_rss
        if email_sync_rss > max_allowed_email_sync_rss
          STDERR.puts("EmailSync is consuming too much memory (using: %0.2fM) for '%s', restarting" % [(email_sync_rss.to_f / 1.megabyte), ENV["DISCOURSE_HOSTNAME"]])
          restart = true
        end

        Demon::EmailSync.restart if restart
      end

      def master_sleep(sec)
        sidekiqs = ENV['UNICORN_SIDEKIQS'].to_i
        if sidekiqs > 0
          Demon::Sidekiq.ensure_running
          check_sidekiq_heartbeat
        end

        if ENV['DISCOURSE_ENABLE_EMAIL_SYNC_DEMON'] == 'true'
          Demon::EmailSync.ensure_running
          check_email_sync_heartbeat
        end

        master_sleep_orig(sec)
      end
    end
  end

  Discourse.redis.close

  # Throttle the master from forking too quickly by sleeping. Due
  # to the implementation of standard Unix signal handlers, this
  # helps (but does not completely) prevent identical, repeated signals
  # from being lost when the receiving process is busy.
  sleep 1
end

after_fork do |server, worker|
  DiscourseEvent.trigger(:web_fork_started)

  # warm up v8 after fork, that way we do not fork a v8 context
  # it may cause issues if bg threads in a v8 isolate randomly stop
  # working due to fork
  Discourse.after_fork
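
  # Render a trivial Markdown snippet now so the cooking pipeline is primed
  # before this worker serves its first real request.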
  begin
    PrettyText.cook("warm up **pretty text**")
  rescue => e
    Rails.logger.error("Failed to warm up pretty text: #{e}")
  end
end