# frozen_string_literal: true

#
# A wrapper around redis that namespaces keys with the current site id
#
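# Usage sketch (illustrative; keys are transparently prefixed with the
# current site's database name, so the same logical key maps to a
# different redis key per site):
#
#   Discourse.redis.set("foo", "bar")  # stored as "<current_db>:foo"
#   Discourse.redis.get("foo")         # => "bar"
#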
class DiscourseRedis
  class FallbackHandler
    include Singleton

    MASTER_ROLE_STATUS = "role:master"
    MASTER_LOADING_STATUS = "loading:1"
    MASTER_LOADED_STATUS = "loading:0"
    CONNECTION_TYPES = %w{normal pubsub}

    def initialize
      @master = true
      @running = false
      @mutex = Mutex.new
      @slave_config = DiscourseRedis.slave_config
      @message_bus_keepalive_interval = MessageBus.keepalive_interval
    end
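
    # Starts (at most) one background thread that keeps probing the master
    # via #initiate_fallback_to_master until the recorded status is master
    # again, sleeping 5 seconds between attempts.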
    def verify_master
      synchronize do
        return if @thread && @thread.alive?

        @thread = Thread.new do
          loop do
            begin
              thread = Thread.new { initiate_fallback_to_master }
              thread.join
              break if synchronize { @master }
              sleep 5
            ensure
              thread.kill
            end
          end
        end
      end
    end
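
    # Probes the master server; when it reports the master role and has
    # finished loading its dataset, records it as up again, kills lingering
    # connections to the slave and clears the readonly state.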
    def initiate_fallback_to_master
      success = false

      begin
        redis_config = DiscourseRedis.config.dup
        redis_config.delete(:connector)
        master_client = ::Redis::Client.new(redis_config)
        logger.warn "#{log_prefix}: Checking connection to master server..."
        info = master_client.call([:info])

        if info.include?(MASTER_LOADED_STATUS) && info.include?(MASTER_ROLE_STATUS)
          begin
            logger.warn "#{log_prefix}: Master server is active, killing all connections to slave..."

            self.master = true
            slave_client = ::Redis::Client.new(@slave_config)

            CONNECTION_TYPES.each do |connection_type|
              slave_client.call([:client, [:kill, 'type', connection_type]])
            end

            MessageBus.keepalive_interval = @message_bus_keepalive_interval
            Discourse.clear_readonly!
            Discourse.request_refresh!
            success = true
          ensure
            slave_client&.disconnect
          end
        end
      rescue => e
        logger.warn "#{log_prefix}: Connection to Master server failed with '#{e.message}'"
      ensure
        master_client&.disconnect
      end

      success
    end

    def master
      synchronize { @master }
    end

    def master=(args)
      synchronize do
        @master = args

        # Disables MessageBus keepalive when Redis is in readonly mode
        MessageBus.keepalive_interval = 0 if !@master
      end
    end

    private

    def synchronize
      @mutex.synchronize { yield }
    end

    def logger
      Rails.logger
    end

    def log_prefix
      "#{self.class}"
    end
  end
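
  # Connector that picks, per connection attempt, whether the client should
  # receive the master's or the slave's connection options, based on the
  # status recorded by the FallbackHandler.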
  class Connector < Redis::Client::Connector
    def initialize(options)
      super(options)
      @slave_options = DiscourseRedis.slave_config(options)
      @fallback_handler = DiscourseRedis::FallbackHandler.instance
    end
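
    # Returns the options to connect with: the slave's while the master is
    # recorded as down or is still loading its dataset, the master's otherwise.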
    def resolve(client = nil)
      if !@fallback_handler.master
        @fallback_handler.verify_master
        return @slave_options
      end

      begin
        options = @options.dup
        options.delete(:connector)
        client ||= Redis::Client.new(options)

        loading = client.call([:info, :persistence]).include?(
          DiscourseRedis::FallbackHandler::MASTER_LOADING_STATUS
        )

        loading ? @slave_options : @options
      rescue Redis::ConnectionError, Redis::CannotConnectError, RuntimeError => ex
        raise ex if ex.class == RuntimeError && ex.message != "Name or service not known"

        @fallback_handler.master = false
        @fallback_handler.verify_master
        raise ex
      ensure
        client.disconnect
      end
    end
  end

  def self.raw_connection(config = nil)
    config ||= self.config
    Redis.new(config)
  end

  def self.config
    GlobalSetting.redis_config
  end
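
  # Builds the slave's connection options by swapping in the `slave_host`
  # and `slave_port` entries from the given config.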
  def self.slave_config(options = config)
    options.dup.merge!(host: options[:slave_host], port: options[:slave_port])
  end

  def initialize(config = nil, namespace: true)
    @config = config || DiscourseRedis.config
    @redis = DiscourseRedis.raw_connection(@config.dup)
    @namespace = namespace
  end

  def self.fallback_handler
    @fallback_handler ||= DiscourseRedis::FallbackHandler.instance
  end

  def without_namespace
    # Only use this if you want to store and fetch data that's shared between sites
    @redis
  end
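
  # Runs the block, swallowing Redis READONLY command errors (seen while a
  # slave is serving traffic after a failover) and marking the site as
  # readonly instead of raising.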
  def self.ignore_readonly
    yield
  rescue Redis::CommandError => ex
    if ex.message =~ /READONLY/
      if !ENV["REDIS_RAILS_FAILOVER"]
        fallback_handler.verify_master if !fallback_handler.master
      end

      Discourse.received_redis_readonly!
      nil
    else
      raise ex
    end
  end

  # prefix the key with the namespace
  def method_missing(meth, *args, &block)
    if @redis.respond_to?(meth)
      DiscourseRedis.ignore_readonly { @redis.public_send(meth, *args, &block) }
    else
      super
    end
  end

  # Proxy key methods through, but prefix the keys with the namespace
  [:append, :blpop, :brpop, :brpoplpush, :decr, :decrby, :expire, :expireat, :get, :getbit, :getrange, :getset,
   :hdel, :hexists, :hget, :hgetall, :hincrby, :hincrbyfloat, :hkeys, :hlen, :hmget, :hmset, :hset, :hsetnx, :hvals, :incr,
   :incrby, :incrbyfloat, :lindex, :linsert, :llen, :lpop, :lpush, :lpushx, :lrange, :lrem, :lset, :ltrim,
   :mapped_hmset, :mapped_hmget, :mapped_mget, :mapped_mset, :mapped_msetnx, :move, :mset,
   :msetnx, :persist, :pexpire, :pexpireat, :psetex, :pttl, :rename, :renamenx, :rpop, :rpoplpush, :rpush, :rpushx, :sadd, :scard,
   :sdiff, :set, :setbit, :setex, :setnx, :setrange, :sinter, :sismember, :smembers, :sort, :spop, :srandmember, :srem, :strlen,
   :sunion, :ttl, :type, :watch, :zadd, :zcard, :zcount, :zincrby, :zrange, :zrangebyscore, :zrank, :zrem, :zremrangebyrank,
   :zremrangebyscore, :zrevrange, :zrevrangebyscore, :zrevrank].each do |m|
    define_method m do |*args|
      args[0] = "#{namespace}:#{args[0]}" if @namespace
      DiscourseRedis.ignore_readonly { @redis.public_send(m, *args) }
    end
  end

  # Implement our own because https://github.com/redis/redis-rb/issues/698 has stalled
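  # Returns true if at least one of the given keys exists.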
  def exists(*keys)
    keys.map! { |a| "#{namespace}:#{a}" } if @namespace

    DiscourseRedis.ignore_readonly do
      @redis.synchronize do |client|
        client.call([:exists, *keys]) do |value|
          value > 0
        end
      end
    end
  end

  def mget(*args)
    args.map! { |a| "#{namespace}:#{a}" } if @namespace
    DiscourseRedis.ignore_readonly { @redis.mget(*args) }
  end

  def del(k)
    DiscourseRedis.ignore_readonly do
      k = "#{namespace}:#{k}" if @namespace
      @redis.del k
    end
  end
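
  # Scans keys in the current namespace matching `options[:match]`, yielding
  # (or returning) each key with the namespace prefix stripped. Sketch (the
  # pattern is illustrative):
  #
  #   Discourse.redis.scan_each(match: "report_*") { |key| puts key }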
  def scan_each(options = {}, &block)
    DiscourseRedis.ignore_readonly do
      match = options[:match].presence || '*'

      options[:match] =
        if @namespace
          "#{namespace}:#{match}"
        else
          match
        end

      if block
        @redis.scan_each(options) do |key|
          key = remove_namespace(key) if @namespace
          block.call(key)
        end
      else
        @redis.scan_each(options).map do |key|
          key = remove_namespace(key) if @namespace
          key
        end
      end
    end
  end
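
  # Lists keys in the current namespace matching `pattern` (defaults to '*'),
  # with the namespace prefix stripped. KEYS walks the server's entire
  # keyspace, so #scan_each is usually the better choice for large datasets.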
  def keys(pattern = nil)
    DiscourseRedis.ignore_readonly do
      pattern = pattern || '*'
      pattern = "#{namespace}:#{pattern}" if @namespace
      keys = @redis.keys(pattern)

      if @namespace
        len = namespace.length + 1
        keys.map! { |k| k[len..-1] }
      end

      keys
    end
  end
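
  # Deletes every key in the current namespace that starts with `prefix`,
  # e.g. (illustrative) delete_prefixed("report") removes all "report*" keys.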
  def delete_prefixed(prefix)
    DiscourseRedis.ignore_readonly do
      keys("#{prefix}*").each { |k| Discourse.redis.del(k) }
    end
  end

  def reconnect
    @redis._client.reconnect
  end

  def namespace_key(key)
    if @namespace
      "#{namespace}:#{key}"
    else
      key
    end
  end

  def namespace
    RailsMultisite::ConnectionManagement.current_db
  end

  def self.namespace
    Rails.logger.warn("DiscourseRedis.namespace is going to be deprecated, do not use it!")
    RailsMultisite::ConnectionManagement.current_db
  end

  def self.new_redis_store
    Cache.new
  end

  private

  def remove_namespace(key)
    key[(namespace.length + 1)..-1]
  end
end