2019-05-02 18:17:27 -04:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2013-02-05 14:16:51 -05:00
|
|
|
#
|
|
|
|
# A wrapper around redis that namespaces keys with the current site id
|
|
|
|
#
|
2017-10-05 03:57:08 -04:00
|
|
|
|
2013-02-05 14:16:51 -05:00
|
|
|
class DiscourseRedis
|
2020-01-23 17:20:17 -05:00
|
|
|
# Tracks whether the redis master is reachable and, when it is not,
# probes it in a background thread until it comes back, at which point
# all connections to the slave are killed so clients reconnect to the
# master. Process-wide singleton; all state is guarded by @mutex.
class FallbackHandler
  include Singleton

  # Substrings matched against the raw `INFO` reply from the master.
  MASTER_ROLE_STATUS = "role:master".freeze
  MASTER_LOADING_STATUS = "loading:1".freeze
  MASTER_LOADED_STATUS = "loading:0".freeze
  # Connection types passed to `CLIENT KILL TYPE <type>` on the slave.
  # `each(&:freeze)` freezes the elements and returns the array
  # (the array itself is not frozen).
  CONNECTION_TYPES = %w{normal pubsub}.each(&:freeze)

  def initialize
    @master = true
    @running = false
    @mutex = Mutex.new
    @slave_config = DiscourseRedis.slave_config
    # Remembered so keepalive can be restored after readonly mode
    # (it is zeroed in #master=).
    @message_bus_keepalive_interval = MessageBus.keepalive_interval
  end

  # Starts the background probe thread unless one is already alive.
  # The thread retries #initiate_fallback_to_master every 5 seconds
  # until the master is recorded as up again.
  def verify_master
    synchronize do
      return if @thread && @thread.alive?

      @thread = Thread.new do
        loop do
          begin
            # Probe in a separate thread; join re-raises any exception
            # here, where the `ensure` can still clean up.
            thread = Thread.new { initiate_fallback_to_master }
            thread.join
            break if synchronize { @master }
            sleep 5
          ensure
            # NOTE(review): if the inner Thread.new itself raised,
            # `thread` would be nil here — presumably never happens.
            thread.kill
          end
        end
      end
    end
  end

  # Checks whether the master is back up. If it reports role:master
  # and has finished loading its dataset, flips the recorded status to
  # up, kills all slave connections and clears readonly mode.
  #
  # @return [Boolean] true only when the full fallback succeeded.
  def initiate_fallback_to_master
    success = false

    begin
      redis_config = DiscourseRedis.config.dup
      # Avoid recursing through DiscourseRedis::Connector.
      redis_config.delete(:connector)
      master_client = ::Redis::Client.new(redis_config)
      logger.warn "#{log_prefix}: Checking connection to master server..."
      info = master_client.call([:info])

      if info.include?(MASTER_LOADED_STATUS) && info.include?(MASTER_ROLE_STATUS)
        begin
          logger.warn "#{log_prefix}: Master server is active, killing all connections to slave..."

          self.master = true
          slave_client = ::Redis::Client.new(@slave_config)

          # Force every client off the slave so they reconnect and
          # resolve to the master.
          CONNECTION_TYPES.each do |connection_type|
            slave_client.call([:client, [:kill, 'type', connection_type]])
          end

          MessageBus.keepalive_interval = @message_bus_keepalive_interval
          Discourse.clear_readonly!
          Discourse.request_refresh!
          success = true
        ensure
          slave_client&.disconnect
        end
      end
    rescue => e
      logger.warn "#{log_prefix}: Connection to Master server failed with '#{e.message}'"
    ensure
      master_client&.disconnect
    end

    success
  end

  # Thread-safe read of the recorded master status.
  def master
    synchronize { @master }
  end

  # Thread-safe write of the recorded master status.
  def master=(args)
    synchronize do
      @master = args

      # Disables MessageBus keepalive when Redis is in readonly mode
      MessageBus.keepalive_interval = 0 if !@master
    end
  end

  private

  def synchronize
    @mutex.synchronize { yield }
  end

  def logger
    Rails.logger
  end

  def log_prefix
    "#{self.class}"
  end
end
|
|
|
|
|
2020-01-23 17:20:17 -05:00
|
|
|
# Connector used by the redis client: on every (re)connect it decides
# whether to hand back the master or the slave connection options,
# consulting the FallbackHandler for the recorded master status.
class Connector < Redis::Client::Connector
  def initialize(options)
    super(options)
    @slave_options = DiscourseRedis.slave_config(options)
    @fallback_handler = DiscourseRedis::FallbackHandler.instance
  end

  # Returns the connection options for the next connection attempt.
  #
  # When the master is recorded as down, (re)start the background probe
  # and use the slave. Otherwise probe the master directly: if it is
  # unreachable or still loading its dataset, fall back to the slave.
  def resolve(client = nil)
    if !@fallback_handler.master
      @fallback_handler.verify_master
      return @slave_options
    end

    begin
      options = @options.dup
      # Avoid recursing through this connector when probing.
      options.delete(:connector)
      client ||= Redis::Client.new(options)

      loading = client.call([:info, :persistence]).include?(
        DiscourseRedis::FallbackHandler::MASTER_LOADING_STATUS
      )

      loading ? @slave_options : @options
    rescue Redis::ConnectionError, Redis::CannotConnectError, RuntimeError => ex
      # DNS resolution failures surface as RuntimeError; any other
      # RuntimeError is unrelated and must propagate untouched.
      raise ex if ex.class == RuntimeError && ex.message != "Name or service not known"
      # Master is unreachable: record it as down, start probing, and
      # let the caller see the original error.
      @fallback_handler.master = false
      @fallback_handler.verify_master
      raise ex
    ensure
      # FIX: `client` is nil when Redis::Client.new itself raised, so a
      # bare `client.disconnect` would mask the real error with a
      # NoMethodError. Use safe navigation, matching the
      # `master_client&.disconnect` pattern in FallbackHandler.
      client&.disconnect
    end
  end
end
|
2013-02-25 11:42:20 -05:00
|
|
|
|
2013-03-25 02:19:59 -04:00
|
|
|
# Builds an un-namespaced redis connection. Falls back to the global
# configuration when no config hash is supplied.
def self.raw_connection(config = nil)
  Redis.new(config || self.config)
end
|
|
|
|
|
|
|
|
# Redis connection settings, sourced from the process-wide global
# configuration.
def self.config
  GlobalSetting.redis_config
end
|
|
|
|
|
2016-03-02 09:01:48 -05:00
|
|
|
# Derives the slave connection options from a master config hash by
# swapping in the slave host and port. Does not mutate the input.
def self.slave_config(options = config)
  slave_overrides = { host: options[:slave_host], port: options[:slave_port] }
  options.merge(slave_overrides)
end
|
|
|
|
|
2017-08-02 01:32:01 -04:00
|
|
|
# @param config [Hash, nil] redis connection options; defaults to the
#   global configuration.
# @param namespace [Boolean] when true (the default), keys are prefixed
#   with the current site's namespace.
def initialize(config = nil, namespace: true)
  @config = config || DiscourseRedis.config
  @redis = DiscourseRedis.raw_connection(@config.dup)
  @namespace = namespace
end
|
|
|
|
|
2020-01-23 17:20:17 -05:00
|
|
|
# Memoized accessor for the process-wide FallbackHandler singleton.
def self.fallback_handler
  @fallback_handler ||= DiscourseRedis::FallbackHandler.instance
end
|
|
|
|
|
2013-12-20 16:34:34 -05:00
|
|
|
# Raw, un-namespaced redis connection.
def without_namespace
  # Only use this if you want to store and fetch data that's shared between sites
  @redis
end
|
|
|
|
|
2015-04-24 13:10:43 -04:00
|
|
|
# Runs the given block, converting redis READONLY command errors
# (raised when we are connected to a read-only slave) into a noop that
# returns nil. Any other Redis::CommandError is re-raised.
def self.ignore_readonly
  yield
rescue Redis::CommandError => ex
  if ex.message =~ /READONLY/
    # Stay quiet in tests and when we already know we are readonly.
    unless Discourse.recently_readonly? || Rails.env.test?
      STDERR.puts "WARN: Redis is in a readonly state. Performed a noop"
    end

    # Start probing the master if it is not already recorded as up.
    fallback_handler.verify_master if !fallback_handler.master
    Discourse.received_redis_readonly!
    nil
  else
    raise ex
  end
end
|
|
|
|
|
2013-02-05 14:16:51 -05:00
|
|
|
# Delegates any method the underlying redis connection understands,
# wrapped in ignore_readonly so READONLY errors become noops.
# NOTE: keys are NOT namespaced here — the original comment claiming
# prefixing was misleading; namespacing happens in the explicitly
# proxied key methods defined below.
def method_missing(meth, *args, &block)
  if @redis.respond_to?(meth)
    DiscourseRedis.ignore_readonly { @redis.public_send(meth, *args, &block) }
  else
    super
  end
end

# FIX: keep respond_to?/method lookup consistent with method_missing,
# as the Ruby style guide requires for any method_missing override.
def respond_to_missing?(meth, include_private = false)
  @redis.respond_to?(meth, include_private) || super
end
|
|
|
|
|
|
|
|
# Proxy key methods through, but prefix the keys with the namespace
#
# Each proxied command takes the key as its first argument; that key is
# rewritten to "#{namespace}:#{key}" when namespacing is enabled, and
# the call is wrapped in ignore_readonly.
# NOTE(review): :zrangebyscore appears twice in this list; the second
# define_method simply redefines the first, so it is harmless.
[:append, :blpop, :brpop, :brpoplpush, :decr, :decrby, :exists, :expire, :expireat, :get, :getbit, :getrange, :getset,
 :hdel, :hexists, :hget, :hgetall, :hincrby, :hincrbyfloat, :hkeys, :hlen, :hmget, :hmset, :hset, :hsetnx, :hvals, :incr,
 :incrby, :incrbyfloat, :lindex, :linsert, :llen, :lpop, :lpush, :lpushx, :lrange, :lrem, :lset, :ltrim,
 :mapped_hmset, :mapped_hmget, :mapped_mget, :mapped_mset, :mapped_msetnx, :move, :mset,
 :msetnx, :persist, :pexpire, :pexpireat, :psetex, :pttl, :rename, :renamenx, :rpop, :rpoplpush, :rpush, :rpushx, :sadd, :scard,
 :sdiff, :set, :setbit, :setex, :setnx, :setrange, :sinter, :sismember, :smembers, :sort, :spop, :srandmember, :srem, :strlen,
 :sunion, :ttl, :type, :watch, :zadd, :zcard, :zcount, :zincrby, :zrange, :zrangebyscore, :zrank, :zrem, :zremrangebyrank,
 :zremrangebyscore, :zrevrange, :zrevrangebyscore, :zrevrank, :zrangebyscore ].each do |m|
  define_method m do |*args|
    args[0] = "#{namespace}:#{args[0]}" if @namespace
    DiscourseRedis.ignore_readonly { @redis.public_send(m, *args) }
  end
end
|
|
|
|
|
2015-09-28 02:38:52 -04:00
|
|
|
# MGET with every key namespaced (when namespacing is enabled).
def mget(*keys)
  keys = keys.map { |key| "#{namespace}:#{key}" } if @namespace
  DiscourseRedis.ignore_readonly { @redis.mget(*keys) }
end
|
|
|
|
|
2014-01-06 00:50:04 -05:00
|
|
|
# DEL with the key namespaced (when namespacing is enabled).
def del(k)
  DiscourseRedis.ignore_readonly do
    full_key = @namespace ? "#{namespace}:#{k}" : k
    @redis.del full_key
  end
end
|
|
|
|
|
2018-12-14 19:53:52 -05:00
|
|
|
# SCAN over keys matching options[:match] (default '*'), with the
# pattern namespaced on the way in and the namespace stripped from each
# yielded key on the way out. With a block, yields each key; without,
# returns the array of keys.
def scan_each(options = {}, &block)
  DiscourseRedis.ignore_readonly do
    pattern = options[:match].presence || '*'
    options[:match] = @namespace ? "#{namespace}:#{pattern}" : pattern

    if block
      @redis.scan_each(options) do |raw_key|
        raw_key = remove_namespace(raw_key) if @namespace
        block.call(raw_key)
      end
    else
      @redis.scan_each(options).map do |raw_key|
        @namespace ? remove_namespace(raw_key) : raw_key
      end
    end
  end
end
|
|
|
|
|
2017-07-27 21:20:09 -04:00
|
|
|
# KEYS matching the given glob (default '*'), namespacing the pattern
# on the way in and stripping the namespace from each result.
def keys(pattern = nil)
  DiscourseRedis.ignore_readonly do
    glob = pattern || '*'
    glob = "#{namespace}:#{glob}" if @namespace

    matched = @redis.keys(glob)
    matched = matched.map { |key| remove_namespace(key) } if @namespace
    matched
  end
end
|
|
|
|
|
2015-02-02 12:44:21 -05:00
|
|
|
# Deletes every key starting with the given prefix (uses KEYS, so
# avoid on large keyspaces).
def delete_prefixed(prefix)
  DiscourseRedis.ignore_readonly do
    keys("#{prefix}*").each do |key|
      Discourse.redis.del(key)
    end
  end
end
|
|
|
|
|
2014-01-06 00:50:04 -05:00
|
|
|
# Deletes every key within the current namespace (not a real FLUSHDB:
# other sites' keys are untouched).
def flushdb
  DiscourseRedis.ignore_readonly do
    keys.each do |key|
      del(key)
    end
  end
end
|
|
|
|
|
|
|
|
# Forces the underlying redis client to drop and re-establish its
# connection.
def reconnect
  @redis._client.reconnect
end
|
|
|
|
|
2017-10-24 22:19:43 -04:00
|
|
|
# Returns the key prefixed with the current namespace, or the key
# untouched when namespacing is disabled.
def namespace_key(key)
  @namespace ? "#{namespace}:#{key}" : key
end
|
|
|
|
|
2015-05-05 19:53:10 -04:00
|
|
|
# The namespace used to prefix keys: the current multisite database
# name.
def namespace
  RailsMultisite::ConnectionManagement.current_db
end
|
|
|
|
|
2013-02-05 14:16:51 -05:00
|
|
|
# @deprecated use the instance-level #namespace instead (logs a
#   warning on every call).
def self.namespace
  Rails.logger.warn("DiscourseRedis.namespace is going to be deprecated, do not use it!")
  RailsMultisite::ConnectionManagement.current_db
end
|
|
|
|
|
2013-03-11 08:33:20 -04:00
|
|
|
# Builds a new cache store backed by redis.
def self.new_redis_store
  Cache.new
end
|
|
|
|
|
2018-12-14 19:53:52 -05:00
|
|
|
private

# Strips the leading "#{namespace}:" prefix from a raw redis key.
def remove_namespace(key)
  prefix_length = namespace.length + 1
  key[prefix_length..-1]
end
|
2020-01-23 17:20:17 -05:00
|
|
|
|
2013-02-05 14:16:51 -05:00
|
|
|
end
|