# frozen_string_literal: true

require 'rails_helper'

describe DiscourseRedis do
  let(:slave_host) { 'testhost' }
  let(:slave_port) { 1234 }

  let(:config) do
    DiscourseRedis.config.dup.merge(slave_host: 'testhost', slave_port: 1234, connector: DiscourseRedis::Connector)
  end

  let(:fallback_handler) { DiscourseRedis::FallbackHandler.instance }

  it "ignore_readonly returns nil from a pure exception" do
    result = DiscourseRedis.ignore_readonly { raise Redis::CommandError.new("READONLY") }
    expect(result).to eq(nil)
  end

  describe 'redis commands' do
    let(:raw_redis) { Redis.new(DiscourseRedis.config) }

    before do
      raw_redis.flushdb
    end

    after do
      raw_redis.flushdb
    end

    describe 'when namespace is enabled' do
      let(:redis) { DiscourseRedis.new }

      it 'should append namespace to the keys' do
        raw_redis.set('default:key', 1)
        raw_redis.set('test:key2', 1)

        expect(redis.keys).to include('key')
        expect(redis.keys).to_not include('key2')
        expect(redis.scan_each.to_a).to eq(['key'])

        redis.scan_each.each do |key|
          expect(key).to eq('key')
        end

        redis.del('key')

        expect(raw_redis.get('default:key')).to eq(nil)
        expect(redis.scan_each.to_a).to eq([])

        raw_redis.set('default:key1', '1')
        raw_redis.set('default:key2', '2')

        expect(redis.mget('key1', 'key2')).to eq(['1', '2'])
        expect(redis.scan_each.to_a).to contain_exactly('key1', 'key2')
      end
    end

    describe 'when namespace is disabled' do
      let(:redis) { DiscourseRedis.new(nil, namespace: false) }

      it 'should not append any namespace to the keys' do
        raw_redis.set('default:key', 1)
        raw_redis.set('test:key2', 1)

        expect(redis.keys).to include('default:key', 'test:key2')

        redis.del('key')

        expect(raw_redis.get('key')).to eq(nil)

        raw_redis.set('key1', '1')
        raw_redis.set('key2', '2')

        expect(redis.mget('key1', 'key2')).to eq(['1', '2'])
      end

      it 'should noop a readonly redis' do
        expect(Discourse.recently_readonly?).to eq(false)

        redis.without_namespace
          .expects(:set)
          .raises(Redis::CommandError.new("READONLY"))

        redis.set('key', 1)

        expect(Discourse.recently_readonly?).to eq(true)
      end
    end

    describe '.exists' do
      it 'should return false when key is not present' do
        expect(Discourse.redis.exists('test')).to eq(false)
      end

      it 'should return false when keys are not present' do
        expect(Discourse.redis.exists('test', 'test2')).to eq(false)
      end

      it 'should return true when key is present' do
        Discourse.redis.set('test', 1)

        expect(Discourse.redis.exists('test')).to eq(true)
      end

      it 'should return true when any key is present' do
        Discourse.redis.set('test', 1)
        Discourse.redis.set('test2', 1)

        expect(Discourse.redis.exists('test')).to eq(true)
        expect(Discourse.redis.exists('test', 'test2')).to eq(true)
        expect(Discourse.redis.exists('test2', 'test3')).to eq(true)
      end
    end
  end

  context '.slave_host' do
    it 'should return the right config' do
      slave_config = DiscourseRedis.slave_config(config)
      expect(slave_config[:host]).to eq(slave_host)
      expect(slave_config[:port]).to eq(slave_port)
    end
  end

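  # A READONLY error raised while writing should not bubble up; instead the
  # fallback handler is asked to verify the master's status.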
  context 'when redis connection is to a slave redis server' do
    it 'should check the status of the master server' do
      begin
        fallback_handler.master = false
        Discourse.redis.without_namespace.expects(:set).raises(Redis::CommandError.new("READONLY"))
        fallback_handler.expects(:verify_master).once
        Discourse.redis.set('test', '1')
      ensure
        fallback_handler.master = true
        Discourse.redis.del('test')
      end
    end
  end

  describe DiscourseRedis::Connector do
    let(:connector) { DiscourseRedis::Connector.new(config) }

    after do
      fallback_handler.master = true
    end

    it 'should return the master config when master is up' do
      expect(connector.resolve).to eq(config)
    end

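    # Minimal stand-in for a Redis client: every call raises the supplied
    # error, simulating a master that is unreachable or misbehaving.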
    class BrokenRedis
      def initialize(error)
        @error = error
      end

      def call(*args)
        raise @error
      end

      def disconnect
      end
    end

    it 'should return the slave config when master is down' do
      error = Redis::CannotConnectError

      expect do
        connector.resolve(BrokenRedis.new(error))
      end.to raise_error(Redis::CannotConnectError)

      config = connector.resolve

      expect(config[:host]).to eq(slave_host)
      expect(config[:port]).to eq(slave_port)
    end

it "should return the slave config when master's hostname cannot be resolved" do
|
|
|
|
error = RuntimeError.new('Name or service not known')
|
2018-05-30 02:43:30 -04:00
|
|
|
|
2020-01-23 17:20:17 -05:00
|
|
|
expect do
|
|
|
|
connector.resolve(BrokenRedis.new(error))
|
|
|
|
end.to raise_error(error)
|
      expect(fallback_handler.master).to eq(false)

      config = connector.resolve

      expect(config[:host]).to eq(slave_host)
      expect(config[:port]).to eq(slave_port)
      expect(fallback_handler.master).to eq(false)
    end

it "should return the slave config when master is still loading data" do
|
|
|
|
Redis::Client.any_instance
|
|
|
|
.expects(:call)
|
|
|
|
.with([:info, :persistence])
|
|
|
|
.returns("
|
|
|
|
someconfig:haha\r
|
|
|
|
#{DiscourseRedis::FallbackHandler::MASTER_LOADING_STATUS}
|
|
|
|
")
|
      config = connector.resolve

      expect(config[:host]).to eq(slave_host)
      expect(config[:port]).to eq(slave_port)
    end

    it "should raise the right error" do
      error = RuntimeError.new('test')

      2.times do
        expect { connector.resolve(BrokenRedis.new(error)) }
          .to raise_error(error)
      end
    end
  end

  describe DiscourseRedis::FallbackHandler do
    before do
      @original_keepalive_interval = MessageBus.keepalive_interval
    end

    after do
      fallback_handler.master = true
      MessageBus.keepalive_interval = @original_keepalive_interval
    end

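    # initiate_fallback_to_master reports false while the master still looks
    # down and true once it is back; the MessageBus keepalive assertions below
    # cover the side effect (0 while down, -1 once the default is restored).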
    describe '#initiate_fallback_to_master' do
      it 'should return the right value if the master server is still down' do
        fallback_handler.master = false
        Redis::Client.any_instance.expects(:call).with([:info]).returns("Some other stuff")

        expect(fallback_handler.initiate_fallback_to_master).to eq(false)
        expect(MessageBus.keepalive_interval).to eq(0)
      end

      it 'should fallback to the master server once it is up' do
        fallback_handler.master = false
        master_conn = mock('master')
        slave_conn = mock('slave')

        Redis::Client.expects(:new)
          .with(DiscourseRedis.config)
          .returns(master_conn)

        Redis::Client.expects(:new)
          .with(DiscourseRedis.slave_config)
          .returns(slave_conn)

        master_conn.expects(:call)
          .with([:info])
          .returns("
            #{DiscourseRedis::FallbackHandler::MASTER_ROLE_STATUS}\r\n
            #{DiscourseRedis::FallbackHandler::MASTER_LOADED_STATUS}
          ")

        DiscourseRedis::FallbackHandler::CONNECTION_TYPES.each do |connection_type|
          slave_conn.expects(:call).with(
            [:client, [:kill, 'type', connection_type]]
          )
        end

        master_conn.expects(:disconnect)
        slave_conn.expects(:disconnect)

        expect(fallback_handler.initiate_fallback_to_master).to eq(true)
        expect(fallback_handler.master).to eq(true)
        expect(Discourse.recently_readonly?).to eq(false)
        expect(MessageBus.keepalive_interval).to eq(-1)
      end
    end
  end
end