ARTEMIS-3039 Fixing network isolation signal on replication
parent 1db088e892
commit 6b7d963f45
@@ -123,7 +123,7 @@ public class SharedNothingBackupQuorum implements Quorum, SessionFailureListener
       if (!isLiveDown()) {
          //lost connection but don't know if live is down so restart as backup as we can't replicate any more
          ActiveMQServerLogger.LOGGER.restartingAsBackupBasedOnQuorumVoteResults();
-         signal = BACKUP_ACTIVATION.FAILURE_REPLICATING;
+         signal = BACKUP_ACTIVATION.FAILURE_RETRY;
       } else {
          // live is assumed to be down, backup fails-over
          ActiveMQServerLogger.LOGGER.failingOverBasedOnQuorumVoteResults();
@@ -139,7 +139,7 @@ public class SharedNothingBackupQuorum implements Quorum, SessionFailureListener
             signal = BACKUP_ACTIVATION.FAIL_OVER;
          } else {
             ActiveMQServerLogger.LOGGER.serverIsolatedOnNetwork();
-            signal = BACKUP_ACTIVATION.FAILURE_REPLICATING;
+            signal = BACKUP_ACTIVATION.FAILURE_RETRY;
          }
       }
       latch.countDown();
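Taken together, the two hunks above change what the backup does when it can no longer replicate but cannot prove the live is actually dead: both the "quorum says live is still up" branch and the "this backup is isolated from the network" branch now emit FAILURE_RETRY instead of the terminal FAILURE_REPLICATING, so the backup restarts and retries replication rather than giving up. A minimal sketch of that decision flow follows; the enum constants mirror the patch, but the class and its helpers (isLiveDown, isNetworkHealthy, waitForDecision) are invented stand-ins for illustration, not the actual Artemis API.

import java.util.concurrent.CountDownLatch;

// Simplified model of the quorum decision after a replication connection failure.
// Enum names mirror the patch; everything else here is a made-up stand-in.
public class BackupQuorumSketch {

   enum BACKUP_ACTIVATION {
      FAIL_OVER,          // live confirmed down: backup takes over
      FAILURE_RETRY,      // connection lost but live may be alive: restart as backup and retry
      FAILURE_REPLICATING // terminal replication failure (the pre-patch signal in both retry branches)
   }

   private final CountDownLatch latch = new CountDownLatch(1);
   private volatile BACKUP_ACTIVATION signal;

   // stand-ins for the quorum vote and the network pinger check
   private boolean isLiveDown() { return false; }
   private boolean isNetworkHealthy() { return true; }

   public void onConnectionFailure() {
      if (!isLiveDown()) {
         // lost the connection but the quorum says live is not down: retry, don't give up
         signal = BACKUP_ACTIVATION.FAILURE_RETRY;
      } else if (isNetworkHealthy()) {
         // live is assumed down and we can reach the network: fail over
         signal = BACKUP_ACTIVATION.FAIL_OVER;
      } else {
         // we cannot reach the network either, so we are the isolated one: retry
         signal = BACKUP_ACTIVATION.FAILURE_RETRY;
      }
      latch.countDown(); // wake whoever is blocked in waitForDecision()
   }

   public BACKUP_ACTIVATION waitForDecision() throws InterruptedException {
      latch.await();
      return signal;
   }
}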
@@ -1934,10 +1934,12 @@ public abstract class ClusterTestBase extends ActiveMQTestBase {
       log.debug("started server " + servers[node]);
       waitForServerToStart(servers[node]);

-      for (int i = 0; i < node * 1000; i++) {
-         // it is common to have messages landing with similar IDs on separate nodes, which could hide a few issues.
-         // so we make them unequal
-         servers[node].getStorageManager().generateID();
+      if (servers[node].getStorageManager() != null) {
+         for (int i = 0; i < node * 1000; i++) {
+            // it is common to have messages landing with similar IDs on separate nodes, which could hide a few issues.
+            // so we make them unequal
+            servers[node].getStorageManager().generateID();
+         }
       }
    }
 }
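The test-base hunk does two things: it guards against servers whose StorageManager is not available (previously a null manager would have thrown a NullPointerException once the loop body ran, i.e. for any node > 0), and it keeps the existing trick of burning node * 1000 IDs so that message IDs on different nodes never line up by accident. A standalone sketch of that skewing trick, with a made-up NodeIdSource standing in for StorageManager.generateID():

import java.util.concurrent.atomic.AtomicLong;

// Illustration of the ID-skewing trick above, outside Artemis: give every node's
// ID sequence a different head start so equal-looking IDs on different nodes
// cannot accidentally mask cross-node mix-ups in a test.
public class NodeIdSkewSketch {

   // stand-in for StorageManager.generateID()
   static final class NodeIdSource {
      private final AtomicLong sequence = new AtomicLong();
      long generateID() { return sequence.incrementAndGet(); }
   }

   public static void main(String[] args) {
      int nodes = 3;
      NodeIdSource[] sources = new NodeIdSource[nodes];
      for (int node = 0; node < nodes; node++) {
         sources[node] = new NodeIdSource();
         // same skew as the patch: burn node * 1000 IDs, so node 0 starts
         // at 1, node 1 at 1001, node 2 at 2001, and so on
         for (int i = 0; i < node * 1000; i++) {
            sources[node].generateID();
         }
      }
      for (int node = 0; node < nodes; node++) {
         System.out.println("node " + node + " first ID: " + sources[node].generateID());
      }
   }
}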