Check if nodes have been shut down in TestCluster#beforeTest

The test cluster ignored nodes that tests had randomly torn down.
This commit also adds some debug logging to the TestCluster#beforeTest
method.
Simon Willnauer 2013-09-18 16:20:53 +02:00
parent 709add033b
commit a26375ae25
1 changed file with 8 additions and 1 deletion
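
Below is a minimal, hypothetical sketch of the strengthened consistency check described in the commit message; it is not the actual TestCluster source. The field names nextNodeId, sharedNodesSeeds and nodes are taken from the diff below, while the class name and the map's value type are made up for illustration. Before this commit only the nextNodeId counter was compared against the number of shared-node seeds, so a shared node that a test had randomly torn down (shrinking the live node map) went unnoticed and was never restarted.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

class SharedClusterStateSketch {
    private final AtomicInteger nextNodeId = new AtomicInteger();
    private final long[] sharedNodesSeeds;
    private final Map<String, Object> nodes = new HashMap<String, Object>(); // node name -> node handle

    SharedClusterStateSketch(long[] sharedNodesSeeds) {
        this.sharedNodesSeeds = sharedNodesSeeds;
    }

    // True only if no extra node was added and no shared node was shut down
    // since the last reset; the second clause is the condition this commit adds.
    boolean clusterUnchanged() {
        return nextNodeId.get() == sharedNodesSeeds.length
                && nodes.size() == sharedNodesSeeds.length;
    }
}

With the extra clause, beforeTest falls through to the restart path whenever the live node count no longer matches the expected number of shared nodes, instead of returning early.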

@@ -327,9 +327,12 @@ public class TestCluster {
     public synchronized void beforeTest(Random random) {
         this.random = new Random(random.nextLong());
         resetClients(); /* reset all clients - each test gets it's own client based on the Random instance created above. */
-        if (nextNodeId.get() == sharedNodesSeeds.length) {
+        if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
+            logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
             return;
         }
+        logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
         if (nodes.size() > 0) {
             client().admin().cluster().prepareHealth().setWaitForNodes(""+nodes.size()).get();
         }
@@ -342,10 +345,12 @@ public class TestCluster {
                 changed = true;
                 nodeAndClient = buildNode(i, sharedNodesSeeds[i]);
                 nodeAndClient.node.start();
+                logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
             }
             sharedNodes.add(nodeAndClient);
         }
         if (!changed && sharedNodes.size() == nodes.size()) {
+            logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
             return; // we are consistent - return
         }
         for (NodeAndClient nodeAndClient : sharedNodes) {
@@ -355,6 +360,7 @@ public class TestCluster {
         // trash the remaining nodes
         final Collection<NodeAndClient> toShutDown = nodes.values();
         for (NodeAndClient nodeAndClient : toShutDown) {
+            logger.debug("Close Node [{}] not shared", nodeAndClient.name);
             nodeAndClient.close();
         }
         nodes.clear();
@@ -364,6 +370,7 @@ public class TestCluster {
         nextNodeId.set(sharedNodesSeeds.length);
         assert numNodes() == sharedNodesSeeds.length;
         client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+        logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
     }

     private void resetClients() {
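
For completeness, here is a small self-contained sketch of the parameterized "{}" logging style used by the debug statements this commit adds. SLF4J is used only as an assumption to keep the example runnable; TestCluster itself logs through Elasticsearch's own logger abstraction, which shares the same placeholder syntax. The class and method names are hypothetical.

import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ClusterLoggingSketch {
    private static final Logger logger = LoggerFactory.getLogger(ClusterLoggingSketch.class);

    void logInconsistentCluster(Set<String> nodeNames, int nextNodeId, int numSharedNodes) {
        // The message is only formatted when debug logging is enabled, so the
        // extra diagnostics cost little in normal test runs.
        logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
                nodeNames, nextNodeId, numSharedNodes);
    }
}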