tests: harden this test due to a current shortcoming in the collections api.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1587099 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller 2014-04-13 21:32:58 +00:00
parent 3dd9091dd0
commit a4133ca6c2
1 changed file with 54 additions and 1 deletion


@@ -21,6 +21,7 @@ import java.io.File;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.SolrTestCaseJ4;
@@ -136,6 +137,8 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
     SolrInputDocument doc = new SolrInputDocument();
     doc.setField("id", "1");
+    ZkStateReader zkStateReader = new ZkStateReader(zkClient);
+    waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
     cloudSolrServer.add(doc);
     cloudSolrServer.commit();
 
     SolrQuery query = new SolrQuery();
@@ -144,7 +147,6 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
     assertEquals(1, rsp.getResults().getNumFound());
 
     // remove a server not hosting any replicas
-    ZkStateReader zkStateReader = new ZkStateReader(zkClient);
     zkStateReader.updateClusterState(true);
     ClusterState clusterState = zkStateReader.getClusterState();
     HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
@@ -226,4 +228,55 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
     request.setPath("/admin/collections");
     return server.request(request);
   }
+
+  protected void waitForRecoveriesToFinish(String collection,
+      ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout, int timeoutSeconds)
+      throws Exception {
+    log.info("Wait for recoveries to finish - collection: " + collection + " failOnTimeout:" + failOnTimeout + " timeout (sec):" + timeoutSeconds);
+    boolean cont = true;
+    int cnt = 0;
+
+    while (cont) {
+      if (verbose) System.out.println("-");
+      boolean sawLiveRecovering = false;
+      zkStateReader.updateClusterState(true);
+      ClusterState clusterState = zkStateReader.getClusterState();
+      Map<String,Slice> slices = clusterState.getSlicesMap(collection);
+      assertNotNull("Could not find collection:" + collection, slices);
+      for (Map.Entry<String,Slice> entry : slices.entrySet()) {
+        Map<String,Replica> shards = entry.getValue().getReplicasMap();
+        for (Map.Entry<String,Replica> shard : shards.entrySet()) {
+          if (verbose) System.out.println("rstate:"
+              + shard.getValue().getStr(ZkStateReader.STATE_PROP)
+              + " live:"
+              + clusterState.liveNodesContain(shard.getValue().getNodeName()));
+          String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
+          if ((state.equals(ZkStateReader.RECOVERING) || state
+              .equals(ZkStateReader.SYNC) || state.equals(ZkStateReader.DOWN))
+              && clusterState.liveNodesContain(shard.getValue().getStr(
+                  ZkStateReader.NODE_NAME_PROP))) {
+            sawLiveRecovering = true;
+          }
+        }
+      }
+      if (!sawLiveRecovering || cnt == timeoutSeconds) {
+        if (!sawLiveRecovering) {
+          if (verbose) System.out.println("no one is recovering");
+        } else {
+          if (verbose) System.out.println("Gave up waiting for recovery to finish..");
+          if (failOnTimeout) {
+            fail("There are still nodes recovering - waited for " + timeoutSeconds + " seconds");
+            // won't get here
+            return;
+          }
+        }
+        cont = false;
+      } else {
+        Thread.sleep(1000);
+      }
+      cnt++;
+    }
+
+    log.info("Recoveries finished - collection: " + collection);
+  }
 }
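
For reference, a minimal sketch of how the hardened test path fits together, using the names that appear in the hunks above (zkClient, collectionName, cloudSolrServer); the try/finally around the ZkStateReader is added here for illustration and is not part of the commit:

    // Sketch: wait for every live replica of the collection to leave the
    // recovering/sync/down states before indexing, so the subsequent query
    // sees a stable cluster instead of racing collection creation.
    ZkStateReader zkStateReader = new ZkStateReader(zkClient);
    try {
      // Polls cluster state once per second, for up to 330 seconds, and
      // fails the test on timeout (failOnTimeout = true).
      waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      cloudSolrServer.add(doc);
      cloudSolrServer.commit();
    } finally {
      zkStateReader.close();
    }

Polling the cluster state this way is a workaround for the shortcoming named in the commit message: the Collections API does not yet report when a newly created collection's replicas are fully active, so the test waits on ZooKeeper state directly instead.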