From 10ecbc5a97acabc130798c8547aea8fd19dcbe7a Mon Sep 17 00:00:00 2001
From: Shalin Shekhar Mangar
Date: Mon, 14 Jul 2014 08:58:54 +0000
Subject: [PATCH] SOLR-6241: Harden the HttpPartitionTest

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1610364 13f79535-47bb-0310-9956-ffa450edef68
---
 solr/CHANGES.txt                              |  2 +
 .../apache/solr/cloud/HttpPartitionTest.java  | 45 ++++++++++++++-----
 2 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 391741d1968..b484091e1aa 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -203,6 +203,8 @@ Other Changes
 * SOLR-6240: Removed unused coreName parameter in ZkStateReader.getReplicaProps.
   (shalin)
 
+* SOLR-6241: Harden the HttpPartitionTest. (shalin)
+
 ================== 4.9.0 ==================
 
 Versions of Major Components

diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 5e28beaaf23..33f81878188 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -25,12 +25,14 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.JSONTestUtil;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
@@ -215,6 +217,16 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 
     // verify all docs received
     assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, numDocs + 3);
+
+    // try to clean up
+    try {
+      CollectionAdminRequest req = new CollectionAdminRequest.Delete();
+      req.setCollectionName(testCollectionName);
+      req.process(cloudClient);
+    } catch (Exception e) {
+      // don't fail the test
+      log.warn("Could not delete collection {} after test completed", testCollectionName);
+    }
   }
 
   protected void testRf3() throws Exception {
@@ -260,7 +272,16 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 
     sendDoc(4);
 
-    assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 4);
+    assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 4);
+    // try to clean up
+    try {
+      CollectionAdminRequest req = new CollectionAdminRequest.Delete();
+      req.setCollectionName(testCollectionName);
+      req.process(cloudClient);
+    } catch (Exception e) {
+      // don't fail the test
+      log.warn("Could not delete collection {} after test completed", testCollectionName);
+    }
   }
 
   protected void testRf3WithLeaderFailover() throws Exception {
@@ -359,17 +380,22 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 
     proxy0.reopen();
 
-    Thread.sleep(10000L);
-
-    cloudClient.getZkStateReader().updateClusterState(true);
-
+    long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
+    while (System.nanoTime() < timeout) {
+      cloudClient.getZkStateReader().updateClusterState(true);
+
+      List<Replica> activeReps = getActiveOrRecoveringReplicas(testCollectionName, "shard1");
+      if (activeReps.size() == 2) break;
+      Thread.sleep(1000);
+    }
+
     List<Replica> activeReps = getActiveOrRecoveringReplicas(testCollectionName, "shard1");
     assertTrue("Expected 2 of 3 replicas to be active but only found "+
-        activeReps.size()+"; "+activeReps+"; clusterState: "+printClusterStateInfo(),
-        activeReps.size() == 2);
-
+        activeReps.size()+"; "+activeReps+"; clusterState: "+printClusterStateInfo(),
+        activeReps.size() == 2);
+
     sendDoc(6);
-
+
     assertDocsExistInAllReplicas(activeReps, testCollectionName, 1, 6);
   }
 
@@ -377,7 +403,6 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     Map<String,Replica> activeReplicas = new HashMap<String,Replica>();
     ZkStateReader zkr = cloudClient.getZkStateReader();
     ClusterState cs = zkr.getClusterState();
-    cs = zkr.getClusterState();
     assertNotNull(cs);
     for (Slice shard : cs.getActiveSlices(testCollectionName)) {
       if (shard.getName().equals(shardId)) {
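
Note (not part of the patch): the core hardening technique above is to replace a fixed
Thread.sleep(10000L) with a bounded polling loop that refreshes cluster state until the
expected condition holds or a deadline passes, plus a best-effort collection delete so a
failed cleanup never fails the test. The following is a minimal, self-contained sketch of
the polling pattern only; WaitForCondition, waitFor, and the stand-in condition are
hypothetical names for illustration and are not part of HttpPartitionTest or the SolrJ API.

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class WaitForCondition {

  /** Polls the condition roughly once per second until it holds or the timeout elapses. */
  public static boolean waitFor(BooleanSupplier condition, long timeoutSeconds)
      throws InterruptedException {
    final long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
    while (System.nanoTime() < deadline) {
      if (condition.getAsBoolean()) {
        return true;               // condition met before the deadline
      }
      Thread.sleep(1000L);         // back off before re-checking, as the patch does
    }
    return condition.getAsBoolean(); // one last check at the deadline
  }

  public static void main(String[] args) throws InterruptedException {
    final long start = System.currentTimeMillis();
    // Stand-in condition: becomes true after about three seconds.
    boolean ok = waitFor(() -> System.currentTimeMillis() - start > 3000L, 60);
    System.out.println("condition met within timeout: " + ok);
  }
}

In the test itself the condition is "two active or recovering replicas reported for shard1",
and the assertTrue after the loop still runs, so hitting the timeout surfaces as a normal
assertion failure rather than an arbitrary fixed-length sleep that may or may not be enough.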