diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index a0ca7eb4234..f4887e6c836 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -38,6 +38,8 @@ import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.servlet.SolrDispatchFilter;
import org.eclipse.jetty.server.Connector;
@@ -293,6 +295,10 @@ public class JettySolrRunner {
return getSolrDispatchFilter().getCores();
}
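+ /** Returns this node's name, as registered in ZooKeeper. Assumes the server is running in SolrCloud mode. */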
+ public String getNodeName() {
+ return getCoreContainer().getZkController().getNodeName();
+ }
+
public boolean isRunning() {
return server.isRunning();
}
@@ -453,6 +459,10 @@ public class JettySolrRunner {
}
}
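+ /** Returns a new HttpSolrClient pointing at this Jetty's base URL. The caller is responsible for closing the client. */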
+ public SolrClient newClient() {
+ return new HttpSolrClient.Builder(getBaseUrl().toString()).build();
+ }
+
public DebugFilter getDebugFilter() {
return (DebugFilter)debugFilter.getFilter();
}
diff --git a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
index 41822a31a4d..af201c0364b 100644
--- a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
+++ b/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
@@ -223,6 +223,8 @@
+
+
diff --git a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml
new file mode 100644
index 00000000000..aab5e811110
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml
@@ -0,0 +1,28 @@
+ <?xml version="1.0" encoding="UTF-8" ?>
+ <!-- Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0 -->
+
+ <schema name="minimal" version="1.1">
+   <fieldType name="string" class="solr.StrField"/>
+   <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+   <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+   <dynamicField name="*" type="string" indexed="true" stored="true"/>
+   <!-- for versioning -->
+   <field name="_version_" type="long" indexed="true" stored="true"/>
+   <field name="_root_" type="string" indexed="true" stored="true" multiValued="false" required="false"/>
+   <field name="id" type="string" indexed="true" stored="true"/>
+   <uniqueKey>id</uniqueKey>
+ </schema>
diff --git a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
new file mode 100644
index 00000000000..88290dac14b
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
@@ -0,0 +1,50 @@
+ <?xml version="1.0" ?>
+ <!-- Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0 -->
+
+ <!-- Minimal solrconfig.xml for an HDFS-backed test configset -->
+
+ <config>
+
+   <directoryFactory name="DirectoryFactory" class="solr.HdfsDirectoryFactory"/>
+
+   <indexConfig>
+     <lockType>hdfs</lockType>
+   </indexConfig>
+
+   <schemaFactory class="ClassicIndexSchemaFactory"/>
+
+   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+   <updateHandler class="solr.DirectUpdateHandler2">
+     <commitWithin>
+       <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+     </commitWithin>
+     <updateLog></updateLog>
+   </updateHandler>
+
+   <requestHandler name="/select" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <bool name="indent">true</bool>
+       <str name="df">text</str>
+     </lst>
+   </requestHandler>
+ </config>
diff --git a/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml
new file mode 100644
index 00000000000..aab5e811110
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml
@@ -0,0 +1,28 @@
+ <?xml version="1.0" encoding="UTF-8" ?>
+ <!-- Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0 -->
+
+ <schema name="minimal" version="1.1">
+   <fieldType name="string" class="solr.StrField"/>
+   <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+   <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+   <dynamicField name="*" type="string" indexed="true" stored="true"/>
+   <!-- for versioning -->
+   <field name="_version_" type="long" indexed="true" stored="true"/>
+   <field name="_root_" type="string" indexed="true" stored="true" multiValued="false" required="false"/>
+   <field name="id" type="string" indexed="true" stored="true"/>
+   <uniqueKey>id</uniqueKey>
+ </schema>
diff --git a/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml
new file mode 100644
index 00000000000..7f27c911f04
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml
@@ -0,0 +1,50 @@
+ <?xml version="1.0" ?>
+ <!-- Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0 -->
+
+ <!-- Minimal solrconfig.xml with JMX monitoring enabled -->
+
+ <config>
+
+   <jmx/>
+
+   <dataDir>${solr.data.dir:}</dataDir>
+
+   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+   <schemaFactory class="ClassicIndexSchemaFactory"/>
+
+   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+   <updateHandler class="solr.DirectUpdateHandler2">
+     <commitWithin>
+       <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+     </commitWithin>
+     <updateLog></updateLog>
+   </updateHandler>
+
+   <requestHandler name="/select" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <bool name="indent">true</bool>
+       <str name="df">text</str>
+     </lst>
+   </requestHandler>
+ </config>
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
index afc7c483fb0..a1c217589ea 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
@@ -16,186 +16,153 @@
*/
package org.apache.solr.cloud;
-import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-import java.util.Properties;
+import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
+import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
@Slow
-public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase {
+public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
- public CollectionTooManyReplicasTest() {
- sliceCount = 1;
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(3)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
+ }
+
+ @Before
+ public void deleteCollections() throws Exception {
+ cluster.deleteAllCollections();
}
@Test
- @ShardsFixed(num = 1)
public void testAddTooManyReplicas() throws Exception {
- String collectionName = "TooManyReplicasInSeveralFlavors";
- CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setNumShards(2)
- .setReplicationFactor(1)
- .setMaxShardsPerNode(2)
- .setStateFormat(2);
+ final String collectionName = "TooManyReplicasInSeveralFlavors";
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .setMaxShardsPerNode(1)
+ .process(cluster.getSolrClient());
- CollectionAdminResponse response = create.process(cloudClient);
- assertEquals(0, response.getStatus());
- assertTrue(response.isSuccess());
- // Now I have the fixed Jetty plus the control instnace, I have two replicas, one for each shard
+ // I have two replicas, one for each shard
// Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode
// Just get the first node any way we can.
// Get a node to use for the "node" parameter.
-
String nodeName = getAllNodeNames(collectionName).get(0);
+
// Add a replica using the "node" parameter (no "too many replicas check")
// this node should have 2 replicas on it
- CollectionAdminRequest.AddReplica addReplicaNode = new CollectionAdminRequest.AddReplica()
- .setCollectionName(collectionName)
- .setShardName("shard1")
- .setNode(nodeName);
- response = addReplicaNode.process(cloudClient);
- assertEquals(0, response.getStatus());
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .setNode(nodeName)
+ .process(cluster.getSolrClient());
// Three replicas so far, should be able to create another one "normally"
- CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica()
- .setCollectionName(collectionName)
- .setShardName("shard1");
-
- response = addReplica.process(cloudClient);
- assertEquals(0, response.getStatus());
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .process(cluster.getSolrClient());
// This one should fail though, no "node" parameter specified
- try {
- addReplica.process(cloudClient);
- fail("Should have thrown an error because the nodes are full");
- } catch (HttpSolrClient.RemoteSolrException se) {
- assertTrue("Should have gotten the right error message back",
- se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
- }
+ Exception e = expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .process(cluster.getSolrClient());
+ });
+
+ assertTrue("Should have gotten the right error message back",
+ e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
// Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
- Properties props = new Properties();
- props.setProperty("name", "bogus2");
- addReplicaNode.setProperties(props);
- response = addReplicaNode.process(cloudClient);
- assertEquals(0, response.getStatus());
+ // TODO: Isn't this a bug?
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .withProperty("name", "bogus2")
+ .setNode(nodeName)
+ .process(cluster.getSolrClient());
- ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
- zkStateReader.forceUpdateCollection(collectionName);
- Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");
-
- Replica rep = null;
- for (Replica rep1 : slice.getReplicas()) { // Silly compiler
- if (rep1.get("core").equals("bogus2")) {
- rep = rep1;
- break;
- }
- }
- assertNotNull("Should have found a replica named 'bogus2'", rep);
- assertEquals("Replica should have been put on correct core", nodeName, rep.getNodeName());
+ DocCollection collectionState = getCollectionState(collectionName);
+ Slice slice = collectionState.getSlice("shard1");
+ Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
+ assertNotNull("Should have found a replica named 'bogus2'", replica);
+ assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
// Shard1 should have 4 replicas
assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
- // And let's fail one more time because to insure that the math doesn't do weird stuff it we have more replicas
+ // And let's fail one more time, to ensure that the math doesn't do weird stuff if we have more replicas
// than simple calcs would indicate.
- try {
- addReplica.process(cloudClient);
- fail("Should have thrown an error because the nodes are full");
- } catch (HttpSolrClient.RemoteSolrException se) {
- assertTrue("Should have gotten the right error message back",
- se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
- }
+ Exception e2 = expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .process(cluster.getSolrClient());
+ });
+
+ assertTrue("Should have gotten the right error message back",
+ e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+ // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
+ waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
+ for (Replica r : c.getReplicas()) {
+ if (r.getState() != Replica.State.ACTIVE)
+ return false;
+ }
+ return true;
+ });
}
@Test
- @ShardsFixed(num = 2)
public void testAddShard() throws Exception {
+
String collectionName = "TooManyReplicasWhenAddingShards";
- CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setReplicationFactor(2)
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
.setMaxShardsPerNode(2)
- .setStateFormat(2)
- .setRouterName("implicit")
- .setShards("shardstart");
+ .process(cluster.getSolrClient());
- NamedList<Object> request = create.process(cloudClient).getResponse();
-
- assertTrue("Could not create the collection", request.get("success") != null);
- // We have two nodes, maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
+ // We have three nodes and maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
// two replicas, but fail on the third.
-
- CollectionAdminRequest.CreateShard createShard = new CollectionAdminRequest.CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shard1");
- CollectionAdminResponse resp = createShard.process(cloudClient);
- assertEquals(0, resp.getStatus());
+ CollectionAdminRequest.createShard(collectionName, "shard1")
+ .process(cluster.getSolrClient());
- // Now we should have one replica on each Jetty, add another to reach maxShardsPerNode
+ // Now add another shard, bringing the nodes up to their maxShardsPerNode limit
-
- createShard = new CollectionAdminRequest.CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shard2");
- resp = createShard.process(cloudClient);
- assertEquals(0, resp.getStatus());
-
+ CollectionAdminRequest.createShard(collectionName, "shard2")
+ .process(cluster.getSolrClient());
// Now fail to add the third as it should exceed maxShardsPerNode
- createShard = new CollectionAdminRequest.CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shard3");
- try {
- createShard.process(cloudClient);
- fail("Should have exceeded the max number of replicas allowed");
- } catch (HttpSolrClient.RemoteSolrException se) {
- assertTrue("Should have gotten the right error message back",
- se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
- }
+ Exception e = expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.createShard(collectionName, "shard3")
+ .process(cluster.getSolrClient());
+ });
+ assertTrue("Should have gotten the right error message back",
+ e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
// Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
List<String> nodes = getAllNodeNames(collectionName);
- createShard = new CollectionAdminRequest.CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shard4")
- .setNodeSet(StringUtils.join(nodes, ","));
- resp = createShard.process(cloudClient);
- assertEquals(0, resp.getStatus());
+ CollectionAdminRequest.createShard(collectionName, "shard4")
+ .setNodeSet(StringUtils.join(nodes, ","))
+ .process(cluster.getSolrClient());
// And just for yucks, ensure we fail the "regular" one again.
- createShard = new CollectionAdminRequest.CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shard5");
- try {
- createShard.process(cloudClient);
- fail("Should have exceeded the max number of replicas allowed");
- } catch (HttpSolrClient.RemoteSolrException se) {
- assertTrue("Should have gotten the right error message back",
- se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
- }
+ Exception e2 = expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.createShard(collectionName, "shard5")
+ .process(cluster.getSolrClient());
+ });
+ assertTrue("Should have gotten the right error message back",
+ e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
// And finally, ensure that all the replicas we expect are present. We should have shards 1, 2 and 4, and each
// should have exactly two replicas
- ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
- zkStateReader.forceUpdateCollection(collectionName);
- Map<String, Slice> slices = zkStateReader.getClusterState().getSlicesMap(collectionName);
+ waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
+ return DocCollection.isFullyActive(n, c, 4, 2);
+ });
+ Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
assertEquals("There should be exaclty four slices", slices.size(), 4);
assertNotNull("shardstart should exist", slices.get("shardstart"));
assertNotNull("shard1 should exist", slices.get("shard1"));
@@ -209,82 +176,46 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
}
@Test
- @ShardsFixed(num = 2)
public void testDownedShards() throws Exception {
String collectionName = "TooManyReplicasWhenAddingDownedNode";
- CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
- .setCollectionName(collectionName)
- .setReplicationFactor(1)
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
.setMaxShardsPerNode(2)
- .setStateFormat(2)
- .setRouterName("implicit")
- .setShards("shardstart");
+ .process(cluster.getSolrClient());
- NamedList<Object> request = create.process(cloudClient).getResponse();
+ // Shut down a Jetty, I really don't care which
+ JettySolrRunner jetty = cluster.getRandomJetty(random());
+ String deadNode = jetty.getBaseUrl().toString();
+ cluster.stopJettySolrRunner(jetty);
- assertTrue("Could not create the collection", request.get("success") != null);
- try (SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
- AbstractZkTestCase.TIMEOUT)) {
+ try {
- List<String> liveNodes = zkClient.getChildren("/live_nodes", null, true);
-
- // Shut down a Jetty, I really don't care which
- JettySolrRunner downJetty = jettys.get(r.nextInt(2));
-
- downJetty.stop();
- List<String> liveNodesNow = null;
- for (int idx = 0; idx < 150; ++idx) {
- liveNodesNow = zkClient.getChildren("/live_nodes", null, true);
- if (liveNodesNow.size() != liveNodes.size()) break;
- Thread.sleep(100);
- }
- List<String> deadNodes = new ArrayList<>(liveNodes);
- assertTrue("Should be a downed node", deadNodes.removeAll(liveNodesNow));
- liveNodes.removeAll(deadNodes);
-
- //OK, we've killed a node. Insure we get errors when we ask to create a replica or shard that involves it.
- // First try adding a replica to the downed node.
- CollectionAdminRequest.AddReplica addReplicaNode = new CollectionAdminRequest.AddReplica()
- .setCollectionName(collectionName)
- .setShardName("shardstart")
- .setNode(deadNodes.get(0));
-
- try {
- addReplicaNode.process(cloudClient);
- fail("Should have gotten an exception");
- } catch (HttpSolrClient.RemoteSolrException se) {
- assertTrue("Should have gotten a message about shard not ",
- se.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
- }
+ // Adding a replica on a dead node should fail
+ Exception e1 = expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
+ .setNode(deadNode)
+ .process(cluster.getSolrClient());
+ });
+ assertTrue("Should have gotten a message about shard not ",
+ e1.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
// Should also die if we just add a shard
- CollectionAdminRequest.CreateShard createShard = new CollectionAdminRequest.CreateShard()
- .setCollectionName(collectionName)
- .setShardName("shard1")
- .setNodeSet(deadNodes.get(0));
- try {
- createShard.process(cloudClient);
- fail("Should have gotten an exception");
- } catch (HttpSolrClient.RemoteSolrException se) {
- assertTrue("Should have gotten a message about shard not ",
- se.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
- }
- //downJetty.start();
+ Exception e2 = expectThrows(Exception.class, () -> {
+ CollectionAdminRequest.createShard(collectionName, "shard1")
+ .setNodeSet(deadNode)
+ .process(cluster.getSolrClient());
+ });
+
+ assertTrue("Should have gotten a message about shard not ",
+ e2.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
+ }
+ finally {
+ cluster.startJettySolrRunner(jetty);
}
}
private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
- ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
- zkStateReader.forceUpdateCollection(collectionName);
- Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");
-
- List<String> nodes = new ArrayList<>();
- for (Replica rep : slice.getReplicas()) {
- nodes.add(rep.getNodeName());
- }
-
- assertTrue("Should have some nodes!", nodes.size() > 0);
- return nodes;
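+ // NB unlike the old shard1-only lookup, this returns every node hosting a replica of the collection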
+ DocCollection state = getCollectionState(collectionName);
+ return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
}
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 826a8e5f6dd..c1ad2bd1e49 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -19,9 +19,7 @@ package org.apache.solr.cloud;
import javax.management.MBeanServer;
import javax.management.MBeanServerFactory;
import javax.management.ObjectName;
-import java.io.File;
import java.io.IOException;
-import java.lang.invoke.MethodHandles;
import java.lang.management.ManagementFactory;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -31,16 +29,14 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
+import com.google.common.collect.ImmutableList;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrClient;
@@ -54,453 +50,224 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrInfoMBean.Category;
import org.apache.solr.util.TestInjection;
import org.apache.solr.util.TimeOut;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.util.Utils.makeMap;
/**
* Tests the Cloud Collections API.
*/
@Slow
-public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase {
+public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
- private static final String DEFAULT_COLLECTION = "collection1";
-
- // we randomly use a second config set rather than just one
- private boolean secondConfigSet = random().nextBoolean();
-
@BeforeClass
public static void beforeCollectionsAPIDistributedZkTest() {
TestInjection.randomDelayInCoreCreation = "true:20";
System.setProperty("validateAfterInactivity", "200");
}
-
- @Override
- public void distribSetUp() throws Exception {
- super.distribSetUp();
-
- if (secondConfigSet ) {
- String zkHost = zkServer.getZkHost();
- String zkAddress = zkServer.getZkAddress();
- SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
- zkClient.makePath("/solr", false, true);
- zkClient.close();
- zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT);
-
- File solrhome = new File(TEST_HOME());
-
- // for now, always upload the config and schema to the canonical names
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, getCloudSolrConfig(), "solrconfig.xml");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "schema.xml", "schema.xml");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "enumsConfig.xml", "enumsConfig.xml");
-
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "solrconfig.snippet.randomindexconfig.xml");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "stopwords.txt");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "protwords.txt");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "currency.xml");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "open-exchange-rates.json");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "mapping-ISOLatin1Accent.txt");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "old_synonyms.txt");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "synonyms.txt");
- AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "elevate.xml");
- zkClient.close();
- }
- }
-
- protected String getSolrXml() {
- return "solr.xml";
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(4)
+ .addConfig("conf", configset("cloud-minimal"))
+ .addConfig("conf2", configset("cloud-minimal-jmx"))
+ .configure();
}
-
- public CollectionsAPIDistributedZkTest() {
- sliceCount = 2;
- }
-
- @Override
- protected void setDistributedParams(ModifiableSolrParams params) {
-
- if (r.nextBoolean()) {
- // don't set shards, let that be figured out from the cloud state
- } else {
- // use shard ids rather than physical locations
- StringBuilder sb = new StringBuilder();
- for (int i = 0; i < getShardCount(); i++) {
- if (i > 0)
- sb.append(',');
- sb.append("shard" + (i + 3));
- }
- params.set("shards", sb.toString());
- }
+ @Before
+ public void clearCluster() throws Exception {
+ cluster.deleteAllCollections();
}
@Test
- @ShardsFixed(num = 4)
- public void test() throws Exception {
- waitForRecoveriesToFinish(false); // we need to fix no core tests still
- testNodesUsedByCreate();
- testNoConfigSetExist();
- testCollectionsAPI();
- testCollectionsAPIAddRemoveStress();
- testErrorHandling();
- testNoCollectionSpecified();
- deletePartiallyCreatedCollection();
- deleteCollectionRemovesStaleZkCollectionsNode();
- clusterPropTest();
- // last
- deleteCollectionWithDownNodes();
- addReplicaTest();
+ public void testCreationAndDeletion() throws Exception {
+
+ String collectionName = "created_and_deleted";
+
+ CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+ assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+ .contains(collectionName));
+
+ CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+ .contains(collectionName));
+
+ assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+
}
- private void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
-
- // we can use this client because we just want base url
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
+ @Test
+ public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
String collectionName = "out_of_sync_collection";
+
+ // manually create a collections zknode
+ cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+ CollectionAdminRequest.deleteCollection(collectionName)
+ .process(cluster.getSolrClient());
+
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+ .contains(collectionName));
- List<Integer> numShardsNumReplicaList = new ArrayList<>();
- numShardsNumReplicaList.add(2);
- numShardsNumReplicaList.add(1);
-
-
- cloudClient.getZkStateReader().getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- // there are remnants of the collection in zk, should work
- makeRequest(baseUrl, request);
-
- assertCollectionNotExists(collectionName, 45);
-
- assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+ assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
}
- private void deletePartiallyCreatedCollection() throws Exception {
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
- String collectionName = "halfdeletedcollection";
+ @Test
+ public void deletePartiallyCreatedCollection() throws Exception {
+
+ final String collectionName = "halfdeletedcollection";
+
+ // create a core that simulates something left over from a partially-deleted collection
Create createCmd = new Create();
createCmd.setCoreName("halfdeletedcollection_shard1_replica1");
createCmd.setCollection(collectionName);
+ createCmd.setCollectionConfigName("conf");
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(2);
- if (secondConfigSet) {
- createCmd.setCollectionConfigName("conf1");
- }
- makeRequest(baseUrl, createCmd);
+ createCmd.process(cluster.getSolrClient());
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
+ CollectionAdminRequest.deleteCollection(collectionName)
+ .process(cluster.getSolrClient());
- makeRequest(baseUrl, request);
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
- assertCollectionNotExists(collectionName, 45);
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .process(cluster.getSolrClient());
+
+ assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+ }
+
+ @Test
+ public void deleteCollectionOnlyInZk() throws Exception {
+
+ final String collectionName = "onlyinzk";
+
+ // create the collections node, but nothing else
+ cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+ // delete via API - should remove collections node
+ CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+ assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
// now creating that collection should work
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
- params.set("name", collectionName);
- params.set("numShards", 2);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- makeRequest(baseUrl, request);
- }
-
- private void deleteCollectionOnlyInZk() throws Exception {
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
- String collectionName = "onlyinzk";
-
- cloudClient.getZkStateReader().getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- makeRequest(baseUrl, request);
-
- assertCollectionNotExists(collectionName, 45);
-
- // now creating that collection should work
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
- params.set("name", collectionName);
- params.set("numShards", 2);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- makeRequest(baseUrl, request);
-
- waitForRecoveriesToFinish(collectionName, false);
-
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- makeRequest(baseUrl, request);
- }
-
- private void deleteCollectionWithUnloadedCore() throws Exception {
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-
- String collectionName = "corealreadyunloaded";
- try (SolrClient client = createNewSolrClient("", baseUrl)) {
- createCollection(null, collectionName, 2, 1, 2, client, null, "conf1");
- }
- waitForRecoveriesToFinish(collectionName, false);
-
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- NamedList<Object> result = makeRequest(baseUrl, request);
- System.out.println("result:" + result);
- Object failure = result.get("failure");
- assertNull("We expect no failures", failure);
-
- assertCollectionNotExists(collectionName, 45);
-
- // now creating that collection should work
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
- params.set("name", collectionName);
- params.set("numShards", 2);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- makeRequest(baseUrl, request);
-
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- makeRequest(baseUrl, request);
- }
-
-
- private void deleteCollectionWithDownNodes() throws Exception {
- String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
- // now try to remove a collection when a couple of its nodes are down
- if (secondConfigSet) {
- try (SolrClient client = createNewSolrClient("", baseUrl)) {
- createCollection(null, "halfdeletedcollection2", 3, 3, 6, client, null, "conf2");
- }
- } else {
- try (SolrClient client = createNewSolrClient("", baseUrl)) {
- createCollection(null, "halfdeletedcollection2", 3, 3, 6, client, null);
- }
- }
-
- waitForRecoveriesToFinish("halfdeletedcollection2", false);
-
- // stop a couple nodes
- ChaosMonkey.stop(jettys.get(0));
- ChaosMonkey.stop(jettys.get(1));
-
- // wait for leaders to settle out
- for (int i = 1; i < 4; i++) {
- cloudClient.getZkStateReader().getLeaderRetry("halfdeletedcollection2", "shard" + i, 30000);
- }
-
- baseUrl = getBaseUrl((HttpSolrClient) clients.get(2));
-
- // remove a collection
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", "halfdeletedcollection2");
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- makeRequest(baseUrl, request);
-
- TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
- while (cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2")) {
- if (timeout.hasTimedOut()) {
- throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
- }
-
- Thread.sleep(200);
- }
-
- assertFalse("Still found collection that should be gone", cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2"));
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+ .process(cluster.getSolrClient());
+ assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
}
- private NamedList<Object> makeRequest(String baseUrl, SolrRequest request, int socketTimeout)
- throws SolrServerException, IOException {
- try (SolrClient client = createNewSolrClient("", baseUrl)) {
- ((HttpSolrClient) client).setSoTimeout(socketTimeout);
- return client.request(request);
- }
- }
+ @Test
+ public void testBadActionNames() throws Exception {
- private NamedList<Object> makeRequest(String baseUrl, SolrRequest request)
- throws SolrServerException, IOException {
- try (SolrClient client = createNewSolrClient("", baseUrl)) {
- ((HttpSolrClient) client).setSoTimeout(30000);
- return client.request(request);
- }
- }
-
- private void testErrorHandling() throws Exception {
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-
// try a bad action
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", "BADACTION");
String collectionName = "badactioncollection";
params.set("name", collectionName);
params.set("numShards", 2);
- QueryRequest request = new QueryRequest(params);
+ final QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- boolean gotExp = false;
- try {
- makeRequest(baseUrl, request);
- } catch (SolrException e) {
- gotExp = true;
- }
- assertTrue(gotExp);
-
-
- // leave out required param name
- params = new ModifiableSolrParams();
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+
+ }
+
+ @Test
+ public void testMissingRequiredParameters() {
+
+ ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionAction.CREATE.toString());
params.set("numShards", 2);
- collectionName = "collection";
- // No Name
- // params.set("name", collectionName);
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- request = new QueryRequest(params);
+ // missing required collection parameter
+ final SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- gotExp = false;
- try {
- makeRequest(baseUrl, request);
- } catch (SolrException e) {
- gotExp = true;
- }
- assertTrue(gotExp);
-
- // Too many replicas
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
- collectionName = "collection";
- params.set("name", collectionName);
- params.set("numShards", 2);
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- params.set(REPLICATION_FACTOR, 10);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
- gotExp = false;
- try {
- makeRequest(baseUrl, request);
- } catch (SolrException e) {
- gotExp = true;
- }
- assertTrue(gotExp);
-
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+ }
+
+ @Test
+ public void testTooManyReplicas() {
+
+ CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(req);
+ });
+
+ }
+
+ @Test
+ public void testMissingNumShards() {
+
// No numShards should fail
- params = new ModifiableSolrParams();
+ ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionAction.CREATE.toString());
- collectionName = "acollection";
- params.set("name", collectionName);
+ params.set("name", "acollection");
params.set(REPLICATION_FACTOR, 10);
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- request = new QueryRequest(params);
+ params.set("collection.configName", "conf");
+
+ final SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- gotExp = false;
- try {
- makeRequest(baseUrl, request);
- } catch (SolrException e) {
- gotExp = true;
- }
- assertTrue(gotExp);
-
- // 0 numShards should fail
- params = new ModifiableSolrParams();
+
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+
+ }
+
+ @Test
+ public void testZeroNumShards() {
+
+ ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionAction.CREATE.toString());
- collectionName = "acollection";
- params.set("name", collectionName);
+ params.set("name", "acollection");
params.set(REPLICATION_FACTOR, 10);
params.set("numShards", 0);
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- request = new QueryRequest(params);
+ params.set("collection.configName", "conf");
+
+ final SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- gotExp = false;
- try {
- makeRequest(baseUrl, request);
- } catch (SolrException e) {
- gotExp = true;
- }
- assertTrue(gotExp);
-
- // Fail on one node
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(request);
+ });
+
+ }
+
+ @Test
+ public void testCreateShouldFailOnExistingCore() throws Exception {
// first we make a core with the core name the collections api
// will try and use - this will cause the create to fail
@@ -510,43 +277,33 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
- if (secondConfigSet) {
- createCmd.setCollectionConfigName("conf1");
+ createCmd.setCollectionConfigName("conf");
+
+ try (SolrClient client = cluster.getJettySolrRunner(0).newClient()) {
+ client.request(createCmd);
}
- makeRequest(baseUrl, createCmd);
-
+
createCmd = new Create();
createCmd.setCoreName("halfcollection_shard1_replica1");
createCmd.setCollection("halfcollectionblocker2");
dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
- if (secondConfigSet) {
- createCmd.setCollectionConfigName("conf1");
+ createCmd.setCollectionConfigName("conf");
+
+ try (SolrClient client = cluster.getJettySolrRunner(1).newClient()) {
+ client.request(createCmd);
}
- makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
+
+ String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+ String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
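+ // one of the pre-created cores above collides with a replica name, so we expect one shard to succeed and one to fail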
+ CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
+ .setCreateNodeSet(nn1 + "," + nn2)
+ .process(cluster.getSolrClient());
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
- collectionName = "halfcollection";
- params.set("name", collectionName);
- params.set("numShards", 2);
- params.set("wt", "xml");
-
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
-
- String nn1 = jettys.get(0).getCoreContainer().getZkController().getNodeName();
- String nn2 = jettys.get(1).getCoreContainer().getZkController().getNodeName();
-
- params.set(OverseerCollectionMessageHandler.CREATE_NODE_SET, nn1 + "," + nn2);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
- NamedList<Object> resp = makeRequest(baseUrl, request, 60000);
-
- SimpleOrderedMap success = (SimpleOrderedMap) resp.get("success");
- SimpleOrderedMap failure = (SimpleOrderedMap) resp.get("failure");
+ SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
+ SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
assertNotNull(resp.toString(), success);
assertNotNull(resp.toString(), failure);
@@ -555,10 +312,14 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
String val2 = failure.getVal(0).toString();
assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
}
-
- private void testNoCollectionSpecified() throws Exception {
- assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
- assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
+
+ @Test
+ public void testNoCollectionSpecified() throws Exception {
+
+ // TODO - should we remove this behaviour?
+
+ assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
+ assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
// try and create a SolrCore with no collection name
Create createCmd = new Create();
@@ -567,26 +328,28 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
- if (secondConfigSet) {
- createCmd.setCollectionConfigName("conf1");
- }
+ createCmd.setCollectionConfigName("conf");
- makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
+ cluster.getSolrClient().request(createCmd);
// try and create a SolrCore with no collection name
createCmd.setCollection(null);
createCmd.setCoreName("corewithnocollection2");
- makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
+ cluster.getSolrClient().request(createCmd);
// in both cases, the collection should have defaulted to the core name
- cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection");
- cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection2");
- assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
- assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
+ cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection");
+ cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection2");
+ assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
+ assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
}
- private void testNoConfigSetExist() throws Exception {
+ @Test
+ public void testNoConfigSetExist() throws Exception {
+
+ final CloudSolrClient cloudClient = cluster.getSolrClient();
+
assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));
// try and create a SolrCore with a configset that does not exist
@@ -597,14 +360,11 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
createCmd.setCollectionConfigName("conf123");
- boolean gotExp = false;
- try {
- makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
- } catch (SolrException e) {
- gotExp = true;
- }
- assertTrue(gotExp);
+ expectThrows(Exception.class, () -> {
+ cluster.getSolrClient().request(createCmd);
+ });
+
TimeUnit.MILLISECONDS.sleep(200);
// the collection should have defaulted to the core name
cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection3");
@@ -618,401 +378,162 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
}
assertEquals("replicaCount", 0, replicaCount);
- CollectionAdminRequest.List list = new CollectionAdminRequest.List();
- CollectionAdminResponse res = new CollectionAdminResponse();
- res.setResponse(makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), list));
- List<String> collections = (List<String>) res.getResponse().get("collections");
- assertTrue(collections.contains("corewithnocollection3"));
+ // TODO - WTF? shouldn't this *not* contain the collection?
+ assertTrue(CollectionAdminRequest.listCollections(cloudClient).contains("corewithnocollection3"));
+
}
- private void testNodesUsedByCreate() throws Exception {
- // we can use this client because we just want base url
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
+ @Test
+ public void testCoresAreDistributedAcrossNodes() throws Exception {
- params.set("numShards", 2);
- params.set(REPLICATION_FACTOR, 2);
- String collectionName = "nodes_used_collection";
+ CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
+ .process(cluster.getSolrClient());
- params.set("name", collectionName);
-
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
-
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
- makeRequest(baseUrl, request);
-
- List<Integer> numShardsNumReplicaList = new ArrayList<>();
- numShardsNumReplicaList.add(2);
- numShardsNumReplicaList.add(2);
- checkForCollection("nodes_used_collection", numShardsNumReplicaList , null);
+ Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
List<String> createNodeList = new ArrayList<>();
+ createNodeList.addAll(liveNodes);
- Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState()
- .getLiveNodes();
-
- for (String node : liveNodes) {
- createNodeList.add(node);
- }
-
- DocCollection col = cloudClient.getZkStateReader().getClusterState().getCollection("nodes_used_collection");
- Collection<Slice> slices = col.getSlices();
- for (Slice slice : slices) {
- Collection<Replica> replicas = slice.getReplicas();
- for (Replica replica : replicas) {
+ DocCollection collection = getCollectionState("nodes_used_collection");
+ for (Slice slice : collection.getSlices()) {
+ for (Replica replica : slice.getReplicas()) {
createNodeList.remove(replica.getNodeName());
}
}
- assertEquals(createNodeList.toString(), 1, createNodeList.size());
+
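+ // with 2 shards x 2 replicas spread across the 4-node cluster, every live node should host a replica, leaving the list empty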
+ assertEquals(createNodeList.toString(), 0, createNodeList.size());
}
- private void testCollectionsAPI() throws Exception {
+ @Test
+ public void testDeleteNonExistentCollection() throws Exception {
- boolean disableLegacy = random().nextBoolean();
- CloudSolrClient client1 = null;
+ SolrException e = expectThrows(SolrException.class, () -> {
+ CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+ });
- if (disableLegacy) {
- log.info("legacyCloud=false");
- client1 = createCloudClient(null);
- setClusterProp(client1, ZkStateReader.LEGACY_CLOUD, "false");
+ // create another collection should still work
+ CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
+ .process(cluster.getSolrClient());
+ waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
+ (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+ }
+
+ @Test
+ public void testSpecificConfigsets() throws Exception {
+ CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
+ byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
+ assertNotNull(data);
+ ZkNodeProps props = ZkNodeProps.load(data);
+ String configName = props.getStr(ZkController.CONFIGNAME_PROP);
+ assertEquals("conf2", configName);
+ }
+
+ @Test
+ public void testMaxNodesPerShard() throws Exception {
+
+ // test maxShardsPerNode
+ int numLiveNodes = cluster.getJettySolrRunners().size();
+ int numShards = (numLiveNodes/2) + 1;
+ int replicationFactor = 2;
+ int maxShardsPerNode = 1;
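+ // numShards * replicationFactor works out to numLiveNodes + 2, which exceeds the numLiveNodes * maxShardsPerNode slots available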
+
+ SolrException e = expectThrows(SolrException.class, () -> {
+ CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
+ .process(cluster.getSolrClient());
+ });
+
+ }
+
+ @Test
+ public void testCreateNodeSet() throws Exception {
+
+ JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+ JettySolrRunner jetty2 = cluster.getRandomJetty(random());
+
+ List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
+
+ CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+ .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+ .process(cluster.getSolrClient());
+
+ DocCollection collectionState = getCollectionState("nodeset_collection");
+ for (Replica replica : collectionState.getReplicas()) {
+ String replicaUrl = replica.getCoreUrl();
+ boolean matchingJetty = false;
+ for (String jettyUrl : baseUrls) {
+ if (replicaUrl.startsWith(jettyUrl))
+ matchingJetty = true;
+ }
+ if (matchingJetty == false)
+ fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
}
- // TODO: fragile - because we dont pass collection.confName, it will only
- // find a default if a conf set with a name matching the collection name is found, or
- // if there is only one conf set. That and the fact that other tests run first in this
- // env make this pretty fragile
-
+ }
+
+ @Test
+ public void testCollectionsAPI() throws Exception {
+
// create new collections rapid fire
- Map<String, List<Integer>> collectionInfos = new HashMap<>();
int cnt = random().nextInt(TEST_NIGHTLY ? 6 : 1) + 1;
-
+ CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+
for (int i = 0; i < cnt; i++) {
- int numShards = TestUtil.nextInt(random(), 0, getShardCount()) + 1;
+
+ int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
- int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+ int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
-
- CloudSolrClient client = null;
- try {
- if (i == 0) {
- // Test if we can create a collection through CloudSolrServer where
- // you havnt set default-collection
- // This is nice because you want to be able to create you first
- // collection using CloudSolrServer, and in such case there is
- // nothing reasonable to set as default-collection
- client = createCloudClient(null);
- } else if (i == 1) {
- // Test if we can create a collection through CloudSolrServer where
- // you have set default-collection to a non-existing collection
- // This is nice because you want to be able to create you first
- // collection using CloudSolrServer, and in such case there is
- // nothing reasonable to set as default-collection, but you might want
- // to use the same CloudSolrServer throughout the entire
- // lifetime of your client-application, so it is nice to be able to
- // set a default-collection on this CloudSolrServer once and for all
- // and use this CloudSolrServer to create the collection
- client = createCloudClient("awholynewcollection_" + i);
- }
- if (secondConfigSet) {
- createCollection(collectionInfos, "awholynewcollection_" + i,
- numShards, replicationFactor, maxShardsPerNode, client, null, "conf2");
- } else {
- createCollection(collectionInfos, "awholynewcollection_" + i,
- numShards, replicationFactor, maxShardsPerNode, client, null);
- }
- } finally {
- if (client != null) client.close();
- }
- }
-
- Set<Entry<String, List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
- for (Entry<String, List<Integer>> entry : collectionInfosEntrySet) {
- String collection = entry.getKey();
- List<Integer> list = entry.getValue();
- checkForCollection(collection, list, null);
-
- String url = getUrlFromZk(collection);
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // poll for a second - it can take a moment before we are ready to serve
- waitForNon403or404or503(collectionClient);
- }
- }
-
- // sometimes we restart one of the jetty nodes
- if (random().nextBoolean()) {
- JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
- ChaosMonkey.stop(jetty);
- log.info("============ Restarting jetty");
- ChaosMonkey.start(jetty);
-
- for (Entry<String, List<Integer>> entry : collectionInfosEntrySet) {
- String collection = entry.getKey();
- List<Integer> list = entry.getValue();
- checkForCollection(collection, list, null);
-
- String url = getUrlFromZk(collection);
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // poll for a second - it can take a moment before we are ready to serve
- waitForNon403or404or503(collectionClient);
- }
- }
+ createRequests[i]
+ = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
+ .setMaxShardsPerNode(maxShardsPerNode);
+ createRequests[i].processAsync(cluster.getSolrClient());
}
- // sometimes we restart zookeeper
- if (random().nextBoolean()) {
- zkServer.shutdown();
- log.info("============ Restarting zookeeper");
- zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
- zkServer.run();
- }
-
- // sometimes we cause a connection loss - sometimes it will hit the overseer
- if (random().nextBoolean()) {
- JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
- ChaosMonkey.causeConnectionLoss(jetty);
- }
-
- ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
- for (int j = 0; j < cnt; j++) {
- waitForRecoveriesToFinish("awholynewcollection_" + j, zkStateReader, false);
-
- if (secondConfigSet) {
- // let's see if they are using the second config set
- byte[] data = zkStateReader.getZkClient()
- .getData(
- ZkStateReader.COLLECTIONS_ZKNODE + "/" + "awholynewcollection_"
- + j, null, null, true);
- assertNotNull(data);
- ZkNodeProps props = ZkNodeProps.load(data);
- String configName = props.getStr(ZkController.CONFIGNAME_PROP);
- assertEquals("conf2", configName);
-
- }
- }
-
- checkInstanceDirs(jettys.get(0));
-
- List<String> collectionNameList = new ArrayList<>();
- collectionNameList.addAll(collectionInfos.keySet());
- String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
-
- String url = getUrlFromZk(collectionName);
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-
- // lets try and use the solrj client to index a couple documents
- SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1,
- "humpty dumpy sat on a wall");
- SolrInputDocument doc2 = getDoc(id, 7, i1, -600, tlong, 600, t1,
- "humpty dumpy3 sat on a walls");
- SolrInputDocument doc3 = getDoc(id, 8, i1, -600, tlong, 600, t1,
- "humpty dumpy2 sat on a walled");
-
- collectionClient.add(doc1);
-
- collectionClient.add(doc2);
-
- collectionClient.add(doc3);
-
- collectionClient.commit();
-
- assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
+ for (int i = 0; i < cnt; i++) {
+ String collectionName = "awhollynewcollection_" + i;
+ final int j = i;
+ waitForState("Expected to see collection " + collectionName, collectionName,
+ (n, c) -> {
+ CollectionAdminRequest.Create req = createRequests[j];
+ return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+ });
}
- // lets try a collection reload
-
+ cluster.injectChaos(random());
+
+ for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+ checkInstanceDirs(cluster.getJettySolrRunner(i));
+ }
+
+ String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
+
+ new UpdateRequest()
+ .add("id", "6")
+ .add("id", "7")
+ .add("id", "8")
+ .commit(cluster.getSolrClient(), collectionName);
+ assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+
+ checkNoTwoShardsUseTheSameIndexDir();
+ }
+
+ @Test
+ public void testCollectionReload() throws Exception {
+
+ final String collectionName = "reloaded_collection";
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
+
// get core open times
- Map<String,Long> urlToTimeBefore = new HashMap<>();
+ Map<String, Long> urlToTimeBefore = new HashMap<>();
collectStartTimes(collectionName, urlToTimeBefore);
assertTrue(urlToTimeBefore.size() > 0);
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.RELOAD.toString());
- params.set("name", collectionName);
- QueryRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- // we can use this client because we just want base url
- final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-
- makeRequest(baseUrl, request);
+
+ CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
// reloads may take a short while
boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
assertTrue("some core start times did not change on reload", allTimesAreCorrect);
-
-
- waitForRecoveriesToFinish("awholynewcollection_" + (cnt - 1), zkStateReader, false);
-
- // remove a collection
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", collectionName);
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- makeRequest(baseUrl, request);
-
- // ensure its out of the state
- assertCollectionNotExists(collectionName, 45);
-
- //collectionNameList.remove(collectionName);
-
- // remove an unknown collection
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.DELETE.toString());
- params.set("name", "unknown_collection");
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
-
- boolean exp = false;
- try {
- makeRequest(baseUrl, request);
- } catch (SolrException e) {
- exp = true;
- }
- assertTrue("Expected exception", exp);
-
- // create another collection should still work
- params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATE.toString());
-
- params.set("numShards", 1);
- params.set(REPLICATION_FACTOR, 2);
- collectionName = "acollectionafterbaddelete";
-
- params.set("name", collectionName);
- if (secondConfigSet) {
- params.set("collection.configName", "conf1");
- }
- request = new QueryRequest(params);
- request.setPath("/admin/collections");
- makeRequest(baseUrl, request);
-
- List<Integer> list = new ArrayList<>(2);
- list.add(1);
- list.add(2);
- checkForCollection(collectionName, list, null);
-
- url = getUrlFromZk(collectionName);
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // poll for a second - it can take a moment before we are ready to serve
- waitForNon403or404or503(collectionClient);
- }
-
- for (int j = 0; j < cnt; j++) {
- waitForRecoveriesToFinish(collectionName, zkStateReader, false);
- }
-
- // test maxShardsPerNode
- int numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
- int numShards = (numLiveNodes/2) + 1;
- int replicationFactor = 2;
- int maxShardsPerNode = 1;
- collectionInfos = new HashMap<>();
- try (CloudSolrClient client = createCloudClient("awholynewcollection_" + cnt)) {
- exp = false;
- try {
- createCollection(collectionInfos, "awholynewcollection_" + cnt,
- numShards, replicationFactor, maxShardsPerNode, client, null, "conf1");
- } catch (SolrException e) {
- exp = true;
- }
- assertTrue("expected exception", exp);
- }
-
-
- // Test createNodeSet
- numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
- List<String> createNodeList = new ArrayList<>();
- int numOfCreateNodes = numLiveNodes/2;
- assertFalse("createNodeSet test is pointless with only " + numLiveNodes + " nodes running", numOfCreateNodes == 0);
- int i = 0;
- for (String liveNode : getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes()) {
- if (i < numOfCreateNodes) {
- createNodeList.add(liveNode);
- i++;
- } else {
- break;
- }
- }
- maxShardsPerNode = 2;
- numShards = createNodeList.size() * maxShardsPerNode;
- replicationFactor = 1;
- collectionInfos = new HashMap<>();
-
- try (SolrClient client = createCloudClient("awholynewcollection_" + (cnt+1))) {
- CollectionAdminResponse res = createCollection(collectionInfos, "awholynewcollection_" + (cnt+1), numShards, replicationFactor, maxShardsPerNode, client, StrUtils.join(createNodeList, ','), "conf1");
- assertTrue(res.isSuccess());
- }
- checkForCollection(collectionInfos.keySet().iterator().next(), collectionInfos.entrySet().iterator().next().getValue(), createNodeList);
-
- checkNoTwoShardsUseTheSameIndexDir();
- if(disableLegacy) {
- setClusterProp(client1, ZkStateReader.LEGACY_CLOUD, null);
- client1.close();
- }
- }
-
- private void testCollectionsAPIAddRemoveStress() throws Exception {
-
- class CollectionThread extends Thread {
-
- public CollectionThread(String name) {
- super(name);
- }
-
- public void run() {
- // create new collections rapid fire
- Map<String,List<Integer>> collectionInfos = new HashMap<>();
- int cnt = random().nextInt(TEST_NIGHTLY ? 13 : 1) + 1;
-
- for (int i = 0; i < cnt; i++) {
- String collectionName = "awholynewstresscollection_" + getName() + "_" + i;
- int numShards = TestUtil.nextInt(random(), 0, getShardCount() * 2) + 1;
- int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
- int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
- try (CloudSolrClient client = createCloudClient(i == 1 ? collectionName : null)) {
-
- createCollection(collectionInfos, collectionName,
- numShards, replicationFactor, maxShardsPerNode, client, null,
- "conf1");
-
- // remove collection
- CollectionAdminRequest.Delete delete = new CollectionAdminRequest.Delete()
- .setCollectionName(collectionName);
- client.request(delete);
- } catch (SolrServerException | IOException e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
- }
- }
- List<Thread> threads = new ArrayList<>();
- int numThreads = TEST_NIGHTLY ? 6 : 2;
- for (int i = 0; i < numThreads; i++) {
- CollectionThread thread = new CollectionThread("collection" + i);
- threads.add(thread);
- }
-
- for (Thread thread : threads) {
- thread.start();
- }
- for (Thread thread : threads) {
- thread.join();
- }
}
private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
@@ -1024,7 +545,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
Path instancedir = (Path) core.getStatistics().get("instanceDir");
assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
- Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve("cores").resolve(core.getName());
+ Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
@@ -1061,23 +582,14 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
return allTimesAreCorrect;
}
- private void collectStartTimes(String collectionName,
- Map<String,Long> urlToTime) throws SolrServerException, IOException {
- ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader()
- .getClusterState();
-// Map collections = clusterState.getCollectionStates();
- if (clusterState.hasCollection(collectionName)) {
- Map<String,Slice> slices = clusterState.getSlicesMap(collectionName);
+ private void collectStartTimes(String collectionName, Map<String, Long> urlToTime)
+ throws SolrServerException, IOException {
- Iterator<Entry<String,Slice>> it = slices.entrySet().iterator();
- while (it.hasNext()) {
- Entry<String,Slice> sliceEntry = it.next();
- Map<String,Replica> sliceShards = sliceEntry.getValue().getReplicasMap();
- Iterator<Entry<String,Replica>> shardIt = sliceShards.entrySet()
- .iterator();
- while (shardIt.hasNext()) {
- Entry<String,Replica> shardEntry = shardIt.next();
- ZkCoreNodeProps coreProps = new ZkCoreNodeProps(shardEntry.getValue());
+ DocCollection collectionState = getCollectionState(collectionName);
+ if (collectionState != null) {
+ for (Slice shard : collectionState) {
+ for (Replica replica : shard) {
+ ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
CoreAdminResponse mcr;
try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
mcr = CoreAdminRequest.getStatus(coreProps.getCoreName(), server);
@@ -1087,59 +599,9 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
}
}
} else {
- throw new IllegalArgumentException("Could not find collection in :"
- + clusterState.getCollectionsMap());
+ throw new IllegalArgumentException("Could not find collection " + collectionName);
}
}
-
- private String getUrlFromZk(String collection) {
- ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
- Map<String,Slice> slices = clusterState.getSlicesMap(collection);
-
- if (slices == null) {
- throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
- }
-
- for (Map.Entry<String,Slice> entry : slices.entrySet()) {
- Slice slice = entry.getValue();
- Map<String,Replica> shards = slice.getReplicasMap();
- Set<Map.Entry<String,Replica>> shardEntries = shards.entrySet();
- for (Map.Entry<String,Replica> shardEntry : shardEntries) {
- final ZkNodeProps node = shardEntry.getValue();
- if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
- return ZkCoreNodeProps.getCoreUrl(node.getStr(ZkStateReader.BASE_URL_PROP), collection); //new ZkCoreNodeProps(node).getCoreUrl();
- }
- }
- }
-
- throw new RuntimeException("Could not find a live node for collection:" + collection);
- }
-
-/* private void waitForNon403or404or503(HttpSolrServer collectionClient)
- throws Exception {
- SolrException exp = null;
- long timeoutAt = System.currentTimeMillis() + 30000;
-
- while (System.currentTimeMillis() < timeoutAt) {
- boolean missing = false;
-
- try {
- collectionClient.query(new SolrQuery("*:*"));
- } catch (SolrException e) {
- if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
- throw e;
- }
- exp = e;
- missing = true;
- }
- if (!missing) {
- return;
- }
- Thread.sleep(50);
- }
-
- fail("Could not find the new collection - " + exp.code() + " : " + collectionClient.getBaseURL());
- }*/
private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
@@ -1189,40 +651,48 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
}
- private void addReplicaTest() throws Exception {
+ @Test
+ public void addReplicaTest() throws Exception {
String collectionName = "addReplicaColl";
- try (CloudSolrClient client = createCloudClient(null)) {
- createCollection(collectionName, client, 2, 2);
- String newReplicaName = Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName));
- ArrayList<String> nodeList = new ArrayList<>(client.getZkStateReader().getClusterState().getLiveNodes());
- Collections.shuffle(nodeList, random());
- Replica newReplica = doAddReplica(collectionName, "shard1",
- Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName)),
- nodeList.get(0), client, null);
+ CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+ .setMaxShardsPerNode(4)
+ .process(cluster.getSolrClient());
- log.info("newReplica {},\n{} ", newReplica, client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)));
+ ArrayList<String> nodeList
+ = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+ Collections.shuffle(nodeList, random());
- assertEquals("Replica should be created on the right node",
- client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)), newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+ String newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .setNode(nodeList.get(0))
+ .process(cluster.getSolrClient());
- Properties props = new Properties();
- String instancePathStr = createTempDir().toString();
- props.put(CoreAdminParams.INSTANCE_DIR, instancePathStr); //Use name via the property.instanceDir method
- newReplica = doAddReplica(collectionName, "shard2",
- Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName)),
- null, client, props);
- assertNotNull(newReplica);
+ Replica newReplica = getCollectionState(collectionName).getReplica(newReplicaName);
- try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
- CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
- NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
- String instanceDirStr = (String) coreStatus.get("instanceDir");
- assertEquals(Paths.get(instanceDirStr).toString(), instancePathStr);
- }
+ assertEquals("Replica should be created on the right node",
+ cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
+ newReplica.getStr(ZkStateReader.BASE_URL_PROP));
- //Test to make sure we can't create another replica with an existing core_name of that collection
- String coreName = newReplica.getStr(CORE_NAME_PROP);
+ newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+ Path instancePath = createTempDir();
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
+ .process(cluster.getSolrClient());
+
+ newReplica = getCollectionState(collectionName).getReplica(newReplicaName);
+ assertNotNull(newReplica);
+
+ try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
+ CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
+ NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
+ String instanceDirStr = (String) coreStatus.get("instanceDir");
+ assertEquals(instanceDirStr, instancePath.toString());
+ }
+
+ //Test to make sure we can't create another replica with an existing core_name of that collection
+ String coreName = newReplica.getStr(CORE_NAME_PROP);
+ SolrException e = expectThrows(SolrException.class, () -> {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", "addreplica");
params.set("collection", collectionName);
@@ -1230,101 +700,21 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
params.set("name", coreName);
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
- try {
- client.request(request);
- fail("AddReplica call should not have been successful");
- } catch (SolrException e) {
- assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
- }
+ cluster.getSolrClient().request(request);
+ });
+ assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
- // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
- // for ADDREPLICA, this is "property.name". See SOLR-7132
- props = new Properties();
- props.put(CoreAdminParams.NAME, "propertyDotName");
+ // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
+ // for ADDREPLICA, this is "property.name". See SOLR-7132
+ newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+ CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+ .withProperty(CoreAdminParams.NAME, "propertyDotName")
+ .process(cluster.getSolrClient());
+
+ newReplica = getCollectionState(collectionName).getReplica(newReplicaName);
+ assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
- newReplica = doAddReplica(collectionName, "shard1",
- Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName)),
- nodeList.get(0), client, props);
- assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
- }
}
- private Replica doAddReplica(String collectionName, String shard, String newReplicaName, String node,
- CloudSolrClient client, Properties props) throws IOException, SolrServerException {
- CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica();
-
- addReplica.setCollectionName(collectionName);
- addReplica.setShardName(shard);
- if (node != null) {
- addReplica.setNode(node);
- }
- if (props != null) {
- addReplica.setProperties(props);
- }
- client.request(addReplica);
- TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
- Replica newReplica = null;
-
- for (; ! timeout.hasTimedOut(); ) {
- Slice slice = client.getZkStateReader().getClusterState().getSlice(collectionName, shard);
- newReplica = slice.getReplica(newReplicaName);
- }
-
- assertNotNull(newReplica);
- return newReplica;
- }
- @Override
- protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException, IOException {
-
- if (r.nextBoolean())
- return super.queryServer(params);
-
- if (r.nextBoolean())
- params.set("collection",DEFAULT_COLLECTION);
-
- QueryResponse rsp = getCommonCloudSolrClient().query(params);
- return rsp;
- }
-
- protected void createCollection(String COLL_NAME, CloudSolrClient client,int replicationFactor , int numShards ) throws Exception {
- int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
- Map<String, Object> props = makeMap(
- REPLICATION_FACTOR, replicationFactor,
- MAX_SHARDS_PER_NODE, maxShardsPerNode,
- NUM_SLICES, numShards);
- Map<String,List<Integer>> collectionInfos = new HashMap<>();
- createCollection(collectionInfos, COLL_NAME, props, client, "conf1");
- assertAllActive(COLL_NAME, getCommonCloudSolrClient().getZkStateReader());
-
- }
-
- private void clusterPropTest() throws Exception {
- try (CloudSolrClient client = createCloudClient(null)) {
- assertTrue("cluster property not set", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, "false"));
- assertTrue("cluster property not unset ", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, null));
- }
- }
-
- public static boolean setClusterProp(CloudSolrClient client, String name , String val) throws SolrServerException, IOException, InterruptedException {
- Map m = makeMap(
- "action", CollectionAction.CLUSTERPROP.toLower(),
- "name",name);
-
- if(val != null) m.put("val", val);
- SolrRequest request = new QueryRequest(new MapSolrParams(m));
- request.setPath("/admin/collections");
- client.request(request);
-
- TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
- boolean changed = false;
- while(! timeout.hasTimedOut()){
- Thread.sleep(10);
- changed = Objects.equals(val,client.getZkStateReader().getClusterProperty(name, (String) null));
- if(changed) break;
- }
- return changed;
- }
}
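
For reference, the raw /admin/collections QueryRequest plumbing removed above collapses into CollectionAdminRequest builders. Below is a minimal sketch of the builder-style calls, assuming a live SolrClient and an uploaded configset named "conf"; the collection name is illustrative.

    import java.util.List;

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.CollectionAdminResponse;

    class CollectionsApiSketch {
      static void run(SolrClient client) throws Exception {
        // CREATE: 2 shards x 2 replicas on configset "conf"
        CollectionAdminResponse rsp = CollectionAdminRequest
            .createCollection("sketchcoll", "conf", 2, 2)
            .setMaxShardsPerNode(2)
            .process(client);
        if (!rsp.isSuccess()) throw new IllegalStateException("create failed");

        // RELOAD and DELETE have builders of their own
        CollectionAdminRequest.reloadCollection("sketchcoll").process(client);
        CollectionAdminRequest.deleteCollection("sketchcoll").process(client);

        // LIST returns the collection names directly
        List<String> collections = CollectionAdminRequest.listCollections(client);
      }
    }

Each action is a typed request object, so tests no longer hand-assemble ModifiableSolrParams or set the /admin/collections path themselves.
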
diff --git a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java b/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
index 989e1afde34..df7a2e2e30e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
@@ -77,8 +77,7 @@ public class CreateCollectionCleanupTest extends SolrCloudTestCase {
assertFalse(rsp.isSuccess());
// Confirm using LIST that the collection does not exist
- CollectionAdminRequest.List list = CollectionAdminRequest.listCollections();
- rsp = list.process(cloudClient);
- assertFalse(((ArrayList) rsp.getResponse().get("collections")).contains("foo"));
+ assertFalse(CollectionAdminRequest.listCollections(cloudClient).contains("foo"));
+
}
}
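
The ported tests lean heavily on SolrCloudTestCase.waitForState, whose predicate is re-evaluated with the live-node set and the current DocCollection until it returns true or the wait times out. A sketch of the pattern, assuming a subclass of SolrCloudTestCase; class, configset and collection names are illustrative.

    package org.apache.solr.cloud;

    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.common.cloud.DocCollection;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class WaitForStateSketch extends SolrCloudTestCase {

      @BeforeClass
      public static void setupCluster() throws Exception {
        configureCluster(2)
            .addConfig("conf", configset("cloud-minimal"))
            .configure();
      }

      @Test
      public void testCollectionComesUp() throws Exception {
        CollectionAdminRequest.createCollection("sketch", "conf", 2, 1)
            .process(cluster.getSolrClient());
        // predicate: (liveNodes, collectionState) -> boolean, polled until true
        waitForState("Expected 2 shards with 1 active replica each", "sketch",
            (liveNodes, collectionState) ->
                DocCollection.isFullyActive(liveNodes, collectionState, 2, 1));
      }
    }
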
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
index 477641da974..63a327277f3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
@@ -16,41 +16,19 @@
*/
package org.apache.solr.cloud;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.Utils;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -59,371 +37,162 @@ import static org.apache.solr.common.params.ShardParams._ROUTE_;
/**
* Tests the Custom Sharding API.
*/
-@Slow
-@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
+public class CustomCollectionTest extends SolrCloudTestCase {
- private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final int NODE_COUNT = 4;
- protected String getSolrXml() {
- return "solr.xml";
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(NODE_COUNT)
+ .addConfig("conf", configset("cloud-dynamic"))
+ .configure();
}
-
- public CustomCollectionTest() {
- sliceCount = 2;
- }
-
- @Override
- protected void setDistributedParams(ModifiableSolrParams params) {
-
- if (r.nextBoolean()) {
- // don't set shards, let that be figured out from the cloud state
- } else {
- // use shard ids rather than physical locations
- StringBuilder sb = new StringBuilder();
- for (int i = 0; i < getShardCount(); i++) {
- if (i > 0)
- sb.append(',');
- sb.append("shard" + (i + 3));
- }
- params.set("shards", sb.toString());
- }
+ @Before
+ public void ensureClusterEmpty() throws Exception {
+ cluster.deleteAllCollections();
}
@Test
- @ShardsFixed(num = 4)
- public void test() throws Exception {
- testCustomCollectionsAPI();
- testRouteFieldForHashRouter();
- testCreateShardRepFactor();
- }
+ public void testCustomCollectionsAPI() throws Exception {
-
- private void testCustomCollectionsAPI() throws Exception {
- String COLL_PREFIX = "implicitcoll";
-
- // TODO: fragile - because we dont pass collection.confName, it will only
- // find a default if a conf set with a name matching the collection name is found, or
- // if there is only one conf set. That and the fact that other tests run first in this
- // env make this pretty fragile
-
- // create new collections rapid fire
- Map<String,List<Integer>> collectionInfos = new HashMap<>();
+ final String collection = "implicitcoll";
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+ int numShards = 3;
+ int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
- int cnt = random().nextInt(6) + 1;
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
+ .setMaxShardsPerNode(maxShardsPerNode)
+ .process(cluster.getSolrClient());
- for (int i = 0; i < cnt; i++) {
- int numShards = 3;
- int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-
- CloudSolrClient client = null;
- try {
- if (i == 0) {
- // Test if we can create a collection through CloudSolrServer where
- // you havnt set default-collection
- // This is nice because you want to be able to create you first
- // collection using CloudSolrServer, and in such case there is
- // nothing reasonable to set as default-collection
- client = createCloudClient(null);
- } else if (i == 1) {
- // Test if we can create a collection through CloudSolrServer where
- // you have set default-collection to a non-existing collection
- // This is nice because you want to be able to create you first
- // collection using CloudSolrServer, and in such case there is
- // nothing reasonable to set as default-collection, but you might want
- // to use the same CloudSolrServer throughout the entire
- // lifetime of your client-application, so it is nice to be able to
- // set a default-collection on this CloudSolrServer once and for all
- // and use this CloudSolrServer to create the collection
- client = createCloudClient(COLL_PREFIX + i);
- }
-
- Map<String, Object> props = Utils.makeMap(
- "router.name", ImplicitDocRouter.NAME,
- REPLICATION_FACTOR, replicationFactor,
- MAX_SHARDS_PER_NODE, maxShardsPerNode,
- SHARDS_PROP, "a,b,c");
-
- createCollection(collectionInfos, COLL_PREFIX + i,props,client);
- } finally {
- if (client != null) client.close();
- }
- }
-
- Set<Entry<String,List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
- for (Entry<String,List<Integer>> entry : collectionInfosEntrySet) {
- String collection = entry.getKey();
- List<Integer> list = entry.getValue();
- checkForCollection(collection, list, null);
-
- String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collection);
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // poll for a second - it can take a moment before we are ready to serve
- waitForNon403or404or503(collectionClient);
- }
- }
- ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
- for (int j = 0; j < cnt; j++) {
- waitForRecoveriesToFinish(COLL_PREFIX + j, zkStateReader, false);
- }
-
- ClusterState clusterState = zkStateReader.getClusterState();
-
- DocCollection coll = clusterState.getCollection(COLL_PREFIX + 0);
- assertEquals("implicit", ((Map)coll.get(DOC_ROUTER)).get("name") );
+ DocCollection coll = getCollectionState(collection);
+ assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
assertNotNull(coll.getStr(REPLICATION_FACTOR));
assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
assertNull("A shard of a Collection configured with implicit router must have null range",
coll.getSlice("a").getRange());
- List<String> collectionNameList = new ArrayList<>();
- collectionNameList.addAll(collectionInfos.keySet());
- log.info("Collections created : "+collectionNameList );
+ new UpdateRequest()
+ .add("id", "6")
+ .add("id", "7")
+ .add("id", "8")
+ .withRoute("a")
+ .commit(cluster.getSolrClient(), collection);
- String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
+ assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+ assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+ assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
- String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+ cluster.getSolrClient().deleteByQuery(collection, "*:*");
+ cluster.getSolrClient().commit(collection, true, true);
+ assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
- String shard_fld = "shard_s";
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
+ new UpdateRequest()
+ .add("id", "9")
+ .add("id", "10")
+ .add("id", "11")
+ .withRoute("c")
+ .commit(cluster.getSolrClient(), collection);
- // lets try and use the solrj client to index a couple documents
-
- collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
- "humpty dumpy sat on a wall", _ROUTE_,"a"));
-
- collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
- "humpty dumpy3 sat on a walls", _ROUTE_,"a"));
-
- collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
- "humpty dumpy2 sat on a walled", _ROUTE_,"a"));
-
- collectionClient.commit();
-
- assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"b")).getResults().getNumFound());
- assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"a")).getResults().getNumFound());
-
- collectionClient.deleteByQuery("*:*");
- collectionClient.commit(true,true);
- assertEquals(0, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-
- UpdateRequest up = new UpdateRequest();
- up.setParam(_ROUTE_, "c");
- up.setParam("commit","true");
-
- up.add(getDoc(id, 9, i1, -600, tlong, 600, t1,
- "humpty dumpy sat on a wall"));
- up.add(getDoc(id, 10, i1, -600, tlong, 600, t1,
- "humpty dumpy3 sat on a walls"));
- up.add(getDoc(id, 11, i1, -600, tlong, 600, t1,
- "humpty dumpy2 sat on a walled"));
-
- collectionClient.request(up);
-
- assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"a")).getResults().getNumFound());
- assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"c")).getResults().getNumFound());
-
- //Testing CREATESHARD
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATESHARD.toString());
- params.set("collection", collectionName);
- params.set("shard", "x");
- SolrRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
- try (SolrClient server = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
- server.request(request);
+ assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+ assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+ assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+ //Testing CREATESHARD
+ CollectionAdminRequest.createShard(collection, "x")
+ .process(cluster.getSolrClient());
+ waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
+ if (c.getSlice("x") == null)
+ return false;
+ for (Replica r : c.getSlice("x")) {
+ if (r.getState() != Replica.State.ACTIVE)
+ return false;
}
- waitForCollection(zkStateReader,collectionName,4);
- //wait for all the replicas to become active
- int attempts = 0;
- while(true){
- if(attempts>30 ) fail("Not enough active replicas in the shard 'x'");
- attempts++;
- int activeReplicaCount = 0;
- for (Replica x : zkStateReader.getClusterState().getCollection(collectionName).getSlice("x").getReplicas()) {
- if (x.getState() == Replica.State.ACTIVE) {
- activeReplicaCount++;
- }
- }
- Thread.sleep(500);
- if(activeReplicaCount >= replicationFactor) break;
- }
- log.info(zkStateReader.getClusterState().toString());
-
- collectionClient.add(getDoc(id, 66, i1, -600, tlong, 600, t1,
- "humpty dumpy sat on a wall", _ROUTE_,"x"));
- collectionClient.commit();
- assertEquals(1, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"x")).getResults().getNumFound());
-
-
- int numShards = 4;
- replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
- int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+ return true;
+ });
- try (CloudSolrClient client = createCloudClient(null)) {
- Map<String, Object> props = Utils.makeMap(
- "router.name", ImplicitDocRouter.NAME,
- REPLICATION_FACTOR, replicationFactor,
- MAX_SHARDS_PER_NODE, maxShardsPerNode,
- SHARDS_PROP, "a,b,c,d",
- "router.field", shard_fld);
-
- collectionName = COLL_PREFIX + "withShardField";
- createCollection(collectionInfos, collectionName,props,client);
- }
-
- List<Integer> list = collectionInfos.get(collectionName);
- checkForCollection(collectionName, list, null);
-
-
- url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
- }
+ new UpdateRequest()
+ .add("id", "66", _ROUTE_, "x")
+ .commit(cluster.getSolrClient(), collection);
+ // TODO - the local state is cached and causes the request to fail with 'unknown shard'
+ // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // poll for a second - it can take a moment before we are ready to serve
- waitForNon403or404or503(collectionClient);
- }
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // lets try and use the solrj client to index a couple documents
-
- collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
- "humpty dumpy sat on a wall", shard_fld,"a"));
-
- collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
- "humpty dumpy3 sat on a walls", shard_fld,"a"));
-
- collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
- "humpty dumpy2 sat on a walled", shard_fld,"a"));
-
- collectionClient.commit();
-
- assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- assertEquals(0, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_,"b")).getResults().getNumFound());
- //TODO debug the following case
- assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
- }
}
- private void testRouteFieldForHashRouter()throws Exception{
+ @Test
+ public void testRouteFieldForImplicitRouter() throws Exception {
+
+ int numShards = 4;
+ int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+ int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+ String shard_fld = "shard_s";
+
+ final String collection = "withShardField";
+
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
+ .setMaxShardsPerNode(maxShardsPerNode)
+ .setRouterField(shard_fld)
+ .process(cluster.getSolrClient());
+
+ new UpdateRequest()
+ .add("id", "6", shard_fld, "a")
+ .add("id", "7", shard_fld, "a")
+ .add("id", "8", shard_fld, "b")
+ .commit(cluster.getSolrClient(), collection);
+
+ assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+ assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+ assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+ }
+
+ @Test
+ public void testRouteFieldForHashRouter()throws Exception{
String collectionName = "routeFieldColl";
int numShards = 4;
int replicationFactor = 2;
- int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
- .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
- HashMap<String,List<Integer>> collectionInfos = new HashMap<>();
+ int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
String shard_fld = "shard_s";
- try (CloudSolrClient client = createCloudClient(null)) {
- Map<String, Object> props = Utils.makeMap(
- REPLICATION_FACTOR, replicationFactor,
- MAX_SHARDS_PER_NODE, maxShardsPerNode,
- NUM_SLICES, numShards,
- "router.field", shard_fld);
- createCollection(collectionInfos, collectionName,props,client);
- }
+ CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
+ .setMaxShardsPerNode(maxShardsPerNode)
+ .setRouterField(shard_fld)
+ .process(cluster.getSolrClient());
- List<Integer> list = collectionInfos.get(collectionName);
- checkForCollection(collectionName, list, null);
+ new UpdateRequest()
+ .add("id", "6", shard_fld, "a")
+ .add("id", "7", shard_fld, "a")
+ .add("id", "8", shard_fld, "b")
+ .commit(cluster.getSolrClient(), collectionName);
+
+ assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+ assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+ assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+ assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
- String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+ cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
+ cluster.getSolrClient().commit(collectionName);
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // poll for a second - it can take a moment before we are ready to serve
- waitForNon403or404or503(collectionClient);
- }
+ cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
+ cluster.getSolrClient().commit(collectionName);
+ assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
-
- try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
- // lets try and use the solrj client to index a couple documents
-
- collectionClient.add(getDoc(id, 6, i1, -600, tlong, 600, t1,
- "humpty dumpy sat on a wall", shard_fld,"a"));
-
- collectionClient.add(getDoc(id, 7, i1, -600, tlong, 600, t1,
- "humpty dumpy3 sat on a walls", shard_fld,"a"));
-
- collectionClient.add(getDoc(id, 8, i1, -600, tlong, 600, t1,
- "humpty dumpy2 sat on a walled", shard_fld,"a"));
-
- collectionClient.commit();
-
- assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
- //TODO debug the following case
- assertEquals(3, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
- collectionClient.deleteByQuery("*:*");
- collectionClient.commit();
-
- collectionClient.add (getDoc( id,100,shard_fld, "b!doc1"));
- collectionClient.commit();
- assertEquals(1, collectionClient.query(new SolrQuery("*:*").setParam(_ROUTE_, "b!")).getResults().getNumFound());
- }
}
- private void testCreateShardRepFactor() throws Exception {
- String collectionName = "testCreateShardRepFactor";
- HashMap<String,List<Integer>> collectionInfos = new HashMap<>();
- try (CloudSolrClient client = createCloudClient(null)) {
- Map<String, Object> props = Utils.makeMap(
- REPLICATION_FACTOR, 1,
- MAX_SHARDS_PER_NODE, 5,
- NUM_SLICES, 2,
- "shards", "a,b",
- "router.name", "implicit");
+ @Test
+ public void testCreateShardRepFactor() throws Exception {
+ final String collectionName = "testCreateShardRepFactor";
+ CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
+ .process(cluster.getSolrClient());
- createCollection(collectionInfos, collectionName, props, client);
- }
- ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
- waitForRecoveriesToFinish(collectionName, zkStateReader, false);
+ CollectionAdminRequest.createShard(collectionName, "x")
+ .process(cluster.getSolrClient());
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("action", CollectionAction.CREATESHARD.toString());
- params.set("collection", collectionName);
- params.set("shard", "x");
- SolrRequest request = new QueryRequest(params);
- request.setPath("/admin/collections");
+ waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
+ return c.getSlice("x").getReplicas().size() == 1;
+ });
- try (SolrClient server = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
- server.request(request);
- }
-
- waitForRecoveriesToFinish(collectionName, zkStateReader, false);
-
- int replicaCount = 0;
- int attempts = 0;
- while (true) {
- if (attempts > 30) fail("Not enough active replicas in the shard 'x'");
- attempts++;
- replicaCount = zkStateReader.getClusterState().getSlice(collectionName, "x").getReplicas().size();
- if (replicaCount >= 1) break;
- Thread.sleep(500);
- }
-
- assertEquals("CREATESHARD API created more than replicationFactor number of replicas", 1, replicaCount);
}
-
- @Override
- protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException, IOException {
-
- if (r.nextBoolean())
- return super.queryServer(params);
-
- if (r.nextBoolean())
- params.set("collection",DEFAULT_COLLECTION);
-
- QueryResponse rsp = getCommonCloudSolrClient().query(params);
- return rsp;
- }
}
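
To recap the routing variants exercised above: the implicit router addresses shards by name (via the _route_ parameter or a router.field value), while the hash router with a router.field hashes that field instead of the uniqueKey. A condensed fragment, assuming it runs inside a SolrCloudTestCase test like those above; collection names are illustrative.

    // Implicit router: shards are created by name, and each update names its shard
    CollectionAdminRequest
        .createCollectionWithImplicitRouter("implicitsketch", "conf", "a,b,c", 2)
        .setMaxShardsPerNode(4)
        .process(cluster.getSolrClient());

    new UpdateRequest()
        .add("id", "1")
        .withRoute("a")   // every doc in this request lands on shard "a"
        .commit(cluster.getSolrClient(), "implicitsketch");

    // Hash router with a custom route field: the value of shard_s is hashed
    // onto the shard ranges instead of the document id
    CollectionAdminRequest.createCollection("hashsketch", "conf", 4, 2)
        .setMaxShardsPerNode(2)
        .setRouterField("shard_s")
        .process(cluster.getSolrClient());
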
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
index 78f82ed9efb..8e3f63d9e56 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
@@ -51,6 +51,7 @@ public class MigrateRouteKeyTest extends SolrCloudTestCase {
if (usually()) {
CollectionAdminRequest.setClusterProperty("legacyCloud", "false").process(cluster.getSolrClient());
+ log.info("Using legacyCloud=false for cluster");
}
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java b/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
index a0cb4dcfc7e..0aecdf9869a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
@@ -16,46 +16,59 @@
*/
package org.apache.solr.cloud;
-import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.After;
+import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@Slow
-public class RecoveryZkTest extends AbstractFullDistribZkTestBase {
+public class RecoveryZkTest extends SolrCloudTestCase {
+
+ private static Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(2)
+ .addConfig("conf", configset("cloud-minimal"))
+ .configure();
+ }
- //private static final String DISTRIB_UPDATE_CHAIN = "distrib-update-chain";
private StoppableIndexingThread indexThread;
private StoppableIndexingThread indexThread2;
- public RecoveryZkTest() {
- super();
- sliceCount = 1;
- fixShardCount(2);
- schemaString = "schema15.xml"; // we need a string id
- }
-
- public static String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
- public static RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
-
- protected String[] getFieldNames() {
- return fieldNames;
- }
-
- protected RandVal[] getRandValues() {
- return randVals;
+ @After
+ public void stopThreads() throws InterruptedException {
+ indexThread.safeStop();
+ indexThread2.safeStop();
+ indexThread.join();
+ indexThread2.join();
}
@Test
public void test() throws Exception {
- handle.clear();
- handle.put("timestamp", SKIPVAL);
-
+
+ final String collection = "recoverytest";
+
+ CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
+ .setMaxShardsPerNode(1)
+ .process(cluster.getSolrClient());
+ waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
+ cluster.getSolrClient().setDefaultCollection(collection);
+
// start a couple indexing threads
int[] maxDocList = new int[] {300, 700, 1200, 1350, 3000};
@@ -67,12 +80,12 @@ public class RecoveryZkTest extends AbstractFullDistribZkTestBase {
} else {
maxDoc = maxDocNightlyList[random().nextInt(maxDocList.length - 1)];
}
+ log.info("Indexing {} documents", maxDoc);
- indexThread = new StoppableIndexingThread(controlClient, cloudClient, "1", true, maxDoc, 1, true);
+ indexThread = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
indexThread.start();
- indexThread2 = new StoppableIndexingThread(controlClient, cloudClient, "2", true, maxDoc, 1, true);
-
+ indexThread2 = new StoppableIndexingThread(null, cluster.getSolrClient(), "2", true, maxDoc, 1, true);
indexThread2.start();
// give some time to index...
@@ -80,88 +93,57 @@ public class RecoveryZkTest extends AbstractFullDistribZkTestBase {
Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
// bring shard replica down
- JettySolrRunner replica = chaosMonkey.stopShard("shard1", 1).jetty;
+ DocCollection state = getCollectionState(collection);
+ Replica leader = state.getLeader("shard1");
+ Replica replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
+ JettySolrRunner jetty = cluster.getReplicaJetty(replica);
+ jetty.stop();
// wait a moment - let's allow some docs to be indexed so replication time is non-zero
Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
// bring shard replica up
- replica.start();
+ jetty.start();
// make sure replication can start
Thread.sleep(3000);
- ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-
+
// stop indexing threads
indexThread.safeStop();
indexThread2.safeStop();
indexThread.join();
indexThread2.join();
-
- Thread.sleep(1000);
-
- waitForThingsToLevelOut(120);
-
- Thread.sleep(2000);
-
- waitForThingsToLevelOut(30);
-
- Thread.sleep(5000);
-
- waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
+
+ new UpdateRequest()
+ .commit(cluster.getSolrClient(), collection);
+
+ cluster.getSolrClient().waitForState(collection, 120, TimeUnit.SECONDS, clusterShape(1, 2));
// test that leader and replica have same doc count
-
- String fail = checkShardConsistency("shard1", false, false);
- if (fail != null) {
- fail(fail);
- }
-
- SolrQuery query = new SolrQuery("*:*");
- query.setParam("distrib", "false");
- long client1Docs = shardToJetty.get("shard1").get(0).client.solrClient.query(query).getResults().getNumFound();
- long client2Docs = shardToJetty.get("shard1").get(1).client.solrClient.query(query).getResults().getNumFound();
-
- assertTrue(client1Docs > 0);
- assertEquals(client1Docs, client2Docs);
-
- // won't always pass yet...
- //query("q", "*:*", "sort", "id desc");
- }
-
- @Override
- protected void indexDoc(SolrInputDocument doc) throws IOException,
- SolrServerException {
- controlClient.add(doc);
-
- // UpdateRequest ureq = new UpdateRequest();
- // ureq.add(doc);
- // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
- // ureq.process(cloudClient);
- cloudClient.add(doc);
+ state = getCollectionState(collection);
+ assertShardConsistency(state.getSlice("shard1"), true);
+
}
-
- @Override
- public void distribTearDown() throws Exception {
- // make sure threads have been stopped...
- indexThread.safeStop();
- indexThread2.safeStop();
-
- indexThread.join();
- indexThread2.join();
-
- super.distribTearDown();
- }
-
- // skip the randoms - they can deadlock...
- @Override
- protected void indexr(Object... fields) throws Exception {
- SolrInputDocument doc = new SolrInputDocument();
- addFields(doc, fields);
- addFields(doc, "rnd_b", true);
- indexDoc(doc);
+ private void assertShardConsistency(Slice shard, boolean expectDocs) throws Exception {
+ List<Replica> replicas = shard.getReplicas(r -> r.getState() == Replica.State.ACTIVE);
+ long[] numCounts = new long[replicas.size()];
+ int i = 0;
+ for (Replica replica : replicas) {
+ try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
+ .withHttpClient(cluster.getSolrClient().getHttpClient()).build()) {
+ numCounts[i] = client.query(new SolrQuery("*:*").add("distrib", "false")).getResults().getNumFound();
+ i++;
+ }
+ }
+ for (int j = 1; j < replicas.size(); j++) {
+ if (numCounts[j] != numCounts[j - 1])
+ fail("Mismatch in counts between replicas"); // nocommit improve this!
+ if (numCounts[j] == 0 && expectDocs)
+ fail("Expected docs on shard " + shard.getName() + " but found none");
+ }
}
+
}
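
The consistency check above hinges on querying each replica's core directly with distrib=false, so the count comes from that core alone rather than being fanned out across the collection. The essential fragment, assuming replica is an active Replica taken from the cluster state:

    try (HttpSolrClient core = new HttpSolrClient.Builder(replica.getCoreUrl()).build()) {
      // distrib=false keeps the query on this core only
      long numFound = core.query(new SolrQuery("*:*").add("distrib", "false"))
          .getResults().getNumFound();
      log.info("replica {} holds {} docs", replica.getName(), numFound);
    }
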
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index 7388476bdcb..72f06943cf7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -93,7 +93,8 @@ public class ShardSplitTest extends BasicDistributedZkTest {
if (usually()) {
log.info("Using legacyCloud=false for cluster");
- CollectionsAPIDistributedZkTest.setClusterProp(cloudClient, "legacyCloud", "false");
+ CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+ .process(cloudClient);
}
incompleteOrOverlappingCustomRangeTest();
splitByUniqueKeyTest();
@@ -516,7 +517,8 @@ public class ShardSplitTest extends BasicDistributedZkTest {
if (usually()) {
log.info("Using legacyCloud=false for cluster");
- CollectionsAPIDistributedZkTest.setClusterProp(cloudClient, "legacyCloud", "false");
+ CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+ .process(cloudClient);
}
log.info("Starting testSplitShardWithRule");
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestClusterProperties.java b/solr/core/src/test/org/apache/solr/cloud/TestClusterProperties.java
new file mode 100644
index 00000000000..c5575af2979
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/TestClusterProperties.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.cloud.ClusterProperties;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestClusterProperties extends SolrCloudTestCase {
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(1).configure();
+ }
+
+ @Test
+ public void testClusterProperties() throws Exception {
+ ClusterProperties props = new ClusterProperties(zkClient());
+ assertEquals("false", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false"));
+
+ CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "true").process(cluster.getSolrClient());
+ assertEquals("true", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false"));
+
+ CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false").process(cluster.getSolrClient());
+ assertEquals("false", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"));
+ }
+
+}
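
The test above demonstrates the round trip: cluster properties are written through the Collections API but can be read back straight from ZooKeeper via ClusterProperties. The essential fragment, assuming the SolrCloudTestCase context:

    // write through the Collections API
    CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "true")
        .process(cluster.getSolrClient());

    // read directly from ZK; the second argument is the default when unset
    ClusterProperties props = new ClusterProperties(zkClient());
    String legacy = props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
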
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java b/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
new file mode 100644
index 00000000000..33820b34b41
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestDeleteCollectionOnDownNodes extends SolrCloudTestCase {
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ configureCluster(4)
+ .addConfig("conf", configset("cloud-minimal"))
+ .addConfig("conf2", configset("cloud-minimal"))
+ .configure();
+ }
+
+ @Test
+ public void deleteCollectionWithDownNodes() throws Exception {
+
+ CollectionAdminRequest.createCollection("halfdeletedcollection2", "conf", 4, 2)
+ .setMaxShardsPerNode(3)
+ .process(cluster.getSolrClient());
+
+ // stop a couple nodes
+ cluster.stopJettySolrRunner(cluster.getRandomJetty(random()));
+ cluster.stopJettySolrRunner(cluster.getRandomJetty(random()));
+
+ // wait for leaders to settle out
+ waitForState("Timed out waiting for leader elections", "halfdeletedcollection2", (n, c) -> {
+ for (Slice slice : c) {
+ if (slice.getLeader() == null)
+ return false;
+ if (slice.getLeader().isActive(n) == false)
+ return false;
+ }
+ return true;
+ });
+
+ // delete the collection
+ CollectionAdminRequest.deleteCollection("halfdeletedcollection2").process(cluster.getSolrClient());
+ waitForState("Timed out waiting for collection to be deleted", "halfdeletedcollection2", (n, c) -> c == null);
+
+ assertFalse("Still found collection that should be gone",
+ cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2"));
+
+ }
+}
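
The (liveNodes, collection) lambda shape used with waitForState() above is reusable. As a sketch, a stricter predicate that waits for every replica, not just the leaders, could look like this (it relies on DocCollection and Slice being iterable):

    import java.util.Set;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;

    public class StatePredicates {
      // True once every replica of every slice is active on a live node.
      static boolean allReplicasActive(Set<String> liveNodes, DocCollection c) {
        if (c == null) return false;
        for (Slice slice : c) {
          for (Replica replica : slice) {
            if (!replica.isActive(liveNodes)) return false;
          }
        }
        return true;
      }
    }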
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
index 3b02477d0a7..55fb6cdf999 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
@@ -16,48 +16,41 @@
*/
package org.apache.solr.cloud.hdfs;
-import java.io.IOException;
-
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.cloud.CollectionsAPIDistributedZkTest;
-import org.apache.solr.update.HdfsUpdateLog;
+import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-
@Slow
@Nightly
@ThreadLeakFilters(defaultFilters = true, filters = {
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
+
private static MiniDFSCluster dfsCluster;
- private static long initialFailLogsCount;
-
+
@BeforeClass
public static void setupClass() throws Exception {
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
- System.setProperty("solr.hdfs.blockcache.enabled", "false");
- initialFailLogsCount = HdfsUpdateLog.INIT_FAILED_LOGS_COUNT.get();
+ System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
+
+ ZkConfigManager configManager = new ZkConfigManager(zkClient());
+ configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
+
+ System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
}
-
+
@AfterClass
public static void teardownClass() throws Exception {
- // there should be no new fails from this test
- assertEquals(0, HdfsUpdateLog.INIT_FAILED_LOGS_COUNT.get() - initialFailLogsCount);
+ cluster.shutdown(); // need to close before the MiniDFSCluster
HdfsTestUtil.teardownClass(dfsCluster);
- System.clearProperty("solr.hdfs.blockcache.enabled");
dfsCluster = null;
}
-
- @Override
- protected String getDataDir(String dataDir) throws IOException {
- return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
- }
-
}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
index a8e6fb0cf3b..2dfc32b2a31 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
@@ -16,42 +16,40 @@
*/
package org.apache.solr.cloud.hdfs;
-import java.io.IOException;
-
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.cloud.RecoveryZkTest;
+import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.util.BadHdfsThreadsFilter;
import org.junit.AfterClass;
import org.junit.BeforeClass;
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-
@Slow
-@Nightly
+//@Nightly
@ThreadLeakFilters(defaultFilters = true, filters = {
BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
public class HdfsRecoveryZkTest extends RecoveryZkTest {
+
private static MiniDFSCluster dfsCluster;
@BeforeClass
public static void setupClass() throws Exception {
dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
+
+ ZkConfigManager configManager = new ZkConfigManager(zkClient());
+ configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
+
+ System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
}
@AfterClass
public static void teardownClass() throws Exception {
+ cluster.shutdown(); // need to close before the MiniDFSCluster
HdfsTestUtil.teardownClass(dfsCluster);
dfsCluster = null;
}
-
- @Override
- protected String getDataDir(String dataDir) throws IOException {
- return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
- }
-
}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 72406ef2590..92ea99bc0b8 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -191,6 +191,10 @@ public abstract class CollectionAdminRequest
@Deprecated
public abstract AsyncCollectionSpecificAdminRequest setCollectionName(String collection);
+ public String getCollectionName() {
+ return collection;
+ }
+
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
@@ -1601,6 +1605,13 @@ public abstract class CollectionAdminRequest
return this;
}
+ public AddReplica withProperty(String key, String value) {
+ if (this.properties == null)
+ this.properties = new Properties();
+ this.properties.setProperty(key, value);
+ return this;
+ }
+
public String getNode() {
return node;
}
@@ -2178,8 +2189,9 @@ public abstract class CollectionAdminRequest
/**
* Returns a SolrRequest to get a list of collections in the cluster
*/
- public static List listCollections() {
- return new List();
+ public static java.util.List<String> listCollections(SolrClient client) throws IOException, SolrServerException {
+ CollectionAdminResponse resp = new List().process(client);
+ return (java.util.List<String>) resp.getResponse().get("collections");
}
// LIST request
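
A usage sketch for the reworked helper; the collection name is a placeholder:

    import java.util.List;
    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class ListSketch {
      // listCollections() now runs the LIST request itself and unwraps the names.
      static boolean exists(SolrClient client, String name) throws Exception {
        List<String> names = CollectionAdminRequest.listCollections(client);
        return names.contains(name);
      }
    }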
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java
index aec6e22e41a..e7ca0fa32a0 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java
@@ -218,6 +218,13 @@ public class UpdateRequest extends AbstractUpdateRequest {
return this;
}
+ public UpdateRequest withRoute(String route) {
+ if (params == null)
+ params = new ModifiableSolrParams();
+ params.set(ROUTE, route);
+ return this;
+ }
+
public UpdateResponse commit(SolrClient client, String collection) throws IOException, SolrServerException {
if (params == null)
params = new ModifiableSolrParams();
@@ -524,4 +531,5 @@ public class UpdateRequest extends AbstractUpdateRequest {
public void lastDocInBatch() {
isLastDocInBatch = true;
}
+
}
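
A sketch of the fluent style withRoute() enables; the route key and collection name are placeholders:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.UpdateRequest;
    import org.apache.solr.common.SolrInputDocument;

    public class RouteSketch {
      static void indexRouted(SolrClient client) throws Exception {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "a!doc1");          // composite-id routed document
        new UpdateRequest()
            .add(doc)
            .withRoute("a!")                   // placeholder route key
            .commit(client, "collection1");    // placeholder collection name
      }
    }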
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
index e4be00956da..bd3bafdba2c 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
@@ -21,8 +21,11 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Locale;
import java.util.Map;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
import org.noggit.JSONUtil;
import org.noggit.JSONWriter;
@@ -218,6 +221,13 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
return replicas.values();
}
+ /**
+ * Gets all replicas that match a predicate
+ */
+ public List<Replica> getReplicas(Predicate<Replica> pred) {
+ return replicas.values().stream().filter(pred).collect(Collectors.toList());
+ }
+
/**
* Get the map of coreNodeName to replicas for this slice.
*/
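
A sketch of the one-liners the predicate overload is meant for, here filtering down to replicas that are active on live nodes:

    import java.util.List;
    import java.util.Set;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;

    public class ReplicaFilterSketch {
      static List<Replica> activeReplicas(Slice slice, Set<String> liveNodes) {
        return slice.getReplicas(r -> r.isActive(liveNodes));
      }
    }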
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 3c5aa169d98..2c1ae3b77db 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -88,7 +88,7 @@ public class MiniSolrCloudCluster {
" \n" +
"\n";
- private final ZkTestServer zkServer;
+ private ZkTestServer zkServer; // non-final due to injectChaos()
private final boolean externalZkServer;
private final List<JettySolrRunner> jettys = new CopyOnWriteArrayList<>();
private final Path baseDir;
@@ -328,6 +328,10 @@ public class MiniSolrCloudCluster {
.build());
}
+ public JettySolrRunner getJettySolrRunner(int index) {
+ return jettys.get(index);
+ }
+
/**
* Start a new Solr instance on a particular servlet context
*
@@ -440,6 +444,10 @@ public class MiniSolrCloudCluster {
public CloudSolrClient getSolrClient() {
return solrClient;
}
+
+ public SolrZkClient getZkClient() {
+ return solrClient.getZkStateReader().getZkClient();
+ }
protected CloudSolrClient buildSolrClient() {
return new Builder()
@@ -497,4 +505,29 @@ public class MiniSolrCloudCluster {
log.info("Expired zookeeper session {} from node {}", sessionId, jetty.getBaseUrl());
}
}
+
+ public void injectChaos(Random random) throws Exception {
+
+ // sometimes we restart one of the jetty nodes
+ if (random.nextBoolean()) {
+ JettySolrRunner jetty = jettys.get(random.nextInt(jettys.size()));
+ ChaosMonkey.stop(jetty);
+ log.info("============ Restarting jetty");
+ ChaosMonkey.start(jetty);
+ }
+
+ // sometimes we restart zookeeper
+ if (random.nextBoolean()) {
+ zkServer.shutdown();
+ log.info("============ Restarting zookeeper");
+ zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
+ zkServer.run();
+ }
+
+ // sometimes we cause a connection loss - sometimes it will hit the overseer
+ if (random.nextBoolean()) {
+ JettySolrRunner jetty = jettys.get(random.nextInt(jettys.size()));
+ ChaosMonkey.causeConnectionLoss(jetty);
+ }
+ }
}
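
injectChaos(Random) is presumably meant to be interleaved with real work inside a randomized test; a sketch of that call pattern, where the iteration count and loop body are placeholders:

    import java.util.Random;
    import org.apache.solr.cloud.MiniSolrCloudCluster;

    public class ChaosSketch {
      static void hammer(MiniSolrCloudCluster cluster, Random random) throws Exception {
        for (int i = 0; i < 10; i++) {
          cluster.injectChaos(random);
          // ... index documents / run queries, then assert the cluster recovers ...
        }
      }
    }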
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
index b64b1ce1a42..77a527e692c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
@@ -44,6 +44,7 @@ import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkStateReader;
import org.junit.AfterClass;
import org.junit.Before;
@@ -174,7 +175,10 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
/** The cluster */
protected static MiniSolrCloudCluster cluster;
- protected SolrZkClient zkClient() {
+ protected static SolrZkClient zkClient() {
+ ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
+ if (reader == null)
+ cluster.getSolrClient().connect();
return cluster.getSolrClient().getZkStateReader().getZkClient();
}
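
Making zkClient() static (and forcing a connect() when the ZkStateReader is not yet initialized) is what lets @BeforeClass hooks, like the HDFS ones above, talk to ZooKeeper before any test runs. A sketch of the same pattern:

    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.apache.solr.common.cloud.ZkConfigManager;
    import org.junit.BeforeClass;

    public class MyCloudTest extends SolrCloudTestCase {
      @BeforeClass
      public static void setupCluster() throws Exception {
        configureCluster(2).configure();
        // zkClient() is now usable from static context; upload an extra configset.
        new ZkConfigManager(zkClient()).uploadConfigDir(configset("cloud-minimal"), "conf");
      }
    }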