SOLR-12151: Add abstract MultiSolrCloudTestCase class.

Christine Poerschke 2018-04-10 21:09:06 +01:00
parent e8f862ea44
commit e513c95377
4 changed files with 200 additions and 2 deletions

View File

@@ -86,6 +86,8 @@ New Features
* SOLR-12036: Factor out DefaultStreamFactory solrj class. (Christine Poerschke)
* SOLR-12151: Add abstract MultiSolrCloudTestCase class. (Christine Poerschke)
Bug Fixes
----------------------

View File

@@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;

import org.junit.BeforeClass;
import org.junit.Test;
public class MultiSolrCloudTestCaseTest extends MultiSolrCloudTestCase {
private static int numClouds;
private static int numCollectionsPerCloud;
private static int numShards;
private static int numReplicas;
private static int maxShardsPerNode;
private static int nodesPerCluster;
@BeforeClass
public static void setupClusters() throws Exception {
numClouds = random().nextInt(4); // 0..3
final String[] clusterIds = new String[numClouds];
for (int ii=0; ii<numClouds; ++ii) {
clusterIds[ii] = "cloud"+(ii+1);
}
numCollectionsPerCloud = random().nextInt(3); // 0..2
final String[] collections = new String[numCollectionsPerCloud];
for (int ii=0; ii<numCollectionsPerCloud; ++ii) {
collections[ii] = "collection"+(ii+1);
}
numShards = 1+random().nextInt(2);
numReplicas = 1+random().nextInt(2);
maxShardsPerNode = 1+random().nextInt(2);
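// enough nodes to host numShards*numReplicas replicas, i.e. ceil((numShards*numReplicas) / maxShardsPerNode)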
nodesPerCluster = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
doSetupClusters(
clusterIds,
new DefaultClusterCreateFunction() {
@Override
protected int nodesPerCluster(String clusterId) {
return nodesPerCluster;
}
},
new DefaultClusterInitFunction(numShards, numReplicas, maxShardsPerNode) {
@Override
public void accept(String clusterId, MiniSolrCloudCluster cluster) {
for (final String collection : collections) {
if (random().nextBoolean()) {
doAccept(collection, cluster); // same collection name in different clouds
} else {
doAccept(collection+"_in_"+clusterId, cluster); // globally unique collection name
}
}
}
});
}
@Test
public void test() throws Exception {
assertEquals("numClouds", numClouds, clusterId2cluster.size());
}
}

View File

@@ -0,0 +1,107 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;

import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.junit.AfterClass;
/**
* Base class for tests that require more than one SolrCloud
*
* Derived tests should call {@link #doSetupClusters(String[], Function, BiConsumer)} in a {@code BeforeClass}
* static method. This configures and starts the {@link MiniSolrCloudCluster} instances, available
* via the {@code clusterId2cluster} variable. The clusters' shutdown is handled automatically.
*/
public abstract class MultiSolrCloudTestCase extends SolrTestCaseJ4 {
protected static Map<String,MiniSolrCloudCluster> clusterId2cluster = new HashMap<String,MiniSolrCloudCluster>();
protected static abstract class DefaultClusterCreateFunction implements Function<String,MiniSolrCloudCluster> {
public DefaultClusterCreateFunction() {
}
protected abstract int nodesPerCluster(String clusterId);
@Override
public MiniSolrCloudCluster apply(String clusterId) {
try {
final MiniSolrCloudCluster cluster = new SolrCloudTestCase
.Builder(nodesPerCluster(clusterId), createTempDir())
.addConfig("conf", configset("cloud-dynamic"))
.build();
return cluster;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
protected static abstract class DefaultClusterInitFunction implements BiConsumer<String,MiniSolrCloudCluster> {
final private int numShards;
final private int numReplicas;
final private int maxShardsPerNode;
public DefaultClusterInitFunction(int numShards, int numReplicas, int maxShardsPerNode) {
this.numShards = numShards;
this.numReplicas = numReplicas;
this.maxShardsPerNode = maxShardsPerNode;
}
protected void doAccept(String collection, MiniSolrCloudCluster cluster) {
try {
CollectionAdminRequest
.createCollection(collection, "conf", numShards, numReplicas)
.setMaxShardsPerNode(maxShardsPerNode)
.processAndWait(cluster.getSolrClient(), SolrCloudTestCase.DEFAULT_TIMEOUT);
AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(), false, true, SolrCloudTestCase.DEFAULT_TIMEOUT);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
protected static void doSetupClusters(final String[] clusterIds,
final Function<String,MiniSolrCloudCluster> createFunc,
final BiConsumer<String,MiniSolrCloudCluster> initFunc) throws Exception {
for (final String clusterId : clusterIds) {
assertFalse("duplicate clusterId "+clusterId, clusterId2cluster.containsKey(clusterId));
MiniSolrCloudCluster cluster = createFunc.apply(clusterId);
initFunc.accept(clusterId, cluster);
clusterId2cluster.put(clusterId, cluster);
}
}
@AfterClass
public static void shutdownCluster() throws Exception {
for (MiniSolrCloudCluster cluster : clusterId2cluster.values()) {
cluster.shutdown();
}
clusterId2cluster.clear(); // reset the shared static map so later test classes in the same JVM start clean
}
}
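
For reference, a minimal derived test following the pattern described in the class Javadoc might look like the sketch below. This is not part of the commit: the class name, cluster ids and collection names are hypothetical, and it assumes the same "conf"/"cloud-dynamic" configset that DefaultClusterCreateFunction uploads.

package org.apache.solr.cloud;

import org.junit.BeforeClass;
import org.junit.Test;

// Hypothetical example, not part of this commit: two single-node clouds, one collection per cloud.
public class TwoCloudsExampleTest extends MultiSolrCloudTestCase {

  @BeforeClass
  public static void setupClusters() throws Exception {
    doSetupClusters(
        new String[] {"cloudA", "cloudB"},            // clusterIds
        new DefaultClusterCreateFunction() {
          @Override
          protected int nodesPerCluster(String clusterId) {
            return 1;                                 // one Solr node per cloud
          }
        },
        new DefaultClusterInitFunction(1, 1, 1) {     // numShards, numReplicas, maxShardsPerNode
          @Override
          public void accept(String clusterId, MiniSolrCloudCluster cluster) {
            doAccept("books_" + clusterId, cluster);  // globally unique collection name per cloud
          }
        });
  }

  @Test
  public void eachCloudHasOneNode() {
    for (MiniSolrCloudCluster cluster : clusterId2cluster.values()) {
      assertEquals(1, cluster.getJettySolrRunners().size());
    }
  }
}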

View File

@@ -87,7 +87,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
/**
* Builder class for a MiniSolrCloudCluster
*/
- protected static class Builder {
+ public static class Builder {
private final int nodeCount;
private final Path baseDir;
@@ -187,7 +187,15 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
* @throws Exception if an error occurs on startup
*/
public void configure() throws Exception {
- cluster = new MiniSolrCloudCluster(nodeCount, baseDir, solrxml, jettyConfig, null, securityJson);
+ cluster = build();
+ }
+ /**
+ * Configure, run and return the {@link MiniSolrCloudCluster}
+ * @throws Exception if an error occurs on startup
+ */
+ public MiniSolrCloudCluster build() throws Exception {
+ MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(nodeCount, baseDir, solrxml, jettyConfig, null, securityJson);
CloudSolrClient client = cluster.getSolrClient();
for (Config config : configs) {
((ZkClientClusterStateProvider)client.getClusterStateProvider()).uploadConfig(config.path, config.name);
@@ -199,6 +207,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
props.setClusterProperty(entry.getKey(), entry.getValue());
}
}
+ return cluster;
}
}
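
Making Builder public and adding build() is what lets MultiSolrCloudTestCase (via DefaultClusterCreateFunction above) create clusters without going through configure() and the static cluster field. A hedged sketch of such direct usage follows; the class name, node count and test body are illustrative, not part of the commit.

package org.apache.solr.cloud;

import org.apache.solr.SolrTestCaseJ4;
import org.junit.Test;

// Hypothetical example, not part of this commit: building a cluster directly via the now-public Builder.
public class BuilderDirectUsageExample extends SolrTestCaseJ4 {

  @Test
  public void buildAndShutdown() throws Exception {
    MiniSolrCloudCluster cluster = new SolrCloudTestCase
        .Builder(2, createTempDir())                    // 2 nodes, temporary base directory
        .addConfig("conf", configset("cloud-dynamic"))  // same configset as DefaultClusterCreateFunction
        .build();                                       // build() returns the running cluster
    try {
      assertEquals(2, cluster.getJettySolrRunners().size());
    } finally {
      cluster.shutdown();                               // caller owns shutdown when bypassing configure()
    }
  }
}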