SOLR-6483, Refactor some methods in MiniSolrCloudCluster tests

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1648923 13f79535-47bb-0310-9956-ffa450edef68
Erick Erickson 2015-01-01 19:19:12 +00:00
parent 9fadf77120
commit 362ddae754
3 changed files with 81 additions and 102 deletions
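This commit moves the ZooKeeper config upload and collection creation that TestMiniSolrCloudCluster previously did by hand (JVM-wide System properties plus raw SolrZkClient and Collections API calls) into MiniSolrCloudCluster itself, which now also owns a shared CloudSolrClient. A minimal sketch of how a test drives the refactored helpers; miniCluster, NUM_SHARDS, and REPLICATION_FACTOR come from the test class shown below, and the snippet is illustrative rather than a verbatim excerpt of the commit:

    // Upload a config set straight from a local directory (replaces the old uploadConfigToZk helper)
    File configDir = new File(SolrTestCaseJ4.TEST_HOME() + File.separator
        + "collection1" + File.separator + "conf");
    miniCluster.uploadConfigDir(configDir, "solrCloudCollectionConfig");

    // Per-collection properties replace the System.setProperty calls the test used to need
    Map<String, String> collectionProperties = new HashMap<>();
    collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
    collectionProperties.put("solr.tests.ramBufferSizeMB", "100");
    miniCluster.createCollection("testSolrCloudCollection", NUM_SHARDS,
        REPLICATION_FACTOR, "solrCloudCollectionConfig", collectionProperties);

    // The cluster now owns the CloudSolrClient; the test just borrows it
    CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
    cloudSolrClient.setDefaultCollection("testSolrCloudCollection");
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    cloudSolrClient.add(doc);
    cloudSolrClient.commit();

Each collectionProperties entry is forwarded on the Collections API CREATE request as a prefixed parameter (CoreAdminParams.PROPERTY_PREFIX, i.e. property.<name>), so values such as solr.tests.ramBufferSizeMB reach the new collection's solrconfig as core properties instead of leaking through system properties.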

solr/CHANGES.txt

@@ -596,6 +596,9 @@ Other Changes
* SOLR-6895: SolrServer classes are renamed to *SolrClient. The existing
classes still exist, but are deprecated. (Alan Woodward, Erik Hatcher)
* SOLR-6483: Refactor some methods in MiniSolrCloudCluster tests (Steve Davids via
Erick Erickson)
================== 4.10.3 ==================
Bug Fixes

TestMiniSolrCloudCluster.java

@@ -32,10 +32,7 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.util.RevertDefaultThreadHandlerRule;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -88,14 +85,6 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
miniCluster.shutdown();
}
miniCluster = null;
System.clearProperty("solr.tests.mergePolicy");
System.clearProperty("solr.tests.maxBufferedDocs");
System.clearProperty("solr.tests.maxIndexingThreads");
System.clearProperty("solr.tests.ramBufferSizeMB");
System.clearProperty("solr.tests.mergeScheduler");
System.clearProperty("solr.directoryFactory");
System.clearProperty("solr.solrxml.location");
System.clearProperty("zkHost");
}
@Test
@@ -117,28 +106,33 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
assertTrue(startedServer.isRunning());
assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
CloudSolrClient cloudSolrClient = null;
SolrZkClient zkClient = null;
try {
cloudSolrClient = new CloudSolrClient(miniCluster.getZkServer().getZkAddress(), true);
cloudSolrClient.connect();
zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(),
AbstractZkTestCase.TIMEOUT, 45000, null);
// create collection
String collectionName = "testSolrCloudCollection";
String configName = "solrCloudCollectionConfig";
System.setProperty("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
uploadConfigToZk(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf", configName);
createCollection(cloudSolrClient, collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName);
File configDir = new File(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
miniCluster.uploadConfigDir(configDir, configName);
Map<String, String> collectionProperties = new HashMap<>();
collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
collectionProperties.put("solr.tests.maxBufferedDocs", "100000");
collectionProperties.put("solr.tests.maxIndexingThreads", "-1");
collectionProperties.put("solr.tests.ramBufferSizeMB", "100");
// use non-test classes so RandomizedRunner isn't necessary
collectionProperties.put("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
collectionProperties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
collectionProperties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
miniCluster.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName, collectionProperties);
try(SolrZkClient zkClient = new SolrZkClient
(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, 45000, null)) {
ZkStateReader zkStateReader = new ZkStateReader(zkClient);
waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
// modify/query collection
CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
cloudSolrClient.setDefaultCollection(collectionName);
SolrInputDocument doc = new SolrInputDocument();
doc.setField("id", "1");
ZkStateReader zkStateReader = new ZkStateReader(zkClient);
waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
cloudSolrClient.add(doc);
cloudSolrClient.commit();
SolrQuery query = new SolrQuery();
@@ -171,62 +165,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
}
}
} finally {
if (cloudSolrClient != null) {
cloudSolrClient.shutdown();
}
if (zkClient != null) {
zkClient.close();
}
}
}
protected void uploadConfigToZk(String configDir, String configName) throws Exception {
// override settings in the solrconfig include
System.setProperty("solr.tests.maxBufferedDocs", "100000");
System.setProperty("solr.tests.maxIndexingThreads", "-1");
System.setProperty("solr.tests.ramBufferSizeMB", "100");
// use non-test classes so RandomizedRunner isn't necessary
System.setProperty("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
System.setProperty("solr.directoryFactory", "solr.RAMDirectoryFactory");
SolrZkClient zkClient = null;
try {
zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, 45000, null);
uploadConfigFileToZk(zkClient, configName, "solrconfig.xml", new File(configDir, "solrconfig-tlog.xml"));
uploadConfigFileToZk(zkClient, configName, "schema.xml", new File(configDir, "schema.xml"));
uploadConfigFileToZk(zkClient, configName, "solrconfig.snippet.randomindexconfig.xml",
new File(configDir, "solrconfig.snippet.randomindexconfig.xml"));
uploadConfigFileToZk(zkClient, configName, "currency.xml", new File(configDir, "currency.xml"));
uploadConfigFileToZk(zkClient, configName, "mapping-ISOLatin1Accent.txt",
new File(configDir, "mapping-ISOLatin1Accent.txt"));
uploadConfigFileToZk(zkClient, configName, "old_synonyms.txt", new File(configDir, "old_synonyms.txt"));
uploadConfigFileToZk(zkClient, configName, "open-exchange-rates.json",
new File(configDir, "open-exchange-rates.json"));
uploadConfigFileToZk(zkClient, configName, "protwords.txt", new File(configDir, "protwords.txt"));
uploadConfigFileToZk(zkClient, configName, "stopwords.txt", new File(configDir, "stopwords.txt"));
uploadConfigFileToZk(zkClient, configName, "synonyms.txt", new File(configDir, "synonyms.txt"));
} finally {
if (zkClient != null) zkClient.close();
}
}
protected void uploadConfigFileToZk(SolrZkClient zkClient, String configName, String nameInZk, File file)
throws Exception {
zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + configName + "/" + nameInZk, file, false, true);
}
protected NamedList<Object> createCollection(CloudSolrClient client, String name, int numShards,
int replicationFactor, String configName) throws Exception {
ModifiableSolrParams modParams = new ModifiableSolrParams();
modParams.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
modParams.set("name", name);
modParams.set("numShards", numShards);
modParams.set("replicationFactor", replicationFactor);
modParams.set("collection.configName", configName);
QueryRequest request = new QueryRequest(modParams);
request.setPath("/admin/collections");
return client.request(request);
}
protected void waitForRecoveriesToFinish(String collection,

MiniSolrCloudCluster.java

@@ -18,17 +18,23 @@ package org.apache.solr.cloud;
*/
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import org.apache.commons.io.IOUtils;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.zookeeper.CreateMode;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.eclipse.jetty.servlet.ServletHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -40,6 +46,7 @@ public class MiniSolrCloudCluster {
private ZkTestServer zkServer;
private List<JettySolrRunner> jettys;
private File testDir;
private CloudSolrClient solrClient;
/**
* "Mini" SolrCloud cluster to be used for testing
@@ -60,17 +67,9 @@ public class MiniSolrCloudCluster {
zkServer = new ZkTestServer(zkDir);
zkServer.run();
SolrZkClient zkClient = null;
InputStream is = null;
try {
zkClient = new SolrZkClient(zkServer.getZkHost(),
AbstractZkTestCase.TIMEOUT, 45000, null);
zkClient.makePath("/solr", false, true);
is = new FileInputStream(solrXml);
zkClient.create("/solr/solr.xml", IOUtils.toByteArray(is), CreateMode.PERSISTENT, true);
} finally {
IOUtils.closeQuietly(is);
if (zkClient != null) zkClient.close();
try(SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(),
AbstractZkTestCase.TIMEOUT, 45000, null)) {
zkClient.makePath("/solr/solr.xml", solrXml, false, true);
}
// tell solr to look in zookeeper for solr.xml
@@ -81,6 +80,8 @@ public class MiniSolrCloudCluster {
for (int i = 0; i < numServers; ++i) {
startJettySolrRunner(hostContext, extraServlets, extraRequestFilters);
}
solrClient = buildSolrClient();
}
/**
@@ -127,11 +128,39 @@ public class MiniSolrCloudCluster {
return jetty;
}
public void uploadConfigDir(File configDir, String configName) throws IOException, KeeperException, InterruptedException {
try(SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
AbstractZkTestCase.TIMEOUT, 45000, null)) {
ZkController.uploadConfigDir(zkClient, configDir, configName);
}
}
public NamedList<Object> createCollection(String name, int numShards, int replicationFactor,
String configName, Map<String, String> collectionProperties) throws SolrServerException, IOException {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
params.set(CoreAdminParams.NAME, name);
params.set("numShards", numShards);
params.set("replicationFactor", replicationFactor);
params.set("collection.configName", configName);
if(collectionProperties != null) {
for(Map.Entry<String, String> property : collectionProperties.entrySet()){
params.set(CoreAdminParams.PROPERTY_PREFIX + property.getKey(), property.getValue());
}
}
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
return solrClient.request(request);
}
/**
* Shut down the cluster, including all Solr nodes and ZooKeeper
*/
public void shutdown() throws Exception {
try {
solrClient.shutdown();
for (int i = jettys.size() - 1; i >= 0; --i) {
stopJettySolrRunner(i);
}
@@ -145,6 +174,14 @@ public class MiniSolrCloudCluster {
}
}
public CloudSolrClient getSolrClient() {
return solrClient;
}
protected CloudSolrClient buildSolrClient() {
return new CloudSolrClient(getZkServer().getZkAddress());
}
private static String getHostContextSuitableForServletContext(String ctx) {
if (ctx == null || "".equals(ctx)) ctx = "/solr";
if (ctx.endsWith("/")) ctx = ctx.substring(0,ctx.length()-1);;
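
The cluster now builds a single CloudSolrClient in its constructor and closes it in shutdown(), so tests obtain it through getSolrClient() rather than opening and shutting their own. A short sketch of the intended usage, assuming a collection named collectionName already exists and a single document has been indexed as in the test above:

    CloudSolrClient client = miniCluster.getSolrClient();
    client.setDefaultCollection(collectionName);

    SolrQuery query = new SolrQuery();
    query.setQuery("*:*");
    QueryResponse response = client.query(query);
    assertEquals(1, response.getResults().getNumFound());

    // No client.shutdown() here: miniCluster.shutdown() closes the shared client
    // along with the Jetty nodes and the ZooKeeper test server.
    miniCluster.shutdown();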