tests: improve some cloud tests

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1446935 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller 2013-02-16 20:21:33 +00:00
parent b6ca8acdaa
commit 902e34af55
7 changed files with 94 additions and 18 deletions


@@ -294,7 +294,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
Slice slices = zkController.getClusterState().getSlice(collection, shardId);
int cnt = 0;
while (true && !isClosed) {
while (true && !isClosed && !cc.isShutDown()) {
// wait for everyone to be up
if (slices != null) {
int found = 0;
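The added isShutDown() check follows a common wait-loop pattern: re-test the close and shutdown flags on every iteration so the wait ends promptly once the CoreContainer is going away, instead of spinning until the retry budget runs out. Below is a minimal, self-contained sketch of that pattern; the class and method names are hypothetical, not Solr APIs.

public class ShutdownAwareWait {

  private volatile boolean closed;    // analogous to isClosed in the election context
  private volatile boolean shutDown;  // analogous to cc.isShutDown()

  public void close()    { closed = true; }
  public void shutdown() { shutDown = true; }

  /** Polls until the condition holds, the retry budget is spent, or we are shut down. */
  public boolean waitFor(java.util.function.BooleanSupplier condition,
                         int maxRetries, long pollMillis) throws InterruptedException {
    int cnt = 0;
    while (!closed && !shutDown) {
      if (condition.getAsBoolean()) {
        return true;                  // everyone is up
      }
      if (cnt++ >= maxRetries) {
        return false;                 // bounded wait: give up after maxRetries checks
      }
      Thread.sleep(pollMillis);       // pause between cluster-state checks
    }
    return false;                     // ended early by close() or shutdown()
  }

  public static void main(String[] args) throws InterruptedException {
    ShutdownAwareWait wait = new ShutdownAwareWait();
    new Thread(() -> { sleepQuietly(200); wait.shutdown(); }).start();
    System.out.println("condition met: " + wait.waitFor(() -> false, 100, 50));
  }

  private static void sleepQuietly(long ms) {
    try { Thread.sleep(ms); } catch (InterruptedException ignored) { }
  }
}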


@@ -18,10 +18,21 @@
-->
<config>
<jmx />
<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}">
<!-- used to keep RAM reqs down for HdfsDirectoryFactory -->
<int name="solr.hdfs.blockcache.blocksperbank">${solr.hdfs.blockcache.blocksperbank:1024}</int>
</directoryFactory>
<dataDir>${solr.data.dir:}</dataDir>
<indexConfig>
<lockType>${solr.lock.type:native}</lockType>
</indexConfig>
<!-- an update processor that explicitly excludes distrib to test
clean errors when people attempt atomic updates w/o it
-->
@@ -38,12 +49,14 @@
<str name="omitHeader">true</str>
</lst>
</requestHandler>
<requestHandler name="/replication" class="solr.ReplicationHandler" startup="lazy" />
<requestHandler name="/update" class="solr.UpdateRequestHandler" />
<updateHandler class="solr.DirectUpdateHandler2">
<updateLog>
<!-- <str name="dir">/tmp/solr/</str> -->
<str name="dir">${solr.ulog.dir:}</str>
</updateLog>
</updateHandler>
@@ -72,4 +85,35 @@
<requestHandler name="/admin/" class="org.apache.solr.handler.admin.AdminHandlers" />
<updateRequestProcessorChain name="distrib-dup-test-chain-explicit">
<!-- explicit test using processors before and after distrib -->
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_A_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.DistributedUpdateProcessorFactory" />
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_B_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="distrib-dup-test-chain-implicit">
<!-- implicit test w/o distrib declared-->
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_A_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_B_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
</config>
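The ${property:default} placeholders in this config are typically resolved from JVM system properties when the core loads, so a test run can switch the directory factory, lock type, or block-cache size without editing the XML. A minimal sketch of that, using the property names from the config above and hypothetical values:

// Minimal sketch: setting the system properties the config falls back on.
// Values here are illustrative; unset properties use the defaults after the ':'.
public class TestPropertySetup {
  public static void main(String[] args) {
    // Directory implementation; defaults to solr.RAMDirectoryFactory if unset.
    System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
    // Keeps RAM requirements down when HdfsDirectoryFactory is chosen.
    System.setProperty("solr.hdfs.blockcache.blocksperbank", "1024");
    // Index lock type; the config falls back to "native".
    System.setProperty("solr.lock.type", "single");
    // Update log directory; empty falls back to the core's data directory.
    System.setProperty("solr.ulog.dir", "");
  }
}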


@@ -20,7 +20,6 @@ package org.apache.solr.cloud;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.LuceneTestCase.BadApple;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.common.SolrInputDocument;
@@ -37,8 +36,7 @@ import org.junit.BeforeClass;
@Slow
public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
private static final int BASE_RUN_LENGTH = 120000;
private static final int RUN_LENGTH = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.runlength", Integer.toString(BASE_RUN_LENGTH)));
private static final Integer RUN_LENGTH = Integer.parseInt(System.getProperty("solr.tests.cloud.cm.runlength", "-1"));
@BeforeClass
public static void beforeSuperClass() {
@@ -104,7 +102,14 @@ public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
}
chaosMonkey.startTheMonkey(false, 500);
int runLength = RUN_LENGTH;
long runLength;
if (RUN_LENGTH != -1) {
runLength = RUN_LENGTH;
} else {
int[] runTimes = new int[] {5000,6000,10000,15000,15000,30000,30000,45000,90000,120000};
runLength = runTimes[random().nextInt(runTimes.length - 1)];
}
Thread.sleep(runLength);
chaosMonkey.stopTheMonkey();
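The run length now comes from the solr.tests.cloud.cm.runlength system property when it is set, and otherwise from a small table of durations, so ordinary test runs stay short while a longer soak run can still be forced from the command line. A self-contained sketch of that selection, with a hypothetical class name and a pick that is uniform over the whole table:

public class RunLengthPicker {
  public static void main(String[] args) {
    int configured = Integer.parseInt(
        System.getProperty("solr.tests.cloud.cm.runlength", "-1"));
    long runLength;
    if (configured != -1) {
      runLength = configured;                      // explicit override wins
    } else {
      int[] runTimes = {5000, 6000, 10000, 15000, 15000,
                        30000, 30000, 45000, 90000, 120000};
      runLength = runTimes[new java.util.Random().nextInt(runTimes.length)];
    }
    System.out.println("monkey run length: " + runLength + " ms");
  }
}

For example, running with -Dsolr.tests.cloud.cm.runlength=60000 pins the selection to a one-minute run.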


@@ -50,30 +50,37 @@ public class RecoveryZkTest extends AbstractFullDistribZkTestBase {
// start a couple indexing threads
indexThread = new StopableIndexingThread(0, true);
int[] maxDocList = new int[] {300, 700, 1200, 1350, 5000, 15000};
int maxDoc = maxDocList[random().nextInt(maxDocList.length - 1)];
indexThread = new StopableIndexingThread(0, true, maxDoc);
indexThread.start();
indexThread2 = new StopableIndexingThread(10000, true);
indexThread2 = new StopableIndexingThread(10000, true, maxDoc);
indexThread2.start();
// give some time to index...
Thread.sleep(atLeast(2000));
int[] waitTimes = new int[] {2000, 3000, 5000};
Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
// bring shard replica down
JettySolrRunner replica = chaosMonkey.stopShard("shard1", 1).jetty;
// wait a moment - let's allow some docs to be indexed so replication time is non-zero
Thread.sleep(atLeast(2000));
Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
// bring shard replica up
replica.start();
// make sure replication can start
Thread.sleep(1500);
Thread.sleep(3000);
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
// give some time for replication to complete
Thread.sleep(5000);
// stop indexing threads
indexThread.safeStop();
@@ -86,12 +93,10 @@ public class RecoveryZkTest extends AbstractFullDistribZkTestBase {
waitForThingsToLevelOut(30);
Thread.sleep(1000);
Thread.sleep(2000);
waitForThingsToLevelOut(30);
Thread.sleep(5000);
waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
// test that leader and replica have same doc count
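The fixed pauses and document counts in this test are now drawn from small tables rather than constants. A hypothetical helper capturing that pattern (the pick here is uniform over every entry):

import java.util.Random;

class RandomChoice {
  private static final Random RANDOM = new Random();

  static int pick(int... options) {
    return options[RANDOM.nextInt(options.length)];  // uniform over all entries
  }

  public static void main(String[] args) {
    int maxDoc = pick(300, 700, 1200, 1350, 5000, 15000);  // per-thread doc ceiling
    int pause  = pick(2000, 3000, 5000);                   // indexing pause, in ms
    System.out.println("maxDoc=" + maxDoc + ", pause=" + pause + "ms");
  }
}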


@@ -52,6 +52,10 @@ public class CloudSolrServerTest extends AbstractFullDistribZkTestBase {
}
protected String getCloudSolrConfig() {
return "solrconfig.xml";
}
@Override
public String getSolrHome() {
return SOLR_HOME;


@@ -67,13 +67,17 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTestCase {
String schema = getSchemaFile();
if (schema == null) schema = "schema.xml";
AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", schema);
AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), getCloudSolrConfig(), schema);
// set some system properties for use by tests
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
}
protected String getCloudSolrConfig() {
return "solrconfig-tlog.xml";
}
@Override
protected void createServers(int numShards) throws Exception {
// give everyone their own solrhome
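The two hunks above form a small template-method hook: the base class defaults to the tlog-enabled config when it uploads files to ZooKeeper, and a subclass such as CloudSolrServerTest overrides the hook to use the plain solrconfig.xml instead. A sketch of that shape, with hypothetical class names standing in for the test base classes:

abstract class ZkTestBaseSketch {
  /** Config uploaded to ZooKeeper for the test collection; tlog config by default. */
  protected String getCloudSolrConfig() {
    return "solrconfig-tlog.xml";
  }

  protected void setUpZooKeeper() {
    // Stand-in for AbstractZkTestCase.buildZooKeeper(...): the overridable hook
    // decides which config file the test uploads.
    System.out.println("uploading " + getCloudSolrConfig() + " and schema.xml");
  }
}

class PlainConfigTestSketch extends ZkTestBaseSketch {
  @Override
  protected String getCloudSolrConfig() {
    return "solrconfig.xml";   // this test wants the plain test config
  }
}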


@@ -1140,21 +1140,34 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTestBase {
protected final List<Integer> deletes = new ArrayList<Integer>();
protected final AtomicInteger fails = new AtomicInteger();
protected boolean doDeletes;
private int numCycles;
public StopableIndexingThread(int startI, boolean doDeletes) {
this(startI, doDeletes, -1);
}
public StopableIndexingThread(int startI, boolean doDeletes, int numCycles) {
super("StopableIndexingThread");
this.startI = startI;
this.doDeletes = doDeletes;
this.numCycles = numCycles;
setDaemon(true);
}
@Override
public void run() {
int i = startI;
int numDone = 0;
int numDeletes = 0;
int numAdds = 0;
while (true && !stop) {
if (numCycles != -1) {
if (numDone > numCycles) {
break;
}
}
++numDone;
++i;
boolean addFailed = false;
@@ -1202,6 +1215,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTestBase {
@Override
public void safeStop() {
System.out.println("safe stop:");
stop = true;
}
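A self-contained sketch of the new bounding behaviour: a numCycles of -1 keeps the old run-until-safeStop() semantics, while a positive value (the maxDoc passed in RecoveryZkTest) caps the number of indexing cycles. The class below is a hypothetical stand-in, not the test-framework thread itself.

class BoundedIndexerSketch extends Thread {
  private volatile boolean stop;
  private final int startI;
  private final int numCycles;   // -1 means "run until safeStop()"

  BoundedIndexerSketch(int startI, int numCycles) {
    super("BoundedIndexerSketch");
    this.startI = startI;
    this.numCycles = numCycles;
    setDaemon(true);
  }

  @Override
  public void run() {
    int i = startI;
    int numDone = 0;
    while (!stop) {
      if (numCycles != -1 && numDone > numCycles) {
        break;                     // reached the requested ceiling
      }
      ++numDone;
      ++i;                         // stand-in for "index document i, maybe delete one"
    }
    System.out.println(getName() + " stopped after " + numDone + " cycles");
  }

  void safeStop() {
    stop = true;                   // observed by run() on its next iteration
  }

  public static void main(String[] args) throws InterruptedException {
    BoundedIndexerSketch bounded   = new BoundedIndexerSketch(0, 5000);     // stops on its own
    BoundedIndexerSketch unbounded = new BoundedIndexerSketch(10000, -1);   // needs safeStop()
    bounded.start();
    unbounded.start();
    bounded.join();
    unbounded.safeStop();
    unbounded.join();
  }
}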