tests: speed up test a bit

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1451322 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Mark Robert Miller 2013-02-28 20:04:20 +00:00
parent 43fe27bf8e
commit f5e2e50c7f
1 changed file with 16 additions and 16 deletions

View File

@ -60,13 +60,17 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
// Suite-level setup: runs once before any test in this class.
// Forces a filesystem-backed directory factory for the whole suite;
// see the TODO below for why an in-memory directory cannot be used yet.
@BeforeClass
public static void beforeThisClass2() throws Exception {
// TODO: we use an fs based dir because something
// like a ram dir will not recover correctly right now
// because tran log will still exist on restart and ram
// dir will not persist - perhaps translog can empty on
// start if using an EphemeralDirectoryFactory
// null presumably selects the default (filesystem-based) factory,
// per the TODO above — confirm against useFactory's declaration.
useFactory(null);
}
// Pins the cluster topology for this distributed test: 2 slices served by
// 3 shard instances. NOTE(review): fixShardCount/sliceCount/shardCount are
// presumably fields inherited from AbstractFullDistribZkTestBase — confirm
// their exact semantics against the base class.
public BasicDistributedZk2Test() {
super();
// fixShardCount appears to disable randomized shard counts in favor of
// the explicit values below — TODO confirm in the base class.
fixShardCount = true;
sliceCount = 2;
shardCount = 3;
}
/*
* (non-Javadoc)
*
@ -253,17 +257,17 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
assertEquals(5, oldLiveNodes);
assertEquals(4, oldLiveNodes);
// kill a shard
CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD2, 0);
CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
// we are careful to make sure the downed node is no longer in the state,
// because on some systems (especially freebsd w/ blackhole enabled), trying
// to talk to a downed node causes grief
Set<CloudJettyRunner> jetties = new HashSet<CloudJettyRunner>();
jetties.addAll(shardToJetty.get(SHARD2));
jetties.addAll(shardToJetty.get(SHARD1));
jetties.remove(deadShard);
// ensure shard is dead
@ -277,8 +281,6 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
commit();
printLayout();
query("q", "*:*", "sort", "n_tl1 desc");
// long cloudClientDocs = cloudClient.query(new
@ -290,7 +292,8 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
index_specific(shardToJetty.get(SHARD2).get(1).client.solrClient, id, 1000, i1, 108, t1,
cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 15000);
index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
"specific doc!");
commit();
@ -363,7 +366,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
waitForRecoveriesToFinish(false);
deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient
.query(query).getResults().getNumFound();
// if we properly recovered, we should now have the couple missing docs that
// came in while shard was down
@ -401,15 +404,12 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
// try a backup command
final HttpSolrServer client = (HttpSolrServer) shardToJetty.get(SHARD2).get(0).client.solrClient;
System.out.println("base url: "+ client.getBaseURL());
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/replication");
params.set("command", "backup");
QueryRequest request = new QueryRequest(params);
NamedList<Object> results = client.request(request );
System.out.println("results:" + results);
checkForBackupSuccess(client);