still favor leaders with CloudSolrServer, but also fall back to replicas after leaders

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1370864 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Mark Robert Miller 2012-08-08 18:01:11 +00:00
parent 3979cc97f3
commit e528549a3b
22 changed files with 729 additions and 435 deletions

View File

@ -0,0 +1,355 @@
package org.apache.solr.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.HashSet;
import java.util.Set;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.servlet.SolrDispatchFilter;
/**
 * Exercises a series of basic SolrCloud operations against a multi-shard
 * cluster — indexing, distributed querying, killing and recovering a shard,
 * adding a new replica, update/delete of a single doc — and asserts the
 * cluster stays consistent throughout.
 */
public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {

  public BasicDistributedZk2Test() {
    super();
  }

  /*
   * (non-Javadoc)
   *
   * @see org.apache.solr.BaseDistributedSearchTestCase#doTest()
   *
   * Create 3 shards, each with one replica
   */
  @Override
  public void doTest() throws Exception {
    // if the test dies part-way, dump the cluster layout in tearDown
    boolean testFinished = false;
    try {
      handle.clear();
      handle.put("QTime", SKIPVAL);
      handle.put("timestamp", SKIPVAL);

      indexr(id, 1, i1, 100, tlong, 100, t1,
          "now is the time for all good men", "foo_f", 1.414f, "foo_b", "true",
          "foo_d", 1.414d);

      // make sure we are in a steady state...
      waitForRecoveriesToFinish(false);

      commit();

      assertDocCounts(false);

      indexAbunchOfDocs();

      // check again
      waitForRecoveriesToFinish(false);

      commit();

      assertDocCounts(VERBOSE);
      checkQueries();

      assertDocCounts(VERBOSE);

      query("q", "*:*", "sort", "n_tl1 desc");

      bringDownShardIndexSomeDocsAndRecover();

      query("q", "*:*", "sort", "n_tl1 desc");

      // test adding another replica to a shard - it should do a
      // recovery/replication to pick up the index from the leader
      addNewReplica();

      long docId = testUpdateAndDelete();

      // index a bad doc...
      try {
        indexr(t1, "a doc with no id");
        fail("this should fail");
      } catch (SolrException e) {
        // expected
      }

      // TODO: bring this to its own method?
      // try indexing to a leader that has no replicas up
      ZkNodeProps leaderProps = zkStateReader.getLeaderProps(
          DEFAULT_COLLECTION, SHARD2);

      String nodeName = leaderProps.get(ZkStateReader.NODE_NAME_PROP);
      chaosMonkey.stopShardExcept(SHARD2, nodeName);

      SolrServer client = getClient(nodeName);

      index_specific(client, "id", docId + 1, t1, "what happens here?");

      // expire a session...
      CloudJettyRunner cloudJetty = shardToJetty.get("shard1").get(0);
      chaosMonkey.expireSession(cloudJetty.jetty);

      indexr("id", docId + 1, t1, "slip this doc in");

      waitForRecoveriesToFinish(false);

      checkShardConsistency("shard1");

      testFinished = true;
    } finally {
      if (!testFinished) {
        printLayoutOnTearDown = true;
      }
    }
  }

  /**
   * Indexes a doc, updates it in place, then deletes it, asserting the
   * cluster-visible result after each step.
   *
   * @return the doc id used, so callers can index follow-up docs past it
   */
  private long testUpdateAndDelete() throws Exception {
    long docId = 99999999L;
    indexr("id", docId, t1, "originalcontent");

    commit();

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", t1 + ":originalcontent");
    QueryResponse results = clients.get(0).query(params);
    assertEquals(1, results.getResults().getNumFound());

    // update doc
    indexr("id", docId, t1, "updatedcontent");

    commit();

    // the old content must be gone and the new content findable
    results = clients.get(0).query(params);
    assertEquals(0, results.getResults().getNumFound());

    params.set("q", t1 + ":updatedcontent");

    results = clients.get(0).query(params);
    assertEquals(1, results.getResults().getNumFound());

    UpdateRequest uReq = new UpdateRequest();
    // uReq.setParam(UpdateParams.UPDATE_CHAIN, DISTRIB_UPDATE_CHAIN);
    uReq.deleteById(Long.toString(docId)).process(clients.get(0));

    commit();

    results = clients.get(0).query(params);
    assertEquals(0, results.getResults().getNumFound());

    return docId;
  }

  /**
   * Kills one replica of SHARD2, verifies the dead node rejects updates while
   * the rest of the cluster keeps accepting them (directly and via
   * CloudSolrServer), restarts the node and checks a short (peer sync)
   * recovery, then repeats with &gt;100 docs to force a full replication
   * recovery.
   */
  private void bringDownShardIndexSomeDocsAndRecover() throws Exception {
    SolrQuery query = new SolrQuery("*:*");
    query.set("distrib", false);

    commit();

    // probe the replica we are about to kill; the count itself is not asserted
    shardToJetty.get(SHARD2).get(0).client.solrClient.query(query);

    query("q", "*:*", "sort", "n_tl1 desc");

    // kill a shard
    CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD2, 0);
    cloudClient.connect();

    // we are careful to make sure the downed node is no longer in the state,
    // because on some systems (especially freebsd w/ blackhole enabled), trying
    // to talk to a downed node causes grief
    Set<CloudJettyRunner> jetties = new HashSet<CloudJettyRunner>();
    jetties.addAll(shardToJetty.get(SHARD2));
    jetties.remove(deadShard);
    for (CloudJettyRunner cjetty : jetties) {
      waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
          .getFilter()).getCores().getZkController().getZkStateReader(),
          deadShard);
    }
    waitToSeeNotLive(cloudClient.getZkStateReader(), deadShard);

    // ensure shard is dead
    try {
      index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1,
          "specific doc!");
      fail("This server should be down and this update should have failed");
    } catch (SolrServerException e) {
      // expected..
    }

    commit();

    query("q", "*:*", "sort", "n_tl1 desc");

    // long cloudClientDocs = cloudClient.query(new
    // SolrQuery("*:*")).getResults().getNumFound();
    // System.out.println("clouddocs:" + cloudClientDocs);

    // try to index to a living shard at shard2
    long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    index_specific(shardToJetty.get(SHARD2).get(1).client.solrClient, id, 1000, i1, 108, t1,
        "specific doc!");

    commit();

    checkShardConsistency(true, false);

    query("q", "*:*", "sort", "n_tl1 desc");

    // try adding a doc with CloudSolrServer
    cloudClient.setDefaultCollection(DEFAULT_COLLECTION);

    long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();

    assertEquals(numFound1 + 1, numFound2);

    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", 1001);

    controlClient.add(doc);

    UpdateRequest ureq = new UpdateRequest();
    ureq.add(doc);
    // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
    ureq.process(cloudClient);

    commit();

    query("q", "*:*", "sort", "n_tl1 desc");

    long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();

    // lets just check that the one doc since last commit made it in...
    assertEquals(numFound2 + 1, numFound3);

    // test debugging
    testDebugQueries();

    if (VERBOSE) {
      System.err.println(controlClient.query(new SolrQuery("*:*")).getResults()
          .getNumFound());

      for (SolrServer client : clients) {
        try {
          SolrQuery q = new SolrQuery("*:*");
          q.set("distrib", false);
          System.err.println(client.query(q).getResults()
              .getNumFound());
        } catch (Exception e) {
          // best-effort debug output only; ignore nodes that are down
        }
      }
    }

    // TODO: This test currently fails because debug info is obtained only
    // on shards with matches.
    // query("q","matchesnothing","fl","*,score", "debugQuery", "true");

    // this should trigger a recovery phase on deadShard
    ChaosMonkey.start(deadShard.jetty);

    // make sure we have published we are recovering
    Thread.sleep(1500);

    waitForRecoveriesToFinish(false);

    // probe the recovered node to make sure it is serving queries again
    shardToJetty.get(SHARD2).get(0).client.solrClient.query(query);

    // if we properly recovered, we should now have the couple missing docs that
    // came in while shard was down
    checkShardConsistency(true, false);

    // recover over 100 docs so we do more than just peer sync (replicate recovery)
    chaosMonkey.stopJetty(deadShard);

    for (CloudJettyRunner cjetty : jetties) {
      waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
          .getFilter()).getCores().getZkController().getZkStateReader(),
          deadShard);
    }
    waitToSeeNotLive(cloudClient.getZkStateReader(), deadShard);

    for (int i = 0; i < 226; i++) {
      doc = new SolrInputDocument();
      doc.addField("id", 2000 + i);
      controlClient.add(doc);
      ureq = new UpdateRequest();
      ureq.add(doc);
      // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
      ureq.process(cloudClient);
    }
    commit();

    Thread.sleep(1500);

    ChaosMonkey.start(deadShard.jetty);

    // make sure we have published we are recovering
    Thread.sleep(1500);

    waitForRecoveriesToFinish(false);

    checkShardConsistency(true, false);
  }

  /**
   * Adds one new jetty to the cluster and verifies it joins a shard and
   * replicates the leader's index (doc counts stay consistent).
   */
  private void addNewReplica() throws Exception {
    // the new server joins the collection and should recover from its leader
    createJettys(1);

    waitForRecoveriesToFinish(false);

    // new server should be part of first shard
    // how many docs are on the new shard?
    for (CloudJettyRunner cjetty : shardToJetty.get("shard1")) {
      if (VERBOSE) System.err.println("total:"
          + cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }

    checkShardConsistency("shard1");
    assertDocCounts(VERBOSE);
  }

  /**
   * Runs distributed queries with the various debug flags; comparison of the
   * volatile sections (explain/debug/time) is skipped via the handle map.
   */
  private void testDebugQueries() throws Exception {
    handle.put("explain", SKIPVAL);
    handle.put("debug", UNORDERED);
    handle.put("time", SKIPVAL);
    query("q", "now their fox sat had put", "fl", "*,score",
        CommonParams.DEBUG_QUERY, "true");
    query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING);
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS);
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY);
  }
}

View File

@ -71,10 +71,11 @@ import org.apache.solr.update.SolrCmdDistributor.Request;
import org.apache.solr.util.DefaultSolrThreadFactory;
/**
*
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
*/
@Slow
public class BasicDistributedZkTest extends AbstractDistributedZkTestCase {
public class BasicDistributedZkTest extends AbstractDistribZkTestBase {
private static final String DEFAULT_COLLECTION = "collection1";
private static final boolean DEBUG = false;

View File

@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
@Slow
@Ignore("ignore while investigating jenkins fails")
public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase {
public static Logger log = LoggerFactory.getLogger(ChaosMonkeyNothingIsSafeTest.class);
private static final int BASE_RUN_LENGTH = 180000;

View File

@ -32,7 +32,7 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
@Ignore("SOLR-3126")
public class ChaosMonkeySafeLeaderTest extends FullSolrCloudTest {
public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
private static final int BASE_RUN_LENGTH = 120000;

View File

@ -23,8 +23,8 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
@ -37,14 +37,13 @@ import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.update.VersionInfo;
import org.apache.solr.update.processor.DistributedUpdateProcessor;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.junit.BeforeClass;
/**
* Super basic testing, no shard restarting or anything.
*/
@Slow
public class FullSolrCloudDistribCmdsTest extends FullSolrCloudTest {
public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase {
@BeforeClass

View File

@ -26,12 +26,11 @@ import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.common.SolrInputDocument;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Slow
public class RecoveryZkTest extends FullSolrCloudTest {
public class RecoveryZkTest extends AbstractFullDistribZkTestBase {
//private static final String DISTRIB_UPDATE_CHAIN = "distrib-update-chain";
private static Logger log = LoggerFactory.getLogger(RecoveryZkTest.class);

View File

@ -38,15 +38,13 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
/**
* Test sync phase that occurs when Leader goes down and a new Leader is
* elected.
*/
@Slow
@Ignore("ignore while i look into")
public class SyncSliceTest extends FullSolrCloudTest {
public class SyncSliceTest extends AbstractFullDistribZkTestBase {
@BeforeClass
public static void beforeSuperClass() {
@ -88,7 +86,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
handle.put("QTime", SKIPVAL);
handle.put("timestamp", SKIPVAL);
waitForThingsToLevelOut();
waitForThingsToLevelOut(15);
del("*:*");
List<String> skipServers = new ArrayList<String>();
@ -131,7 +129,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
baseServer.request(request);
waitForThingsToLevelOut();
waitForThingsToLevelOut(15);
checkShardConsistency(false, true);
@ -161,7 +159,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
// to talk to a downed node causes grief
waitToSeeDownInClusterState(leaderJetty, jetties);
waitForThingsToLevelOut();
waitForThingsToLevelOut(15);
checkShardConsistency(false, true);
@ -182,7 +180,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
// give a moment to be sure it has started recovering
Thread.sleep(2000);
waitForThingsToLevelOut();
waitForThingsToLevelOut(15);
waitForRecoveriesToFinish(false);
skipServers = getRandomOtherJetty(leaderJetty, null);
@ -226,6 +224,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
waitForRecoveriesToFinish(false);
checkShardConsistency(true, true);
}
private List<String> getRandomJetty() {
@ -260,34 +259,6 @@ public class SyncSliceTest extends FullSolrCloudTest {
}
waitToSeeNotLive(cloudClient.getZkStateReader(), leaderJetty);
}
private void waitForThingsToLevelOut() throws Exception {
int cnt = 0;
boolean retry = false;
do {
waitForRecoveriesToFinish(false);
commit();
updateMappingsFromZk(jettys, clients);
Set<String> theShards = shardToJetty.keySet();
String failMessage = null;
for (String shard : theShards) {
failMessage = checkShardConsistency(shard, false);
}
if (failMessage != null) {
retry = true;
} else {
retry = false;
}
cnt++;
if (cnt > 10) break;
Thread.sleep(2000);
} while (retry);
}
protected void indexDoc(List<String> skipServers, Object... fields) throws IOException,
SolrServerException {

View File

@ -65,10 +65,14 @@ public class CloudSolrServer extends SolrServer {
// since the state shouldn't change often, should be very cheap reads
private volatile List<String> urlList;
private volatile List<String> leaderUrlList;
private volatile List<String> replicasList;
private volatile int lastClusterStateHashCode;
private final boolean updatesToLeaders;
/**
* @param zkHost The client endpoint of the zookeeper quorum containing the cloud state,
@ -164,9 +168,11 @@ public class CloudSolrServer extends SolrServer {
ClusterState clusterState = zkStateReader.getClusterState();
boolean sendToLeaders = false;
List<String> replicas = null;
if (request instanceof IsUpdateRequest && updatesToLeaders) {
sendToLeaders = true;
replicas = new ArrayList<String>();
}
SolrParams reqParams = request.getParams();
@ -211,17 +217,22 @@ public class CloudSolrServer extends SolrServer {
if (!sendToLeaders || (sendToLeaders && coreNodeProps.isLeader())) {
String url = coreNodeProps.getCoreUrl();
urlList.add(url);
} else if (sendToLeaders) {
String url = coreNodeProps.getCoreUrl();
replicas.add(url);
}
}
}
}
if (sendToLeaders) {
this.leaderUrlList = urlList;
this.replicasList = replicas;
} else {
this.urlList = urlList;
}
this.lastClusterStateHashCode = clusterState.hashCode();
}
List<String> theUrlList;
if (sendToLeaders) {
theUrlList = new ArrayList<String>(leaderUrlList.size());
@ -231,7 +242,14 @@ public class CloudSolrServer extends SolrServer {
theUrlList.addAll(urlList);
}
Collections.shuffle(theUrlList, rand);
//System.out.println("########################## MAKING REQUEST TO " + urlList);
if (replicas != null) {
ArrayList<String> theReplicas = new ArrayList<String>(replicasList.size());
theReplicas.addAll(replicasList);
Collections.shuffle(theReplicas, rand);
theUrlList.addAll(theReplicas);
}
//System.out.println("########################## MAKING REQUEST TO " + theUrlList);
LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, theUrlList);
LBHttpSolrServer.Rsp rsp = lbServer.request(req);
@ -255,4 +273,16 @@ public class CloudSolrServer extends SolrServer {
public LBHttpSolrServer getLbServer() {
return lbServer;
}
List<String> getUrlList() {
return urlList;
}
List<String> getLeaderUrlList() {
return leaderUrlList;
}
List<String> getReplicasList() {
return replicasList;
}
}

View File

@ -28,7 +28,7 @@
$Name: $
-->
<schema name="test" version="1.0">
<schema name="test" version="1.5">
<types>
<!-- field type definitions... note that the "name" attribute is
@ -110,9 +110,6 @@
</fieldtype>
<!-- HighlitText optimizes storage for (long) columns which will be highlit -->
<fieldtype name="highlittext" class="solr.TextField" compressThreshold="345" />
<fieldtype name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldtype name="string" class="solr.StrField" sortMissingLast="true"/>
@ -147,7 +144,6 @@
<analyzer>
<tokenizer class="solr.LowerCaseTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.StopFilterFactory" words="stopwords.txt"/>
</analyzer>
</fieldtype>
@ -200,7 +196,6 @@
<fieldtype name="lowerpunctfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter name="syn" class="solr.SynonymFilterFactory" synonyms="synonyms.txt" expand="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
@ -242,7 +237,6 @@
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
@ -255,7 +249,6 @@
<fieldtype name="custstopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.StopFilterFactory" words="stopwords.txt"/>
</analyzer>
</fieldtype>
<fieldtype name="lengthfilt" class="solr.TextField">
@ -267,7 +260,6 @@
<fieldType name="charfilthtmlmap" class="solr.TextField">
<analyzer>
<charFilter class="solr.HTMLStripCharFilterFactory"/>
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.MockTokenizerFactory"/>
</analyzer>
</fieldType>
@ -293,14 +285,14 @@
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
@ -310,7 +302,7 @@
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.MockTokenizerFactory"/>
@ -351,22 +343,9 @@
<fieldtype name="syn" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter name="syn" class="solr.SynonymFilterFactory" synonyms="old_synonyms.txt"/>
</analyzer>
</fieldtype>
<!-- Demonstrates How RemoveDuplicatesTokenFilter makes stemmed
synonyms "better"
-->
<fieldtype name="dedup" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory"
synonyms="old_synonyms.txt" expand="true" />
<filter class="solr.PorterStemFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory" />
</analyzer>
</fieldtype>
<fieldtype name="unstored" class="solr.StrField" indexed="true" stored="false"/>
@ -394,29 +373,33 @@
<!-- some per-field similarity examples -->
<!-- specify a Similarity classname directly -->
<!--
<fieldType name="sim1" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
</analyzer>
<similarity class="org.apache.lucene.misc.SweetSpotSimilarity"/>
</fieldType>
-->
<!-- specify a Similarity factory -->
<!--
<fieldType name="sim2" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
</analyzer>
<similarity class="org.apache.solr.schema.CustomSimilarityFactory">
<similarity class="org.apache.solr.search.similarities.CustomSimilarityFactory">
<str name="echo">is there an echo?</str>
</similarity>
</fieldType>
-->
<!-- don't specify any sim at all: get the default -->
<!--
<fieldType name="sim3" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
</analyzer>
</fieldType>
-->
</types>
@ -474,10 +457,6 @@
<field name="test_posofftv" type="text" termVectors="true"
termPositions="true" termOffsets="true"/>
<!-- test highlit field settings -->
<field name="test_hlt" type="highlittext" indexed="true" compressed="true"/>
<field name="test_hlt_off" type="highlittext" indexed="true" compressed="false"/>
<!-- fields to test individual tokenizers and tokenfilters -->
<field name="teststop" type="teststop" indexed="true" stored="true"/>
<field name="lowertok" type="lowertok" indexed="true" stored="true"/>
@ -500,7 +479,6 @@
<field name="stopfilt" type="stopfilt" indexed="true" stored="true"/>
<field name="custstopfilt" type="custstopfilt" indexed="true" stored="true"/>
<field name="lengthfilt" type="lengthfilt" indexed="true" stored="true"/>
<field name="dedup" type="dedup" indexed="true" stored="true"/>
<field name="wdf_nocase" type="wdf_nocase" indexed="true" stored="true"/>
<field name="wdf_preserve" type="wdf_preserve" indexed="true" stored="true"/>
@ -522,11 +500,15 @@
<field name="multiDefault" type="string" indexed="true" stored="true" default="muLti-Default" multiValued="true"/>
<field name="intDefault" type="int" indexed="true" stored="true" default="42" multiValued="false"/>
<!--
<field name="sim1text" type="sim1" indexed="true" stored="true"/>
<field name="sim2text" type="sim2" indexed="true" stored="true"/>
<field name="sim3text" type="sim3" indexed="true" stored="true"/>
-->
<field name="tlong" type="tlong" indexed="true" stored="true" />
<field name="_version_" type="long" indexed="true" stored="true"/>
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
@ -604,9 +586,11 @@
<dynamicField name="*_mfacet" type="string" indexed="true" stored="false" multiValued="true" />
<!-- make sure custom sims work with dynamic fields -->
<!--
<dynamicField name="*_sim1" type="sim1" indexed="true" stored="true"/>
<dynamicField name="*_sim2" type="sim2" indexed="true" stored="true"/>
<dynamicField name="*_sim3" type="sim3" indexed="true" stored="true"/>
-->
</fields>
<defaultSearchField>text</defaultSearchField>
@ -640,8 +624,4 @@
<!-- dynamic destination -->
<copyField source="*_dynamic" dest="dynamic_*"/>
<!-- example of a custom similarity -->
<similarity class="org.apache.solr.schema.CustomSimilarityFactory">
<str name="echo">I am your default sim</str>
</similarity>
</schema>

View File

@ -0,0 +1,59 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
This is a stripped down config file used for a simple example...
It is *not* a good example to work from.
-->
<config>
<!-- match version is substituted by the test framework -->
<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<dataDir>${solr.data.dir:}</dataDir>
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
<!-- the update log is required by the /get handler declared below -->
<updateHandler class="solr.DirectUpdateHandler2">
<updateLog>
<str name="dir">${solr.data.dir:}</str>
</updateLog>
</updateHandler>
<!-- realtime get handler, guaranteed to return the latest stored fields
of any document, without the need to commit or open a new searcher. The current
implementation relies on the updateLog feature being enabled. -->
<requestHandler name="/get" class="solr.RealTimeGetHandler">
<lst name="defaults">
<str name="omitHeader">true</str>
</lst>
</requestHandler>
<requestDispatcher handleSelect="true" >
<requestParsers enableRemoteStreaming="false" multipartUploadLimitInKB="2048" />
</requestDispatcher>
<!-- /replication lets new or recovering replicas pull the index -->
<requestHandler name="/replication" class="solr.ReplicationHandler" startup="lazy" />
<requestHandler name="standard" class="solr.StandardRequestHandler" default="true" />
<requestHandler name="/update" class="solr.UpdateRequestHandler" />
<requestHandler name="/admin/" class="org.apache.solr.handler.admin.AdminHandlers" />
<!-- config for the admin interface -->
<admin>
<defaultQuery>solr</defaultQuery>
</admin>
</config>

View File

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
All (relative) paths are relative to the installation path
persistent: Save changes made via the API to this file
sharedLib: path to a lib directory that will be shared across all cores
-->
<solr persistent="false">
<!--
adminPath: RequestHandler path to manage cores.
If 'null' (or absent), cores will not be manageable via request handler
-->
<!-- ${hostPort}/${numShards}/${shard}/${collection}/${solrconfig}/${schema}
are system-property substitutions supplied by the test framework; the value
after the ':' is the default used when the property is not set. -->
<cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="solr" zkClientTimeout="8000" numShards="${numShards:3}">
<core name="collection1" instanceDir="collection1" shard="${shard:}" collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"/>
</cores>
</solr>

View File

@ -237,7 +237,7 @@ public class TestLBHttpSolrServer extends LuceneTestCase {
}
public String getSchemaFile() {
return "solrj/solr/conf/schema-replication1.xml";
return "solrj/solr/collection1/conf/schema-replication1.xml";
}
public String getConfDir() {
@ -249,7 +249,7 @@ public class TestLBHttpSolrServer extends LuceneTestCase {
}
public String getSolrConfigFile() {
return "solrj/solr/conf/solrconfig-slave1.xml";
return "solrj/solr/collection1/conf/solrconfig-slave1.xml";
}
public void setUp() throws Exception {

View File

@ -0,0 +1,139 @@
package org.apache.solr.client.solrj.impl;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.cloud.AbstractZkTestCase;
import org.apache.solr.util.ExternalPaths;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
/**
 * Verifies that CloudSolrServer derives its URL lists from the live cluster
 * state: each slice's leader must appear in the leader list, and every
 * non-leader node must appear in the replica list.
 * (This test would be faster if we simulated the zk state instead.)
 */
@Slow
public class CloudSolrServerTest extends AbstractFullDistribZkTestBase {

  private static final String SOLR_HOME = ExternalPaths.SOURCE_HOME + File.separator + "solrj"
      + File.separator + "src" + File.separator + "test-files"
      + File.separator + "solrj" + File.separator + "solr";

  @BeforeClass
  public static void beforeSuperClass() {
    AbstractZkTestCase.SOLRHOME = new File(SOLR_HOME());
  }

  @AfterClass
  public static void afterSuperClass() {
  }

  @Override
  public String getSolrHome() {
    return SOLR_HOME;
  }

  public static String SOLR_HOME() {
    return SOLR_HOME;
  }

  public CloudSolrServerTest() {
    super();
    sliceCount = 2;
    shardCount = 6;
  }

  @Before
  @Override
  public void setUp() throws Exception {
    super.setUp();
    // we expect this type of exception as shards go up and down...
    //ignoreException(".*");
    System.setProperty("numShards", Integer.toString(sliceCount));
  }

  @Override
  @After
  public void tearDown() throws Exception {
    super.tearDown();
    resetExceptionIgnores();
  }

  @Override
  public void doTest() throws Exception {
    handle.clear();
    handle.put("QTime", SKIPVAL);
    handle.put("timestamp", SKIPVAL);

    waitForThingsToLevelOut(15);

    del("*:*");

    indexr(id, 0, "a_t", "to come to the aid of their country.");

    // compare leaders list: both slice leaders must be in the client's view
    CloudJettyRunner leader1 = shardToLeaderJetty.get("shard1");
    CloudJettyRunner leader2 = shardToLeaderJetty.get("shard2");
    assertEquals(2, cloudClient.getLeaderUrlList().size());
    HashSet<String> leaderUrlSet = new HashSet<String>(
        cloudClient.getLeaderUrlList());
    assertTrue("fail check for leader:" + leader1.url + " in "
        + leaderUrlSet, leaderUrlSet.contains(leader1.url + "/"));
    assertTrue("fail check for leader:" + leader2.url + " in "
        + leaderUrlSet, leaderUrlSet.contains(leader2.url + "/"));

    // compare replicas list: every jetty except the two leaders is a replica
    Set<String> replicas = new HashSet<String>();
    for (String shard : new String[] {"shard1", "shard2"}) {
      for (CloudJettyRunner runner : shardToJetty.get(shard)) {
        replicas.add(runner.url);
      }
    }
    replicas.remove(leader1.url);
    replicas.remove(leader2.url);

    assertEquals(replicas.size(), cloudClient.getReplicasList().size());
    for (String url : cloudClient.getReplicasList()) {
      assertTrue("fail check for replica:" + url + " in " + replicas,
          replicas.contains(stripTrailingSlash(url)));
    }
  }

  /** Returns the url without its trailing slash, if it has one. */
  private String stripTrailingSlash(String url) {
    return url.endsWith("/") ? url.substring(0, url.length() - 1) : url;
  }
}

View File

@ -1388,11 +1388,10 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
return file;
} catch (Exception e) {
/* more friendly than NPE */
throw new RuntimeException("Cannot find resource: " + name);
throw new RuntimeException("Cannot find resource: " + new File(name).getAbsolutePath());
}
}
// TODO: use solr rather than solr/collection1
public static String TEST_HOME() {
return getFile("solr/collection1").getParent();
}

View File

@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.io.FileUtils;
import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.cloud.ZkTestServer;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
@ -35,7 +36,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearchTestCase {
public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTestCase {
protected static final String DEFAULT_COLLECTION = "collection1";
private static final boolean DEBUG = false;

View File

@ -40,6 +40,7 @@ import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.cloud.ChaosMonkey;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
@ -49,9 +50,7 @@ import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.servlet.SolrDispatchFilter;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@ -66,18 +65,19 @@ import org.slf4j.LoggerFactory;
*
*/
@Slow
public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
static Logger log = LoggerFactory.getLogger(FullSolrCloudTest.class);
public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTestBase {
static Logger log = LoggerFactory.getLogger(AbstractFullDistribZkTestBase.class);
@BeforeClass
public static void beforeFullSolrCloudTest() {
// shorten the log output more for this test type
if (formatter != null) formatter.setShorterFormat();
}
private static final String SHARD2 = "shard2";
private boolean printLayoutOnTearDown = false;
public static final String SHARD1 = "shard1";
public static final String SHARD2 = "shard2";
protected boolean printLayoutOnTearDown = false;
String t1 = "a_t";
String i1 = "a_si";
@ -107,12 +107,12 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
protected Map<String,CloudJettyRunner> shardToLeaderJetty = new HashMap<String,CloudJettyRunner>();
static class CloudJettyRunner {
JettySolrRunner jetty;
String nodeName;
String coreNodeName;
String url;
CloudSolrServerClient client;
public static class CloudJettyRunner {
public JettySolrRunner jetty;
public String nodeName;
public String coreNodeName;
public String url;
public CloudSolrServerClient client;
public ZkNodeProps info;
@Override
public int hashCode() {
@ -186,7 +186,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
System.clearProperty("solrcloud.update.delay");
}
public FullSolrCloudTest() {
public AbstractFullDistribZkTestBase() {
fixShardCount = true;
shardCount = 4;
@ -214,11 +214,21 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
// wait until shards have started registering...
int cnt = 30;
while (!zkStateReader.getClusterState().getCollections()
.contains(DEFAULT_COLLECTION)) {
if (cnt == 0) {
throw new RuntimeException("timeout waiting for collection1 in cluster state");
}
cnt--;
Thread.sleep(500);
}
cnt = 30;
while (zkStateReader.getClusterState().getSlices(DEFAULT_COLLECTION).size() != sliceCount) {
if (cnt == 0) {
throw new RuntimeException("timeout waiting for collection shards to come up");
}
cnt--;
Thread.sleep(500);
}
@ -261,7 +271,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
private List<JettySolrRunner> createJettys(int numJettys) throws Exception {
protected List<JettySolrRunner> createJettys(int numJettys) throws Exception {
return createJettys(numJettys, false);
}
@ -275,7 +285,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
* @return
* @throws Exception
*/
private List<JettySolrRunner> createJettys(int numJettys, boolean checkCreatedVsState) throws Exception {
protected List<JettySolrRunner> createJettys(int numJettys, boolean checkCreatedVsState) throws Exception {
List<JettySolrRunner> jettys = new ArrayList<JettySolrRunner>();
List<SolrServer> clients = new ArrayList<SolrServer>();
StringBuilder sb = new StringBuilder();
@ -330,7 +340,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
return jettys;
}
private int getNumShards(String defaultCollection) {
protected int getNumShards(String defaultCollection) {
Map<String,Slice> slices = this.zkStateReader.getClusterState().getSlices(defaultCollection);
int cnt = 0;
for (Map.Entry<String,Slice> entry : slices.entrySet()) {
@ -538,147 +548,6 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
***/
}// serial commit...
/*
 * End-to-end SolrCloud smoke test: index, verify doc counts across shards,
 * run distributed queries, kill/recover a shard, add a replica, then
 * exercise update/delete and a handful of failure cases.
 *
 * @see org.apache.solr.BaseDistributedSearchTestCase#doTest()
 */
@Override
public void doTest() throws Exception {
  // used to flag the teardown to dump the cluster layout when a failure
  // happens anywhere in the try block below
  boolean testFinished = false;
  try {
    handle.clear();
    handle.put("QTime", SKIPVAL);
    handle.put("timestamp", SKIPVAL);

    indexr(id, 1, i1, 100, tlong, 100, t1,
        "now is the time for all good men", "foo_f", 1.414f, "foo_b", "true",
        "foo_d", 1.414d);

    // make sure we are in a steady state...
    waitForRecoveriesToFinish(false);

    commit();

    assertDocCounts(false);

    indexAbunchOfDocs();

    // check again after the bulk indexing
    waitForRecoveriesToFinish(false);

    commit();

    assertDocCounts(VERBOSE);
    checkQueries();

    assertDocCounts(VERBOSE);

    query("q", "*:*", "sort", "n_tl1 desc");

    brindDownShardIndexSomeDocsAndRecover();

    query("q", "*:*", "sort", "n_tl1 desc");

    // test adding another replica to a shard - it should do a
    // recovery/replication to pick up the index from the leader
    addNewReplica();

    long docId = testUpdateAndDelete();

    // index a bad doc (no id field) and expect the add to be rejected
    try {
      indexr(t1, "a doc with no id");
      fail("this should fail");
    } catch (SolrException e) {
      // expected
    }

    // TODO: bring this to it's own method?
    // try indexing to a leader that has no replicas up
    ZkNodeProps leaderProps = zkStateReader.getLeaderProps(
        DEFAULT_COLLECTION, SHARD2);

    String nodeName = leaderProps.get(ZkStateReader.NODE_NAME_PROP);
    chaosMonkey.stopShardExcept(SHARD2, nodeName);

    SolrServer client = getClient(nodeName);

    index_specific(client, "id", docId + 1, t1, "what happens here?");

    // expire a session and verify a doc still makes it in while the
    // affected node re-registers with zookeeper
    CloudJettyRunner cloudJetty = shardToJetty.get("shard1").get(0);
    chaosMonkey.expireSession(cloudJetty.jetty);

    indexr("id", docId + 1, t1, "slip this doc in");

    waitForRecoveriesToFinish(false);

    checkShardConsistency("shard1");

    testFinished = true;
  } finally {
    if (!testFinished) {
      printLayoutOnTearDown = true;
    }
  }
}
/**
 * Indexes one doc, overwrites it, then deletes it by id, checking the
 * visible result count after each commit.
 *
 * @return the id used, so the caller can index follow-up docs past it
 */
private long testUpdateAndDelete() throws Exception {
  long updateDocId = 99999999L;
  SolrServer firstClient = clients.get(0);

  // initial version of the doc
  indexr("id", updateDocId, t1, "originalcontent");
  commit();

  ModifiableSolrParams search = new ModifiableSolrParams();
  search.add("q", t1 + ":originalcontent");
  QueryResponse rsp = firstClient.query(search);
  assertEquals(1, rsp.getResults().getNumFound());

  // overwrite with new content - the old content must no longer match
  indexr("id", updateDocId, t1, "updatedcontent");
  commit();

  rsp = firstClient.query(search);
  assertEquals(0, rsp.getResults().getNumFound());

  search.set("q", t1 + ":updatedcontent");
  rsp = firstClient.query(search);
  assertEquals(1, rsp.getResults().getNumFound());

  // delete by id and verify the updated content is gone too
  UpdateRequest deleteReq = new UpdateRequest();
  // deleteReq.setParam(UpdateParams.UPDATE_CHAIN, DISTRIB_UPDATE_CHAIN);
  deleteReq.deleteById(Long.toString(updateDocId)).process(firstClient);
  commit();

  rsp = firstClient.query(search);
  assertEquals(0, rsp.getResults().getNumFound());

  return updateDocId;
}
/**
 * Starts one additional jetty - it should join an existing shard and pull
 * the index from that shard's leader - then verifies shard1 consistency
 * and overall doc counts.
 */
private void addNewReplica() throws Exception {
  // the returned runner was previously bound to an unused local; the
  // side effect of creating/starting the jetty is all that matters here
  createJettys(1);

  waitForRecoveriesToFinish(false);

  // new server should be part of first shard
  // how many docs are on the new shard?
  for (CloudJettyRunner cjetty : shardToJetty.get("shard1")) {
    if (VERBOSE) {
      System.err.println("total:"
          + cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }
  }

  checkShardConsistency("shard1");
  assertDocCounts(VERBOSE);
}
protected void waitForRecoveriesToFinish(boolean verbose)
throws Exception {
super.waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, verbose);
@ -689,174 +558,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
super.waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, verbose, true, timeoutSeconds);
}
/**
 * Kills a shard2 node, verifies updates to it fail, indexes through the
 * surviving nodes and the cloud client, restarts the dead node, and checks
 * that it recovers to a consistent state. A second stop/start cycle indexes
 * 226 docs while the node is down to force a replication (not just peer
 * sync) recovery.
 */
private void brindDownShardIndexSomeDocsAndRecover() throws Exception {
  SolrQuery query = new SolrQuery("*:*");
  // query each node directly, not distributed
  query.set("distrib", false);

  commit();

  // NOTE(review): deadShardCount is assigned here and again after recovery
  // but never asserted on - looks like leftover debugging state; confirm
  // before removing.
  long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
      .query(query).getResults().getNumFound();

  query("q", "*:*", "sort", "n_tl1 desc");

  // kill a shard
  CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD2, 0);
  cloudClient.connect();

  // we are careful to make sure the downed node is no longer in the state,
  // because on some systems (especially freebsd w/ blackhole enabled), trying
  // to talk to a downed node causes grief
  Set<CloudJettyRunner> jetties = new HashSet<CloudJettyRunner>();
  jetties.addAll(shardToJetty.get(SHARD2));
  jetties.remove(deadShard);
  for (CloudJettyRunner cjetty : jetties) {
    waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
        .getFilter()).getCores().getZkController().getZkStateReader(),
        deadShard);
  }
  waitToSeeNotLive(cloudClient.getZkStateReader(), deadShard);

  // ensure shard is dead: a direct update to it must fail
  try {
    index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1,
        "specific doc!");
    fail("This server should be down and this update should have failed");
  } catch (SolrServerException e) {
    // expected..
  }

  commit();

  query("q", "*:*", "sort", "n_tl1 desc");

  // long cloudClientDocs = cloudClient.query(new
  // SolrQuery("*:*")).getResults().getNumFound();
  // System.out.println("clouddocs:" + cloudClientDocs);

  // try to index to a living shard at shard2
  long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();

  index_specific(shardToJetty.get(SHARD2).get(1).client.solrClient, id, 1000, i1, 108, t1,
      "specific doc!");

  commit();

  checkShardConsistency(true, false);

  query("q", "*:*", "sort", "n_tl1 desc");

  // try adding a doc with CloudSolrServer
  cloudClient.setDefaultCollection(DEFAULT_COLLECTION);

  long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();

  // the doc indexed through the living shard2 node must be visible
  assertEquals(numFound1 + 1, numFound2);

  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", 1001);

  // mirror the doc into the control collection for later comparison
  controlClient.add(doc);

  UpdateRequest ureq = new UpdateRequest();
  ureq.add(doc);
  // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
  ureq.process(cloudClient);

  commit();

  query("q", "*:*", "sort", "n_tl1 desc");

  long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();

  // lets just check that the one doc since last commit made it in...
  assertEquals(numFound2 + 1, numFound3);

  // test debugging
  testDebugQueries();

  if (VERBOSE) {
    System.err.println(controlClient.query(new SolrQuery("*:*")).getResults()
        .getNumFound());

    for (SolrServer client : clients) {
      try {
        SolrQuery q = new SolrQuery("*:*");
        q.set("distrib", false);
        System.err.println(client.query(q).getResults()
            .getNumFound());
      } catch (Exception e) {
        // best-effort diagnostics only; a dead node is expected here
      }
    }
  }

  // TODO: This test currently fails because debug info is obtained only
  // on shards with matches.
  // query("q","matchesnothing","fl","*,score", "debugQuery", "true");

  // this should trigger a recovery phase on deadShard
  ChaosMonkey.start(deadShard.jetty);

  // make sure we have published we are recovering
  Thread.sleep(1500);

  waitForRecoveriesToFinish(false);

  deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
      .query(query).getResults().getNumFound();

  // if we properly recovered, we should now have the couple missing docs that
  // came in while shard was down
  checkShardConsistency(true, false);

  // recover over 100 docs so we do more than just peer sync (replicate recovery)
  chaosMonkey.stopJetty(deadShard);

  for (CloudJettyRunner cjetty : jetties) {
    waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
        .getFilter()).getCores().getZkController().getZkStateReader(),
        deadShard);
  }
  waitToSeeNotLive(cloudClient.getZkStateReader(), deadShard);

  // index enough docs while the node is down to exceed the peer-sync window
  for (int i = 0; i < 226; i++) {
    doc = new SolrInputDocument();
    doc.addField("id", 2000 + i);
    controlClient.add(doc);
    ureq = new UpdateRequest();
    ureq.add(doc);
    // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
    ureq.process(cloudClient);
  }
  commit();

  Thread.sleep(1500);

  ChaosMonkey.start(deadShard.jetty);

  // make sure we have published we are recovering
  Thread.sleep(1500);

  waitForRecoveriesToFinish(false);

  checkShardConsistency(true, false);
}
/**
 * Runs a handful of queries with debug output enabled, comparing control
 * and distributed responses. Debug sections whose values legitimately
 * differ per node are registered in {@code handle} as skip/unordered first.
 */
private void testDebugQueries() throws Exception {
  // timing-ish sections can never match exactly across runs
  handle.put("explain", SKIPVAL);
  handle.put("time", SKIPVAL);
  // debug block contents may arrive in any order
  handle.put("debug", UNORDERED);

  // full debug on a text query and on a range query
  query("q", "now their fox sat had put", "fl", "*,score",
      CommonParams.DEBUG_QUERY, "true");
  query("q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true");

  // each individual debug section in turn
  for (String section : new String[] {
      CommonParams.TIMING, CommonParams.RESULTS, CommonParams.QUERY}) {
    query("q", "id:[1 TO 5]", CommonParams.DEBUG, section);
  }
}
private void checkQueries() throws Exception {
protected void checkQueries() throws Exception {
handle.put("_version_", SKIPVAL);
@ -995,7 +697,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
}
private void indexAbunchOfDocs() throws Exception {
protected void indexAbunchOfDocs() throws Exception {
indexr(id, 2, i1, 50, tlong, 50, t1, "to come to the aid of their country.");
indexr(id, 3, i1, 2, tlong, 2, t1, "how now brown cow");
indexr(id, 4, i1, -100, tlong, 101, t1,
@ -1209,7 +911,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
}
private SolrServer getClient(String nodeName) {
protected SolrServer getClient(String nodeName) {
for (CloudJettyRunner cjetty : cloudJettys) {
CloudSolrServerClient client = cjetty.client;
if (client.shardName.equals(nodeName)) {
@ -1423,7 +1125,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
};
protected void waitForThingsToLevelOut(int waitForRecTimeSeconds) throws Exception {
public void waitForThingsToLevelOut(int waitForRecTimeSeconds) throws Exception {
log.info("Wait for recoveries to finish - wait " + waitForRecTimeSeconds + " for each attempt");
int cnt = 0;
boolean retry = false;

View File

@ -19,7 +19,6 @@ package org.apache.solr.cloud;
import java.io.File;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.solr.SolrTestCaseJ4;
@ -44,6 +43,18 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
protected static Logger log = LoggerFactory
.getLogger(AbstractZkTestCase.class);
public static File SOLRHOME;
static {
try {
SOLRHOME = new File(TEST_HOME());
} catch (RuntimeException e) {
log.warn("TEST_HOME() does not exist - solrj test?");
// solrj tests not working with TEST_HOME()
// must override getSolrHome
}
}
protected static ZkTestServer zkServer;
protected static String zkDir;
@ -51,6 +62,7 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
@BeforeClass
public static void azt_beforeClass() throws Exception {
System.out.println("azt beforeclass");
createTempDir();
zkDir = dataDir.getAbsolutePath() + File.separator
+ "zookeeper/server1/data";
@ -61,15 +73,20 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
System.setProperty("zkHost", zkServer.getZkAddress());
System.setProperty("jetty.port", "0000");
buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(),
buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), SOLRHOME,
"solrconfig.xml", "schema.xml");
initCore("solrconfig.xml", "schema.xml");
}
// static to share with distrib test
static void buildZooKeeper(String zkHost, String zkAddress, String config,
String schema) throws Exception {
buildZooKeeper(zkHost, zkAddress, SOLRHOME, config, schema);
}
// static to share with distrib test
static void buildZooKeeper(String zkHost, String zkAddress, File solrhome, String config,
String schema) throws Exception {
SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
zkClient.makePath("/solr", false, true);
zkClient.close();
@ -85,24 +102,32 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
zkClient.makePath("/collections/control_collection", ZkStateReader.toJSON(zkProps), CreateMode.PERSISTENT, true);
zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT, true);
putConfig(zkClient, config);
putConfig(zkClient, schema);
putConfig(zkClient, "solrconfig.xml");
putConfig(zkClient, "stopwords.txt");
putConfig(zkClient, "protwords.txt");
putConfig(zkClient, "currency.xml");
putConfig(zkClient, "open-exchange-rates.json");
putConfig(zkClient, "mapping-ISOLatin1Accent.txt");
putConfig(zkClient, "old_synonyms.txt");
putConfig(zkClient, "synonyms.txt");
putConfig(zkClient, solrhome, config);
putConfig(zkClient, solrhome, schema);
putConfig(zkClient, solrhome, "solrconfig.xml");
putConfig(zkClient, solrhome, "stopwords.txt");
putConfig(zkClient, solrhome, "protwords.txt");
putConfig(zkClient, solrhome, "currency.xml");
putConfig(zkClient, solrhome, "open-exchange-rates.json");
putConfig(zkClient, solrhome, "mapping-ISOLatin1Accent.txt");
putConfig(zkClient, solrhome, "old_synonyms.txt");
putConfig(zkClient, solrhome, "synonyms.txt");
zkClient.close();
}
private static void putConfig(SolrZkClient zkClient, final String name)
private static void putConfig(SolrZkClient zkClient, File solrhome, final String name)
throws Exception {
zkClient.makePath("/configs/conf1/" + name, getFile("solr" + File.separator + "collection1"
+ File.separator + "conf" + File.separator + name), false, true);
String path = "/configs/conf1/" + name;
File file = new File(solrhome, "collection1"
+ File.separator + "conf" + File.separator + name);
if (!file.exists()) {
log.info("skipping " + file.getAbsolutePath() + " because it doesn't exist");
return;
}
log.info("put " + file.getAbsolutePath() + " to " + path);
zkClient.makePath(path, file, false, true);
}
@Override

View File

@ -27,7 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.cloud.FullSolrCloudTest.CloudJettyRunner;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase.CloudJettyRunner;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;

View File

@ -25,7 +25,7 @@ import java.io.File;
* @lucene.internal
*/
public class ExternalPaths {
private static final String SOURCE_HOME = determineSourceHome();
public static final String SOURCE_HOME = determineSourceHome();
public static String WEBAPP_HOME = new File(SOURCE_HOME, "webapp/web").getAbsolutePath();
public static String EXAMPLE_HOME = new File(SOURCE_HOME, "example/solr").getAbsolutePath();
public static String EXAMPLE_MULTICORE_HOME = new File(SOURCE_HOME, "example/multicore").getAbsolutePath();