Merged revision(s) 1364862-1365482 from lucene/dev/trunk:

........
LUCENE-4044: add spi support to Tokenizer/CharFilter/TokenFilter factory
........
LUCENE-2510: apply movefactories.sh
........
LUCENE-4044: few steps closer to fixing compile
........
LUCENE-4044: add the rest for common/
........
LUCENE-4044: add services for kuromoji
........
LUCENE-4044: more factories and tests
........
LUCENE-4044: fix more compile errors
........
LUCENE-4044: fix some more tests
........
LUCENE-4197: rename CachedDistanceValueSource
........
LUCENE-4044: get analysis/common tests passing
........
improve zk tests vs blackhole
........
LUCENE-4044: get all lucene tests passing
........
LUCENE-4044: register phonetic factories
........
LUCENE-4245: Make IndexWriter#close() and MergeScheduler#close() non-interruptible
........
LUCENE-4044: port over icu module
........
LUCENE-4044: port over morfologik
........
LUCENE-4044: port over smartcn
........
LUCENE-4245: Add-on: handle failures during flushing by forcing CMS to stop
........
LUCENE-4044: port over stempel/uima
........
LUCENE-4044: port over synfilter
........
LUCENE-4245: better record interruption
........
fix reuse bug
........
simplify + improve test infra
........
LUCENE-4245: use IOUtils to close everything in finally block
........
LUCENE-4044: get solr tests working
........
LUCENE-4044: don't use instances, just class names
........


git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2510@1365483 13f79535-47bb-0310-9956-ffa450edef68
Commit 6751eefb8e by Uwe Schindler, 2012-07-25 08:10:27 +00:00
32 changed files with 495 additions and 240 deletions
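The LUCENE-4044 commits above hook the Tokenizer/CharFilter/TokenFilter factories into the JDK service-provider (SPI) mechanism, so analysis components can be looked up by name instead of being hard-coded as class references. A minimal sketch of that mechanism using java.util.ServiceLoader directly; AnalysisFactory here is a hypothetical stand-in for Lucene's factory base classes, and per the last commit message Lucene registers class names rather than instances, so the real lookup differs in detail:

import java.util.ServiceLoader;

// Hypothetical stand-in for Lucene's TokenizerFactory/TokenFilterFactory/
// CharFilterFactory base classes.
interface AnalysisFactory {
  String name();
}

class SpiLookup {
  // Scans every META-INF/services registration for AnalysisFactory on the
  // classpath and returns the factory published under the given name.
  static AnalysisFactory lookup(String name) {
    for (AnalysisFactory f : ServiceLoader.load(AnalysisFactory.class)) {
      if (f.name().equals(name)) {
        return f;
      }
    }
    throw new IllegalArgumentException("no factory named " + name);
  }
}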


@ -100,6 +100,9 @@ Bug Fixes
* LUCENE-4234: Exception when FacetsCollector is used with ScoreFacetRequest,
and the number of matching documents is too large. (Gilad Barkai via Shai Erera)
* LUCENE-4245: Make IndexWriter#close() and MergeScheduler#close()
non-interruptible. (Mark Miller, Uwe Schindler)
Build
* LUCENE-4094: Support overriding file.encoding on forked test JVMs


@ -30,6 +30,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.KeywordMarkerFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class HunspellStemFilterTest extends BaseTokenStreamTestCase {
@ -39,6 +40,10 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase {
public static void beforeClass() throws IOException, ParseException {
DICTIONARY = createDict(true);
}
@AfterClass
public static void afterClass() {
DICTIONARY = null;
}
public static HunspellDictionary createDict(boolean ignoreCase) throws IOException, ParseException {
InputStream affixStream = HunspellStemmerTest.class.getResourceAsStream("test.aff");
InputStream dictStream = HunspellStemmerTest.class.getResourceAsStream("test.dic");
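Several test hunks in this commit (here, HunspellStemmerTest, TestPostingsFormat, TestBasics, TestHighFreqTerms, TestReplicationHandler, TestValueSourceCache, and more below) add the same housekeeping: static fixtures built in @BeforeClass are nulled out in @AfterClass so they can be garbage collected instead of staying reachable from the long-running test JVM's classloader. The pattern in isolation (JUnit 4; the byte-array fixture is a placeholder for a dictionary, analyzer, or index):

import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class SomeFixtureTest {
  // Large state shared by every test method in the class.
  private static byte[] fixture;

  @BeforeClass
  public static void beforeClass() {
    fixture = new byte[64 * 1024 * 1024]; // stands in for expensive setup
  }

  @AfterClass
  public static void afterClass() {
    // Drop the static reference so the fixture becomes unreachable once
    // this class's tests have run; otherwise it stays live for the whole run.
    fixture = null;
  }

  @Test
  public void testSomething() {
    Assert.assertNotNull(fixture);
  }
}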


@ -19,6 +19,7 @@ package org.apache.lucene.analysis.hunspell;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -37,6 +38,11 @@ public class HunspellStemmerTest extends LuceneTestCase {
public static void beforeClass() throws IOException, ParseException {
createStemmer(true);
}
@AfterClass
public static void afterClass() {
stemmer = null;
}
@Test
public void testStem_simpleSuffix() {


@ -1852,7 +1852,7 @@ public class DirectPostingsFormat extends PostingsFormat {
public DocsEnum reset(int[] docIDs, int[] freqs) {
this.docIDs = docIDs;
this.freqs = freqs;
upto = -1;
docID = upto = -1;
return this;
}


@ -243,27 +243,34 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
sync();
}
/** Wait for any running merge threads to finish */
/** Wait for any running merge threads to finish. This call is not interruptible as used by {@link #close()}. */
public void sync() {
while (true) {
MergeThread toSync = null;
synchronized (this) {
for (MergeThread t : mergeThreads) {
if (t.isAlive()) {
toSync = t;
break;
boolean interrupted = false;
try {
while (true) {
MergeThread toSync = null;
synchronized (this) {
for (MergeThread t : mergeThreads) {
if (t.isAlive()) {
toSync = t;
break;
}
}
}
}
if (toSync != null) {
try {
toSync.join();
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
if (toSync != null) {
try {
toSync.join();
} catch (InterruptedException ie) {
// ignore this Exception, we will retry until all threads are dead
interrupted = true;
}
} else {
break;
}
} else {
break;
}
} finally {
// finally, restore interrupt status:
if (interrupted) Thread.currentThread().interrupt();
}
}
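The reworked sync() above is the standard uninterruptible-wait idiom: swallow InterruptedException while retrying the join, remember that it happened, and re-assert the thread's interrupt status once the wait is done so callers higher up still see the interrupt. Reduced to a single thread, a sketch of the idiom (not Lucene code):

// Join t to completion even if the current thread is interrupted while
// waiting; the interrupt is not lost but re-asserted afterwards.
static void joinUninterruptibly(Thread t) {
  boolean interrupted = false;
  try {
    while (true) {
      try {
        t.join();
        return;
      } catch (InterruptedException ie) {
        interrupted = true; // remember, then retry the join
      }
    }
  } finally {
    if (interrupted) {
      Thread.currentThread().interrupt(); // restore interrupt status
    }
  }
}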


@ -842,7 +842,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
if (hitOOM) {
rollbackInternal();
} else {
closeInternal(waitForMerges, !hitOOM);
closeInternal(waitForMerges, true);
}
}
}
@ -870,7 +870,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
private void closeInternal(boolean waitForMerges, boolean doFlush) throws IOException {
boolean interrupted = false;
try {
if (pendingCommit != null) {
@ -883,27 +883,58 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
docWriter.close();
// Only allow a new merge to be triggered if we are
// going to wait for merges:
if (doFlush) {
flush(waitForMerges, true);
} else {
docWriter.abort(); // already closed
try {
// Only allow a new merge to be triggered if we are
// going to wait for merges:
if (doFlush) {
flush(waitForMerges, true);
} else {
docWriter.abort(); // already closed
}
} finally {
try {
// clean up merge scheduler in all cases, although flushing may have failed:
interrupted = Thread.interrupted();
if (waitForMerges) {
try {
// Give merge scheduler last chance to run, in case
// any pending merges are waiting:
mergeScheduler.merge(this);
} catch (ThreadInterruptedException tie) {
// ignore any interruption, does not matter
interrupted = true;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "interrupted while waiting for final merges");
}
}
}
synchronized(this) {
for (;;) {
try {
finishMerges(waitForMerges && !interrupted);
break;
} catch (ThreadInterruptedException tie) {
// by setting the interrupted status, the
// next call to finishMerges will pass false,
// so it will not wait
interrupted = true;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "interrupted while waiting for merges to finish");
}
}
}
stopMerges = true;
}
} finally {
// shutdown policy, scheduler and all threads (this call is not interruptible):
IOUtils.closeWhileHandlingException(mergePolicy, mergeScheduler);
}
}
if (waitForMerges)
// Give merge scheduler last chance to run, in case
// any pending merges are waiting:
mergeScheduler.merge(this);
mergePolicy.close();
synchronized(this) {
finishMerges(waitForMerges);
stopMerges = true;
}
mergeScheduler.close();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "now call final commit()");
}
@ -943,6 +974,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
}
// finally, restore interrupt status:
if (interrupted) Thread.currentThread().interrupt();
}
}


@ -17,6 +17,7 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.Closeable;
import java.io.IOException;
/** <p>Expert: {@link IndexWriter} uses an instance
@ -26,7 +27,7 @@ import java.io.IOException;
*
* @lucene.experimental
*/
public abstract class MergeScheduler {
public abstract class MergeScheduler implements Closeable {
/** Run the merges provided by {@link IndexWriter#getNextMerge()}. */
public abstract void merge(IndexWriter writer) throws IOException;
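With MergeScheduler now Closeable, the IndexWriter hunk above can hand the merge policy and scheduler to IOUtils.closeWhileHandlingException in its final shutdown step, and every subclass supplies merge() plus close(). The hypothetical InlineMergeScheduler below mirrors what Lucene's own SerialMergeScheduler does, running each pending merge on the calling thread and having nothing to release on close():

// This sketch assumes it sits in org.apache.lucene.index, where
// IndexWriter.getNextMerge()/merge(OneMerge) are visible to it, just as
// SerialMergeScheduler does.
package org.apache.lucene.index;

import java.io.IOException;

class InlineMergeScheduler extends MergeScheduler {
  @Override
  public void merge(IndexWriter writer) throws IOException {
    // Drain the writer's pending merges synchronously.
    MergePolicy.OneMerge merge;
    while ((merge = writer.getNextMerge()) != null) {
      writer.merge(merge);
    }
  }

  @Override
  public void close() throws IOException {
    // No merge threads or other resources to shut down.
  }
}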


@ -65,7 +65,7 @@ public final class IOUtils {
* } catch (ExpectedException e) {
* priorE = e;
* } finally {
* closeSafely(priorE, resource1, resource2, resource3);
* closeWhileHandlingException(priorE, resource1, resource2, resource3);
* }
* </pre>
* </p>
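The corrected javadoc fragment shows only the tail of the pattern; written out, the idiom (and the reason closeInternal above can simply call IOUtils.closeWhileHandlingException over the merge policy and scheduler) looks roughly like this sketch. It assumes the Lucene 4.x signature shown in the javadoc: the method closes every argument even if an individual close() throws, and rethrows the prior exception if one was passed:

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.util.IOUtils;

class CloseIdiom {
  // Read one byte, then close the stream; a failure in read() takes
  // precedence over any failure in close().
  static int firstByte(InputStream in) throws IOException {
    IOException priorE = null;
    int b = -1;
    try {
      b = in.read();
    } catch (IOException e) {
      priorE = e; // remember the primary failure
    } finally {
      // Closes 'in' even on failure; rethrows priorE if it is non-null,
      // suppressing any exception thrown by close() itself.
      IOUtils.closeWhileHandlingException(priorE, in);
    }
    return b;
  }
}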


@ -43,6 +43,7 @@ import org.apache.lucene.util.Constants;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
/* NOTE: This test focuses on the postings
@ -315,6 +316,15 @@ public class TestPostingsFormat extends LuceneTestCase {
System.out.println("TEST: done init postings; maxDocID=" + maxDocID + "; " + allTerms.size() + " total terms, across " + fieldInfos.size() + " fields");
}
}
@AfterClass
public static void afterClass() throws Exception {
allTerms = null;
fieldInfos = null;
fields = null;
fieldsLive = null;
globalLiveDocs = null;
}
// TODO maybe instead of @BeforeClass just make a single test run: build postings & index & test it?
@ -554,7 +564,7 @@ public class TestPostingsFormat extends LuceneTestCase {
assertNotNull("null DocsEnum", docsEnum);
int initialDocID = docsEnum.docID();
assertTrue("inital docID should be -1 or NO_MORE_DOCS", initialDocID == -1 || initialDocID == DocsEnum.NO_MORE_DOCS);
assertTrue("inital docID should be -1 or NO_MORE_DOCS: " + docsEnum, initialDocID == -1 || initialDocID == DocsEnum.NO_MORE_DOCS);
if (VERBOSE) {
if (prevDocsEnum == null) {


@ -96,7 +96,7 @@ public class TestBasics extends LuceneTestCase {
}
}
static final Analyzer simplePayloadAnalyzer = new Analyzer() {
static Analyzer simplePayloadAnalyzer = new Analyzer() {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
@ -130,6 +130,7 @@ public class TestBasics extends LuceneTestCase {
searcher = null;
reader = null;
directory = null;
simplePayloadAnalyzer = null;
}
@Test


@ -56,6 +56,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
dir.close();
dir = null;
reader = null;
writer = null;
}
/******************** Tests for getHighFreqTerms **********************************/


@ -30,7 +30,7 @@ import org.apache.lucene.spatial.SpatialStrategy;
import org.apache.lucene.spatial.prefix.tree.Node;
import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.util.CachedDistanceValueSource;
import org.apache.lucene.spatial.util.ShapeFieldCacheDistanceValueSource;
import java.util.Iterator;
import java.util.List;
@ -144,7 +144,7 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy {
}
}
Point point = args.getShape().getCenter();
return new CachedDistanceValueSource(point, calc, p);
return new ShapeFieldCacheDistanceValueSource(point, calc, p);
}
public SpatialPrefixTree getGrid() {


@ -1,3 +1,5 @@
package org.apache.lucene.spatial.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -15,8 +17,6 @@
* limitations under the License.
*/
package org.apache.lucene.spatial.util;
import com.spatial4j.core.distance.DistanceCalculator;
import com.spatial4j.core.shape.Point;
import org.apache.lucene.index.AtomicReaderContext;
@ -28,17 +28,20 @@ import java.util.List;
import java.util.Map;
/**
* An implementation of the Lucene ValueSource model to support spatial relevance ranking.
* An implementation of the Lucene ValueSource that returns the spatial distance
* between an input point and a document's points in
* {@link ShapeFieldCacheProvider}. The shortest distance is returned if a
* document has more than one point.
*
* @lucene.internal
*/
public class CachedDistanceValueSource extends ValueSource {
public class ShapeFieldCacheDistanceValueSource extends ValueSource {
private final ShapeFieldCacheProvider<Point> provider;
private final DistanceCalculator calculator;
private final Point from;
public CachedDistanceValueSource(Point from, DistanceCalculator calc, ShapeFieldCacheProvider<Point> provider) {
public ShapeFieldCacheDistanceValueSource(Point from, DistanceCalculator calc, ShapeFieldCacheProvider<Point> provider) {
this.from = from;
this.provider = provider;
this.calculator = calc;
@ -46,7 +49,7 @@ public class CachedDistanceValueSource extends ValueSource {
@Override
public String description() {
return "DistanceValueSource("+calculator+")";
return getClass().getSimpleName()+"("+calculator+")";
}
@Override
@ -85,7 +88,7 @@ public class CachedDistanceValueSource extends ValueSource {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CachedDistanceValueSource that = (CachedDistanceValueSource) o;
ShapeFieldCacheDistanceValueSource that = (ShapeFieldCacheDistanceValueSource) o;
if (calculator != null ? !calculator.equals(that.calculator) : that.calculator != null) return false;
if (from != null ? !from.equals(that.from) : that.from != null) return false;


@ -134,6 +134,10 @@ Bug Fixes
* SOLR-3623: Fixed inconsistent treatment of third-party dependencies for
solr contribs analysis-extras & uima (hossman)
* SOLR-3652: Fixed range faceting to error instead of looping infinitely
when 'gap' is zero -- or effectively zero due to floating point arithmetic
underflow. (hossman)
Other Changes
----------------------


@ -179,7 +179,7 @@
<!-- Exclude start.jar only (it'd be weird to have a license file there?) -->
<exclude name="example/start.jar" />
<exclude name="example/exampledocs/post.jar" />
<exclude name="example/work/**" />
<exclude name="example/solr-webapp/**" />
</additional-excludes>
<additional-filters>
<replaceregex pattern="/jetty([^/]+)$" replace="/jetty" flags="gi" />
@ -243,7 +243,7 @@
<fileset dir="example">
<include name="**/data/**/*" />
<include name="webapps/**/*" />
<include name="work/**/*" />
<include name="solr-webapp/**/*" />
<exclude name="**/.gitignore" />
</fileset>
</delete>


@ -25,6 +25,7 @@ import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.update.AddUpdateCommand;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -97,7 +98,8 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro
}
langField = params.get(LANG_FIELD, DOCID_LANGFIELD_DEFAULT);
langsField = params.get(LANGS_FIELD, DOCID_LANGSFIELD_DEFAULT);
docIdField = params.get(DOCID_PARAM, DOCID_FIELD_DEFAULT);
SchemaField uniqueKeyField = schema.getUniqueKeyField();
docIdField = params.get(DOCID_PARAM, uniqueKeyField == null ? DOCID_FIELD_DEFAULT : uniqueKeyField.getName());
fallbackValue = params.get(FALLBACK);
if(params.get(FALLBACK_FIELDS, "").length() > 0) {
fallbackFields = params.get(FALLBACK_FIELDS).split(",");


@ -24,7 +24,7 @@ import org.junit.Test;
public class LangDetectLanguageIdentifierUpdateProcessorFactoryTest extends LanguageIdentifierUpdateProcessorFactoryTestCase {
@Override
protected LanguageIdentifierUpdateProcessor createLangIdProcessor(ModifiableSolrParams parameters) throws Exception {
return new LangDetectLanguageIdentifierUpdateProcessor(_parser.buildRequestFrom(null, parameters, null), resp, null);
return new LangDetectLanguageIdentifierUpdateProcessor(_parser.buildRequestFrom(h.getCore(), parameters, null), resp, null);
}
// this one actually works better it seems with short docs


@ -22,6 +22,6 @@ import org.apache.solr.common.params.ModifiableSolrParams;
public class TikaLanguageIdentifierUpdateProcessorFactoryTest extends LanguageIdentifierUpdateProcessorFactoryTestCase {
@Override
protected LanguageIdentifierUpdateProcessor createLangIdProcessor(ModifiableSolrParams parameters) throws Exception {
return new TikaLanguageIdentifierUpdateProcessor(_parser.buildRequestFrom(null, parameters, null), resp, null);
return new TikaLanguageIdentifierUpdateProcessor(_parser.buildRequestFrom(h.getCore(), parameters, null), resp, null);
}
}


@ -924,6 +924,11 @@ public class SimpleFacets {
(SolrException.ErrorCode.BAD_REQUEST,
"date facet infinite loop (is gap negative?)");
}
if (high.equals(low)) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"date facet infinite loop: gap is effectively zero");
}
final boolean includeLower =
(include.contains(FacetRangeInclude.LOWER) ||
(include.contains(FacetRangeInclude.EDGE) && low.equals(start)));
@ -1113,6 +1118,11 @@ public class SimpleFacets {
(SolrException.ErrorCode.BAD_REQUEST,
"range facet infinite loop (is gap negative? did the math overflow?)");
}
if (high.compareTo(low) == 0) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"range facet infinite loop: gap is either zero, or too small relative start/end and caused underflow: " + low + " + " + gap + " = " + high );
}
final boolean includeLower =
(include.contains(FacetRangeInclude.LOWER) ||


@ -44,7 +44,7 @@ public abstract class UpdateCommand implements Cloneable {
@Override
public String toString() {
return name() + "{flags="+flags+",version="+version;
return name() + "{flags="+flags+",_version_="+version;
}
public long getVersion() {


@ -748,7 +748,10 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
if (zkEnabled && DistribPhase.TOLEADER == phase) {
// This core should be a leader
isLeader = true;
replicas = setupRequest();
} else if (DistribPhase.FROMLEADER == phase) {
isLeader = false;
}
if (vinfo == null) {


@ -66,18 +66,13 @@ public class ChaosMonkey {
private boolean expireSessions;
private boolean causeConnectionLoss;
private boolean aggressivelyKillLeaders;
private Map<String,SolrServer> shardToLeaderClient;
private Map<String,CloudJettyRunner> shardToLeaderJetty;
private long startTime;
public ChaosMonkey(ZkTestServer zkServer, ZkStateReader zkStateReader,
String collection, Map<String,List<CloudJettyRunner>> shardToJetty,
Map<String,List<SolrServer>> shardToClient,
Map<String,SolrServer> shardToLeaderClient,
Map<String,CloudJettyRunner> shardToLeaderJetty) {
this.shardToJetty = shardToJetty;
this.shardToClient = shardToClient;
this.shardToLeaderClient = shardToLeaderClient;
this.shardToLeaderJetty = shardToLeaderJetty;
this.zkServer = zkServer;
this.zkStateReader = zkStateReader;
@ -104,7 +99,7 @@ public class ChaosMonkey {
public void expireRandomSession() throws KeeperException, InterruptedException {
String sliceName = getRandomSlice();
JettySolrRunner jetty = getRandomJetty(sliceName, aggressivelyKillLeaders);
JettySolrRunner jetty = getRandomJetty(sliceName, aggressivelyKillLeaders).jetty;
if (jetty != null) {
expireSession(jetty);
expires.incrementAndGet();
@ -115,7 +110,7 @@ public class ChaosMonkey {
monkeyLog("cause connection loss!");
String sliceName = getRandomSlice();
JettySolrRunner jetty = getRandomJetty(sliceName, aggressivelyKillLeaders);
JettySolrRunner jetty = getRandomJetty(sliceName, aggressivelyKillLeaders).jetty;
if (jetty != null) {
causeConnectionLoss(jetty);
connloss.incrementAndGet();
@ -135,23 +130,29 @@ public class ChaosMonkey {
}
}
public JettySolrRunner stopShard(String slice, int index) throws Exception {
JettySolrRunner jetty = shardToJetty.get(slice).get(index).jetty;
stopJetty(jetty);
return jetty;
public CloudJettyRunner stopShard(String slice, int index) throws Exception {
CloudJettyRunner cjetty = shardToJetty.get(slice).get(index);
stopJetty(cjetty);
return cjetty;
}
public void stopJetty(JettySolrRunner jetty) throws Exception {
stop(jetty);
public void stopJetty(CloudJettyRunner cjetty) throws Exception {
stop(cjetty.jetty);
stops.incrementAndGet();
}
public void killJetty(JettySolrRunner jetty) throws Exception {
kill(jetty);
public void killJetty(CloudJettyRunner cjetty) throws Exception {
kill(cjetty);
stops.incrementAndGet();
}
public static void stop(JettySolrRunner jetty) throws Exception {
public void stopJetty(JettySolrRunner jetty) throws Exception {
stops.incrementAndGet();
stopJettySolrRunner(jetty);
}
private static void stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
monkeyLog("stop shard! " + jetty.getLocalPort());
// get a clean shutdown so that no dirs are left open...
FilterHolder fh = jetty.getDispatchFilter();
@ -168,7 +169,8 @@ public class ChaosMonkey {
}
}
public static void kill(JettySolrRunner jetty) throws Exception {
public static void kill(CloudJettyRunner cjetty) throws Exception {
JettySolrRunner jetty = cjetty.jetty;
monkeyLog("kill shard! " + jetty.getLocalPort());
FilterHolder fh = jetty.getDispatchFilter();
SolrDispatchFilter sdf = null;
@ -189,7 +191,7 @@ public class ChaosMonkey {
public void stopShard(String slice) throws Exception {
List<CloudJettyRunner> jetties = shardToJetty.get(slice);
for (CloudJettyRunner jetty : jetties) {
stopJetty(jetty.jetty);
stopJetty(jetty);
}
}
@ -197,7 +199,7 @@ public class ChaosMonkey {
List<CloudJettyRunner> jetties = shardToJetty.get(slice);
for (CloudJettyRunner jetty : jetties) {
if (!jetty.nodeName.equals(shardName)) {
stopJetty(jetty.jetty);
stopJetty(jetty);
}
}
}
@ -207,22 +209,22 @@ public class ChaosMonkey {
return jetty;
}
public JettySolrRunner stopRandomShard() throws Exception {
public CloudJettyRunner stopRandomShard() throws Exception {
String sliceName = getRandomSlice();
return stopRandomShard(sliceName);
}
public JettySolrRunner stopRandomShard(String slice) throws Exception {
JettySolrRunner jetty = getRandomJetty(slice, aggressivelyKillLeaders);
if (jetty != null) {
stopJetty(jetty);
public CloudJettyRunner stopRandomShard(String slice) throws Exception {
CloudJettyRunner cjetty = getRandomJetty(slice, aggressivelyKillLeaders);
if (cjetty != null) {
stopJetty(cjetty);
}
return jetty;
return cjetty;
}
public JettySolrRunner killRandomShard() throws Exception {
public CloudJettyRunner killRandomShard() throws Exception {
// add all the shards to a list
String sliceName = getRandomSlice();
@ -238,15 +240,15 @@ public class ChaosMonkey {
return sliceName;
}
public JettySolrRunner killRandomShard(String slice) throws Exception {
JettySolrRunner jetty = getRandomJetty(slice, aggressivelyKillLeaders);
if (jetty != null) {
killJetty(jetty);
public CloudJettyRunner killRandomShard(String slice) throws Exception {
CloudJettyRunner cjetty = getRandomJetty(slice, aggressivelyKillLeaders);
if (cjetty != null) {
killJetty(cjetty);
}
return jetty;
return cjetty;
}
public JettySolrRunner getRandomJetty(String slice, boolean aggressivelyKillLeaders) throws KeeperException, InterruptedException {
public CloudJettyRunner getRandomJetty(String slice, boolean aggressivelyKillLeaders) throws KeeperException, InterruptedException {
int numRunning = 0;
@ -301,15 +303,15 @@ public class ChaosMonkey {
}
Random random = LuceneTestCase.random();
int chance = random.nextInt(10);
JettySolrRunner jetty;
CloudJettyRunner cjetty;
if (chance <= 5 && aggressivelyKillLeaders) {
// if killLeader, really aggressively go after leaders
jetty = shardToLeaderJetty.get(slice).jetty;
cjetty = shardToLeaderJetty.get(slice);
} else {
// get random shard
List<CloudJettyRunner> jetties = shardToJetty.get(slice);
int index = random.nextInt(jetties.size());
jetty = jetties.get(index).jetty;
cjetty = jetties.get(index);
ZkNodeProps leader = zkStateReader.getLeaderProps(collection, slice);
boolean isLeader = leader.get(ZkStateReader.NODE_NAME_PROP).equals(jetties.get(index).nodeName);
@ -320,15 +322,16 @@ public class ChaosMonkey {
}
}
if (jetty.getLocalPort() == -1) {
if (cjetty.jetty.getLocalPort() == -1) {
// we can't kill the dead
monkeyLog("abort! This guy is already dead");
return null;
}
//System.out.println("num active:" + numActive + " for " + slice + " sac:" + jetty.getLocalPort());
monkeyLog("chose a victim! " + jetty.getLocalPort());
return jetty;
monkeyLog("chose a victim! " + cjetty.jetty.getLocalPort());
return cjetty;
}
public SolrServer getRandomClient(String slice) throws KeeperException, InterruptedException {
@ -353,7 +356,7 @@ public class ChaosMonkey {
stop = false;
new Thread() {
private List<JettySolrRunner> deadPool = new ArrayList<JettySolrRunner>();
private List<CloudJettyRunner> deadPool = new ArrayList<CloudJettyRunner>();
@Override
public void run() {
@ -364,25 +367,9 @@ public class ChaosMonkey {
if (random.nextBoolean()) {
if (!deadPool.isEmpty()) {
int index = random.nextInt(deadPool.size());
JettySolrRunner jetty = deadPool.get(index);
try {
jetty.start();
} catch (BindException e) {
jetty.stop();
sleep(2000);
try {
jetty.start();
} catch (BindException e2) {
jetty.stop();
sleep(5000);
try {
jetty.start();
} catch (BindException e3) {
// we could not get the port
jetty.stop();
continue;
}
}
JettySolrRunner jetty = deadPool.get(index).jetty;
if (!ChaosMonkey.start(jetty)) {
continue;
}
//System.out.println("started on port:" + jetty.getLocalPort());
deadPool.remove(index);
@ -402,16 +389,16 @@ public class ChaosMonkey {
randomConnectionLoss();
}
JettySolrRunner jetty;
CloudJettyRunner cjetty;
if (random.nextBoolean()) {
jetty = stopRandomShard();
cjetty = stopRandomShard();
} else {
jetty = killRandomShard();
cjetty = killRandomShard();
}
if (jetty == null) {
if (cjetty == null) {
// we cannot kill
} else {
deadPool.add(jetty);
deadPool.add(cjetty);
}
} catch (InterruptedException e) {
@ -441,4 +428,31 @@ public class ChaosMonkey {
return starts.get();
}
public static void stop(JettySolrRunner jetty) throws Exception {
stopJettySolrRunner(jetty);
}
public static boolean start(JettySolrRunner jetty) throws Exception {
try {
jetty.start();
} catch (BindException e) {
jetty.stop();
Thread.sleep(2000);
try {
jetty.start();
} catch (BindException e2) {
jetty.stop();
Thread.sleep(5000);
try {
jetty.start();
} catch (BindException e3) {
// we could not get the port
jetty.stop();
return false;
}
}
}
return true;
}
}


@ -87,7 +87,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
// as it's not supported for recovery
// del("*:*");
List<StopableIndexingThread> threads = new ArrayList<StopableIndexingThread>();
List<StopableThread> threads = new ArrayList<StopableThread>();
int threadCount = 1;
int i = 0;
for (i = 0; i < threadCount; i++) {
@ -97,6 +97,14 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
indexThread.start();
}
threadCount = 1;
i = 0;
for (i = 0; i < threadCount; i++) {
StopableSearchThread searchThread = new StopableSearchThread();
threads.add(searchThread);
searchThread.start();
}
FullThrottleStopableIndexingThread ftIndexThread = new FullThrottleStopableIndexingThread(
clients, i * 50000, true);
threads.add(ftIndexThread);
@ -110,12 +118,12 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
chaosMonkey.stopTheMonkey();
}
for (StopableIndexingThread indexThread : threads) {
for (StopableThread indexThread : threads) {
indexThread.safeStop();
}
// wait for stop...
for (StopableIndexingThread indexThread : threads) {
for (StopableThread indexThread : threads) {
indexThread.join();
}


@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
@ -66,7 +67,7 @@ import org.slf4j.LoggerFactory;
*/
@Slow
public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
private static Logger log = LoggerFactory.getLogger(FullSolrCloudTest.class);
static Logger log = LoggerFactory.getLogger(FullSolrCloudTest.class);
@BeforeClass
public static void beforeFullSolrCloudTest() {
@ -98,39 +99,40 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
protected volatile CloudSolrServer cloudClient;
protected Map<JettySolrRunner,ZkNodeProps> jettyToInfo = new HashMap<JettySolrRunner,ZkNodeProps>();
protected Map<CloudSolrServerClient,ZkNodeProps> clientToInfo = new HashMap<CloudSolrServerClient,ZkNodeProps>();
protected Map<String,List<SolrServer>> shardToClient = new HashMap<String,List<SolrServer>>();
protected List<CloudJettyRunner> cloudJettys = new ArrayList<CloudJettyRunner>();
protected Map<String,List<CloudJettyRunner>> shardToJetty = new HashMap<String,List<CloudJettyRunner>>();
private AtomicInteger jettyIntCntr = new AtomicInteger(0);
protected ChaosMonkey chaosMonkey;
protected volatile ZkStateReader zkStateReader;
protected Map<String,SolrServer> shardToLeaderClient = new HashMap<String,SolrServer>();
protected Map<String,CloudJettyRunner> shardToLeaderJetty = new HashMap<String,CloudJettyRunner>();
class CloudJettyRunner {
static class CloudJettyRunner {
JettySolrRunner jetty;
String nodeName;
String coreNodeName;
String url;
CloudSolrServerClient client;
public ZkNodeProps info;
}
static class CloudSolrServerClient {
SolrServer client;
SolrServer solrClient;
String shardName;
int port;
public ZkNodeProps info;
public CloudSolrServerClient() {}
public CloudSolrServerClient(SolrServer client) {
this.client = client;
this.solrClient = client;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((client == null) ? 0 : client.hashCode());
result = prime * result + ((solrClient == null) ? 0 : solrClient.hashCode());
return result;
}
@ -140,9 +142,9 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
CloudSolrServerClient other = (CloudSolrServerClient) obj;
if (client == null) {
if (other.client != null) return false;
} else if (!client.equals(other.client)) return false;
if (solrClient == null) {
if (other.solrClient != null) return false;
} else if (!solrClient.equals(other.solrClient)) return false;
return true;
}
@ -189,7 +191,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
chaosMonkey = new ChaosMonkey(zkServer, zkStateReader,
DEFAULT_COLLECTION, shardToJetty, shardToClient, shardToLeaderClient,
DEFAULT_COLLECTION, shardToJetty,
shardToLeaderJetty);
}
@ -335,9 +337,8 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
protected void updateMappingsFromZk(List<JettySolrRunner> jettys,
List<SolrServer> clients) throws Exception {
zkStateReader.updateCloudState(true);
shardToClient.clear();
cloudJettys.clear();
shardToJetty.clear();
jettyToInfo.clear();
CloudState cloudState = zkStateReader.getCloudState();
Map<String,Slice> slices = cloudState.getSlices(DEFAULT_COLLECTION);
@ -347,6 +348,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
+ DEFAULT_COLLECTION + " in " + cloudState.getCollections());
}
List<CloudSolrServerClient> theClients = new ArrayList<CloudSolrServerClient>();
for (SolrServer client : clients) {
// find info for this client in zk
nextClient:
@ -359,36 +361,23 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
if (shard.getKey().contains(":" + port + "_")) {
CloudSolrServerClient csc = new CloudSolrServerClient();
csc.client = client;
csc.solrClient = client;
csc.port = port;
csc.shardName = shard.getValue().get(ZkStateReader.NODE_NAME_PROP);
boolean isLeader = shard.getValue().containsKey(
ZkStateReader.LEADER_PROP);
clientToInfo.put(csc, shard.getValue());
List<SolrServer> list = shardToClient.get(slice.getKey());
if (list == null) {
list = new ArrayList<SolrServer>();
shardToClient.put(slice.getKey(), list);
}
list.add(client);
csc.info = shard.getValue();
theClients .add(csc);
if (isLeader) {
shardToLeaderClient.put(slice.getKey(), client);
}
break nextClient;
}
}
}
}
for (Map.Entry<String,Slice> slice : slices.entrySet()) {
// check that things look right
assertEquals(slice.getValue().getShards().size(), shardToClient.get(slice.getKey()).size());
}
for (JettySolrRunner jetty : jettys) {
int port = jetty.getLocalPort();
if (port == -1) {
continue; // If we cannot get the port, this jetty is down
throw new RuntimeException("Cannot find the port for jetty");
}
nextJetty:
@ -396,7 +385,6 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
Map<String,ZkNodeProps> theShards = slice.getValue().getShards();
for (Map.Entry<String,ZkNodeProps> shard : theShards.entrySet()) {
if (shard.getKey().contains(":" + port + "_")) {
jettyToInfo.put(jetty, shard.getValue());
List<CloudJettyRunner> list = shardToJetty.get(slice.getKey());
if (list == null) {
list = new ArrayList<CloudJettyRunner>();
@ -406,13 +394,16 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
ZkStateReader.LEADER_PROP);
CloudJettyRunner cjr = new CloudJettyRunner();
cjr.jetty = jetty;
cjr.info = shard.getValue();
cjr.nodeName = shard.getValue().get(ZkStateReader.NODE_NAME_PROP);
cjr.coreNodeName = shard.getKey();
cjr.url = shard.getValue().get(ZkStateReader.BASE_URL_PROP) + "/" + shard.getValue().get(ZkStateReader.CORE_NAME_PROP);
cjr.client = findClientByPort(port, theClients);
list.add(cjr);
if (isLeader) {
shardToLeaderJetty.put(slice.getKey(), cjr);
}
cloudJettys.add(cjr);
break nextJetty;
}
}
@ -431,6 +422,15 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
}
private CloudSolrServerClient findClientByPort(int port, List<CloudSolrServerClient> theClients) {
for (CloudSolrServerClient client : theClients) {
if (client.port == port) {
return client;
}
}
throw new IllegalArgumentException("Client with the give port does not exist:" + port);
}
@Override
protected void setDistributedParams(ModifiableSolrParams params) {
@ -509,11 +509,15 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
protected void del(String q) throws Exception {
controlClient.deleteByQuery(q);
cloudClient.deleteByQuery(q);
/***
for (SolrServer client : clients) {
UpdateRequest ureq = new UpdateRequest();
// ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
ureq.deleteByQuery(q).process(client);
}
***/
}// serial commit...
/*
@ -647,9 +651,9 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
// new server should be part of first shard
// how many docs are on the new shard?
for (SolrServer client : shardToClient.get("shard1")) {
for (CloudJettyRunner cjetty : shardToJetty.get("shard1")) {
if (VERBOSE) System.err.println("total:"
+ client.query(new SolrQuery("*:*")).getResults().getNumFound());
+ cjetty.client.solrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
}
checkShardConsistency("shard1");
@ -673,24 +677,32 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
commit();
long deadShardCount = shardToClient.get(SHARD2).get(0).query(query).getResults().getNumFound();
long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
.query(query).getResults().getNumFound();
query("q", "*:*", "sort", "n_tl1 desc");
// kill a shard
JettySolrRunner deadShard = chaosMonkey.stopShard(SHARD2, 0);
CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD2, 0);
cloudClient.connect();
int tries = 0;
while (cloudClient.getZkStateReader().getCloudState().liveNodesContain(clientToInfo.get(new CloudSolrServerClient(shardToClient.get(SHARD2).get(0))).get(ZkStateReader.NODE_NAME_PROP))) {
if (tries++ == 60) {
fail("Shard still reported as live in zk");
}
Thread.sleep(1000);
// we are careful to make sure the downed node is no longer in the state,
// because on some systems (especially freebsd w/ blackhole enabled), trying
// to talk to a downed node causes grief
Set<CloudJettyRunner> jetties = new HashSet<CloudJettyRunner>();
jetties.addAll(shardToJetty.get(SHARD2));
jetties.remove(deadShard);
for (CloudJettyRunner cjetty : jetties) {
waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
.getFilter()).getCores().getZkController().getZkStateReader(),
deadShard);
}
waitToSeeNotLive(cloudClient.getZkStateReader(), deadShard);
// ensure shard is dead
try {
index_specific(shardToClient.get(SHARD2).get(0), id, 999, i1, 107, t1,
index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1,
"specific doc!");
fail("This server should be down and this update should have failed");
} catch (SolrServerException e) {
@ -705,21 +717,11 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
// System.out.println("clouddocs:" + cloudClientDocs);
// try to index to a living shard at shard2
// we are careful to make sure the downed node is no longer in the state,
// because on some systems (especially freebsd w/ blackhole enabled), trying
// to talk to a downed node causes grief
tries = 0;
while (((SolrDispatchFilter) shardToJetty.get(SHARD2).get(1).jetty.getDispatchFilter().getFilter()).getCores().getZkController().getZkStateReader().getCloudState().liveNodesContain(clientToInfo.get(new CloudSolrServerClient(shardToClient.get(SHARD2).get(0))).get(ZkStateReader.NODE_NAME_PROP))) {
if (tries++ == 120) {
fail("Shard still reported as live in zk");
}
Thread.sleep(1000);
}
long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
index_specific(shardToClient.get(SHARD2).get(1), id, 1000, i1, 108, t1,
index_specific(shardToJetty.get(SHARD2).get(1).client.solrClient, id, 1000, i1, 108, t1,
"specific doc!");
commit();
@ -777,21 +779,29 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
// query("q","matchesnothing","fl","*,score", "debugQuery", "true");
// this should trigger a recovery phase on deadShard
deadShard.start(true);
ChaosMonkey.start(deadShard.jetty);
// make sure we have published we are recovering
Thread.sleep(1500);
waitForRecoveriesToFinish(false);
deadShardCount = shardToClient.get(SHARD2).get(0).query(query).getResults().getNumFound();
deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
.query(query).getResults().getNumFound();
// if we properly recovered, we should now have the couple missing docs that
// came in while shard was down
checkShardConsistency(true, false);
// recover over 100 docs so we do more than just peer sync (replicate recovery)
deadShard = chaosMonkey.stopShard(SHARD2, 0);
chaosMonkey.stopJetty(deadShard);
for (CloudJettyRunner cjetty : jetties) {
waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
.getFilter()).getCores().getZkController().getZkStateReader(),
deadShard);
}
waitToSeeNotLive(cloudClient.getZkStateReader(), deadShard);
for (int i = 0; i < 226; i++) {
doc = new SolrInputDocument();
@ -804,7 +814,9 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
commit();
deadShard.start(true);
Thread.sleep(1500);
ChaosMonkey.start(deadShard.jetty);
// make sure we have published we are recovering
Thread.sleep(1500);
@ -1009,10 +1021,10 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
protected String checkShardConsistency(String shard, boolean verbose)
throws Exception {
List<SolrServer> solrClients = shardToClient.get(shard);
if (solrClients == null) {
List<CloudJettyRunner> solrJetties = shardToJetty.get(shard);
if (solrJetties == null) {
throw new RuntimeException("shard not found:" + shard + " keys:"
+ shardToClient.keySet());
+ shardToJetty.keySet());
}
long num = -1;
long lastNum = -1;
@ -1024,18 +1036,18 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
"The client count does not match up with the shard count for slice:"
+ shard,
zkStateReader.getCloudState().getSlice(DEFAULT_COLLECTION, shard)
.getShards().size(), solrClients.size());
.getShards().size(), solrJetties.size());
SolrServer lastClient = null;
for (SolrServer client : solrClients) {
ZkNodeProps props = clientToInfo.get(new CloudSolrServerClient(client));
for (CloudJettyRunner cjetty : solrJetties) {
ZkNodeProps props = cjetty.info;
if (verbose) System.err.println("client" + cnt++);
if (verbose) System.err.println("PROPS:" + props);
try {
SolrQuery query = new SolrQuery("*:*");
query.set("distrib", false);
num = client.query(query).getResults().getNumFound();
num = cjetty.client.solrClient.query(query).getResults().getNumFound();
} catch (SolrServerException e) {
if (verbose) System.err.println("error contacting client: "
+ e.getMessage() + "\n");
@ -1060,7 +1072,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
if (active && live) {
if (lastNum > -1 && lastNum != num && failMessage == null) {
failMessage = shard + " is not consistent. Got " + lastNum + " from " + lastClient + "lastClient"
+ " and got " + num + " from " + client;
+ " and got " + num + " from " + cjetty.url;
if (verbose || true) {
System.err.println("######" + failMessage);
@ -1071,14 +1083,14 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
query.set("sort","id asc");
SolrDocumentList lst1 = lastClient.query(query).getResults();
SolrDocumentList lst2 = client.query(query).getResults();
SolrDocumentList lst2 = cjetty.client.solrClient.query(query).getResults();
showDiff(lst1, lst2, lastClient.toString(), client.toString());
showDiff(lst1, lst2, lastClient.toString(), cjetty.client.solrClient.toString());
}
}
lastNum = num;
lastClient = client;
lastClient = cjetty.client.solrClient;
}
}
return failMessage;
@ -1125,7 +1137,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
updateMappingsFromZk(jettys, clients);
Set<String> theShards = shardToClient.keySet();
Set<String> theShards = shardToJetty.keySet();
String failMessage = null;
for (String shard : theShards) {
String shardFailMessage = checkShardConsistency(shard, verbose);
@ -1140,15 +1152,15 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
if (checkVsControl) {
// now check that the right # are on each shard
theShards = shardToClient.keySet();
theShards = shardToJetty.keySet();
int cnt = 0;
for (String s : theShards) {
int times = shardToClient.get(s).size();
int times = shardToJetty.get(s).size();
for (int i = 0; i < times; i++) {
try {
SolrServer client = shardToClient.get(s).get(i);
ZkNodeProps props = clientToInfo.get(new CloudSolrServerClient(
client));
CloudJettyRunner cjetty = shardToJetty.get(s).get(i);
ZkNodeProps props = cjetty.info;
SolrServer client = cjetty.client.solrClient;
boolean active = props.get(ZkStateReader.STATE_PROP).equals(
ZkStateReader.ACTIVE);
if (active) {
@ -1180,9 +1192,10 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
private SolrServer getClient(String nodeName) {
for (CloudSolrServerClient client : clientToInfo.keySet()) {
for (CloudJettyRunner cjetty : cloudJettys) {
CloudSolrServerClient client = cjetty.client;
if (client.shardName.equals(nodeName)) {
return client.client;
return client.solrClient;
}
}
return null;
@ -1216,12 +1229,13 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
+ DEFAULT_COLLECTION + " in " + cloudState.getCollections());
}
for (SolrServer client : clients) {
for (CloudJettyRunner cjetty : cloudJettys) {
CloudSolrServerClient client = cjetty.client;
for (Map.Entry<String,Slice> slice : slices.entrySet()) {
Map<String,ZkNodeProps> theShards = slice.getValue().getShards();
for (Map.Entry<String,ZkNodeProps> shard : theShards.entrySet()) {
String shardName = new URI(
((HttpSolrServer) client).getBaseURL()).getPort()
((HttpSolrServer) client.solrClient).getBaseURL()).getPort()
+ "_solr_";
if (verbose && shard.getKey().endsWith(shardName)) {
System.err.println("shard:" + slice.getKey());
@ -1231,12 +1245,14 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
}
long count = 0;
String currentState = clientToInfo.get(new CloudSolrServerClient(client))
.get(ZkStateReader.STATE_PROP);
if (currentState != null && currentState.equals(ZkStateReader.ACTIVE)) {
String currentState = cjetty.info.get(ZkStateReader.STATE_PROP);
if (currentState != null
&& currentState.equals(ZkStateReader.ACTIVE)
&& zkStateReader.getCloudState().liveNodesContain(
cjetty.info.get(ZkStateReader.NODE_NAME_PROP))) {
SolrQuery query = new SolrQuery("*:*");
query.set("distrib", false);
count = client.query(query).getResults().getNumFound();
count = client.solrClient.query(query).getResults().getNumFound();
}
if (verbose) System.err.println("client docs:" + count + "\n\n");
@ -1259,7 +1275,14 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
return rsp;
}
class StopableIndexingThread extends Thread {
abstract class StopableThread extends Thread {
public StopableThread(String name) {
super(name);
}
public abstract void safeStop();
}
class StopableIndexingThread extends StopableThread {
private volatile boolean stop = false;
protected final int startI;
protected final List<Integer> deletes = new ArrayList<Integer>();
@ -1333,6 +1356,55 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
};
class StopableSearchThread extends StopableThread {
private volatile boolean stop = false;
protected final AtomicInteger fails = new AtomicInteger();
private String[] QUERIES = new String[] {"to come","their country","aid","co*"};
public StopableSearchThread() {
super("StopableSearchThread");
setDaemon(true);
}
@Override
public void run() {
Random random = random();
int numSearches = 0;
while (true && !stop) {
numSearches++;
try {
//to come to the aid of their country.
cloudClient.query(new SolrQuery(QUERIES[random.nextInt(QUERIES.length)]));
} catch (Exception e) {
System.err.println("QUERY REQUEST FAILED:");
e.printStackTrace();
if (e instanceof SolrServerException) {
System.err.println("ROOT CAUSE:");
((SolrServerException) e).getRootCause().printStackTrace();
}
fails.incrementAndGet();
}
try {
Thread.sleep(random.nextInt(4000) + 300);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
System.err.println("num searches done:" + numSearches + " with " + fails + " fails");
}
public void safeStop() {
stop = true;
}
public int getFails() {
return fails.get();
}
};
protected void waitForThingsToLevelOut(int waitForRecTimeSeconds) throws Exception {
log.info("Wait for recoveries to finish - wait " + waitForRecTimeSeconds + " for each attempt");
int cnt = 0;
@ -1348,7 +1420,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
updateMappingsFromZk(jettys, clients);
Set<String> theShards = shardToClient.keySet();
Set<String> theShards = shardToJetty.keySet();
String failMessage = null;
for (String shard : theShards) {
failMessage = checkShardConsistency(shard, false);
@ -1415,4 +1487,16 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
throw new RuntimeException(ex);
}
}
protected void waitToSeeNotLive(ZkStateReader zkStateReader,
CloudJettyRunner cjetty) throws InterruptedException {
int tries = 0;
while (zkStateReader.getCloudState()
.liveNodesContain(cjetty.info.get(ZkStateReader.NODE_NAME_PROP))) {
if (tries++ == 120) {
fail("Shard still reported as live in zk");
}
Thread.sleep(1000);
}
}
}


@ -72,7 +72,7 @@ public class RecoveryZkTest extends FullSolrCloudTest {
Thread.sleep(atLeast(2000));
// bring shard replica down
JettySolrRunner replica = chaosMonkey.stopShard("shard1", 1);
JettySolrRunner replica = chaosMonkey.stopShard("shard1", 1).jetty;
// wait a moment - lets allow some docs to be indexed so replication time is non 0
@ -100,8 +100,8 @@ public class RecoveryZkTest extends FullSolrCloudTest {
checkShardConsistency("shard1", false);
SolrQuery query = new SolrQuery("*:*");
query.setParam("distrib", "false");
long client1Docs = shardToClient.get("shard1").get(0).query(query).getResults().getNumFound();
long client2Docs = shardToClient.get("shard1").get(1).query(query).getResults().getNumFound();
long client1Docs = shardToJetty.get("shard1").get(0).client.solrClient.query(query).getResults().getNumFound();
long client2Docs = shardToJetty.get("shard1").get(1).client.solrClient.query(query).getResults().getNumFound();
assertTrue(client1Docs > 0);
assertEquals(client1Docs, client2Docs);


@ -26,9 +26,7 @@ import java.util.Set;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
@ -91,9 +89,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
waitForThingsToLevelOut();
// something wrong with this?
//del("*:*");
del("*:*");
List<String> skipServers = new ArrayList<String>();
indexDoc(skipServers, id, 0, i1, 50, tlong, 50, t1,
@ -121,7 +117,8 @@ public class SyncSliceTest extends FullSolrCloudTest {
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
String baseUrl = ((HttpSolrServer) shardToClient.get("shard1").get(2)).getBaseURL();
String baseUrl = ((HttpSolrServer) shardToJetty.get("shard1").get(2).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
@ -143,28 +140,24 @@ public class SyncSliceTest extends FullSolrCloudTest {
"to come to the aid of their country.");
// kill the leader - new leader could have all the docs or be missing one
JettySolrRunner leaderJetty = shardToLeaderJetty.get("shard1").jetty;
SolrServer leaderClient = shardToLeaderClient.get("shard1");
Set<JettySolrRunner> jetties = new HashSet<JettySolrRunner>();
for (int i = 0; i < shardCount; i++) {
jetties.add(shardToJetty.get("shard1").get(i).jetty);
}
CloudJettyRunner leaderJetty = shardToLeaderJetty.get("shard1");
Set<CloudJettyRunner> jetties = new HashSet<CloudJettyRunner>();
jetties.addAll(shardToJetty.get("shard1"));
jetties.remove(leaderJetty);
chaosMonkey.killJetty(leaderJetty);
JettySolrRunner upJetty = jetties.iterator().next();
// we are careful to make sure the downed node is no longer in the state,
// because on some systems (especially freebsd w/ blackhole enabled), trying
// to talk to a downed node causes grief
int tries = 0;
while (((SolrDispatchFilter) upJetty.getDispatchFilter().getFilter()).getCores().getZkController().getZkStateReader().getCloudState().liveNodesContain(clientToInfo.get(new CloudSolrServerClient(leaderClient)).get(ZkStateReader.NODE_NAME_PROP))) {
if (tries++ == 120) {
fail("Shard still reported as live in zk");
}
Thread.sleep(1000);
for (CloudJettyRunner cjetty : jetties) {
waitToSeeNotLive(((SolrDispatchFilter) cjetty.jetty.getDispatchFilter()
.getFilter()).getCores().getZkController().getZkStateReader(),
leaderJetty);
}
waitToSeeNotLive(cloudClient.getZkStateReader(), leaderJetty);
waitForThingsToLevelOut();
checkShardConsistency(false, true);
@ -183,7 +176,7 @@ public class SyncSliceTest extends FullSolrCloudTest {
updateMappingsFromZk(jettys, clients);
Set<String> theShards = shardToClient.keySet();
Set<String> theShards = shardToJetty.keySet();
String failMessage = null;
for (String shard : theShards) {
failMessage = checkShardConsistency(shard, false);


@ -48,7 +48,8 @@ public class TestMultiCoreConfBootstrap extends SolrTestCaseJ4 {
@AfterClass
public static void afterClass() {
zkServer = null;
zkDir = null;
}
@Override


@ -117,6 +117,9 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
slaveJetty.stop();
master.tearDown();
slave.tearDown();
masterJetty = slaveJetty = null;
master = slave = null;
masterClient = slaveClient = null;
}
private static JettySolrRunner createJetty(SolrInstance instance) throws Exception {


@ -2004,4 +2004,61 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 {
,"*[count(//lst[@name='facet_fields']/lst/int)=0]"
);
}
/**
* kind of an absurd test because if there were an infinite loop, it
* would never finish -- but at least it ensures that <i>if</i> one of
* these requests returns, it returns an error
*/
public void testRangeFacetInfiniteLoopDetection() {
for (String field : new String[] {"foo_f", "foo_sf",
"foo_d", "foo_sd",
"foo_i", "foo_si"}) {
assertQEx("no zero gap error: " + field,
req("q", "*:*",
"facet", "true",
"facet.range", field,
"facet.range.start", "23",
"facet.range.gap", "0",
"facet.range.end", "100"),
400);
}
for (String field : new String[] {"foo_pdt", "foo_dt"}) {
for (String type : new String[] {"date", "range"}) {
assertQEx("no zero gap error for facet." + type + ": " + field,
req("q", "*:*",
"facet", "true",
"facet." + type, field,
"facet."+type+".start", "NOW",
"facet."+type+".gap", "+0DAYS",
"facet."+type+".end", "NOW+10DAY"),
400);
}
}
for (String field : new String[] {"foo_f", "foo_sf"}) {
assertQEx("no float underflow error: " + field,
req("q", "*:*",
"facet", "true",
"facet.range", field,
"facet.range.start", "100000000000",
"facet.range.end", "100000086200",
"facet.range.gap", "2160"),
400);
}
for (String field : new String[] {"foo_d", "foo_sd"}) {
assertQEx("no double underflow error: " + field,
req("q", "*:*",
"facet", "true",
"facet.range", field,
"facet.range.start", "9900000000000",
"facet.range.end", "9900000086200",
"facet.range.gap", "0.0003"),
400);
}
}
}
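The float and double cases above exercise absorption rather than a literal zero gap: near 1e11 a float's unit in the last place is 8192, and near 9.9e12 a double's is roughly 0.002, so adding the requested gap returns the start value bit-for-bit unchanged and a naive low += gap loop never advances. A standalone check of those two additions (plain Java, no Solr types):

public class GapUnderflow {
  public static void main(String[] args) {
    float fLow = 100000000000f;                  // Math.ulp(fLow) == 8192.0f
    System.out.println(fLow + 2160f == fLow);    // true: gap absorbed

    double dLow = 9900000000000d;                // Math.ulp(dLow) ~= 0.00195
    System.out.println(dLow + 0.0003d == dLow);  // true: gap absorbed
  }
}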


@ -21,6 +21,7 @@ import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -34,6 +35,11 @@ public class TestValueSourceCache extends SolrTestCaseJ4 {
}
static QParser _func;
@AfterClass
public static void afterClass() throws Exception {
_func = null;
}
Query getQuery(String query) throws ParseException {
_func.setString(query);


@ -17,7 +17,7 @@
<field column="description" xpath="/RDF/item/description" />
<field column="creator" xpath="/RDF/item/creator" />
<field column="item-subject" xpath="/RDF/item/subject" />
<field column="date" xpath="/RDF/item/date" dateTimeFormat="yyyy-MM-dd'T'hh:mm:ss" />
<field column="date" xpath="/RDF/item/date" dateTimeFormat="yyyy-MM-dd'T'HH:mm:ss" />
<field column="slash-department" xpath="/RDF/item/department" />
<field column="slash-section" xpath="/RDF/item/section" />
<field column="slash-comments" xpath="/RDF/item/comments" />

solr/example/solr-webapp/.gitignore (new file; contents not shown)