mirror of https://github.com/apache/lucene.git
LUCENE-3892: merge trunk (1363348-1364687)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/pforcodec_3892@1364696 13f79535-47bb-0310-9956-ffa450edef68
commit 02bf38865a
@@ -24,7 +24,6 @@
     </subant>
   </target>

-  <!-- nocommit put depends="validate" back -->
   <target name="test" description="Test both Lucene and Solr">
     <sequential>
       <subant target="test" inheritall="false" failonerror="true">
@@ -150,7 +149,7 @@
     </delete>
   </target>

-  <target name="clean" depends="clean-jars" description="Clean Lucene and Solr">
+  <target name="clean" description="Clean Lucene and Solr">
     <delete dir="dist" />
     <sequential>
       <subant target="clean" inheritall="false" failonerror="true">
@@ -22,6 +22,10 @@ New features
   respect field boundaries in the case of highlighting for multivalued fields.
   (Martijn van Groningen)

+* LUCENE-4227: Added DirectPostingsFormat, to hold all postings in
+  memory as uncompressed simple arrays. This uses a tremendous amount
+  of RAM but gives good search performance gains. (Mike McCandless)
+
 API Changes

 * LUCENE-4138: update of morfologik (Polish morphological analyzer) to 1.5.3.
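For readers who want to try the new format: one way to opt in is a per-field codec override. The sketch below is editorial illustration against the 4.0-dev API at this revision, not part of the commit; it routes every field to the format registered under the name "Direct".

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class DirectPostingsExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig iwc =
        new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40));
    // Route all postings to the "Direct" format: fast to search, heavy on RAM.
    iwc.setCodec(new Lucene40Codec() {
      @Override
      public PostingsFormat getPostingsFormatForField(String field) {
        return PostingsFormat.forName("Direct");
      }
    });
    IndexWriter writer = new IndexWriter(dir, iwc);
    writer.close();
    dir.close();
  }
}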
@@ -46,6 +50,11 @@ API Changes
   filter another reader and you override correct() for offset correction.
   (Robert Muir)

+* LUCENE-4240: Analyzer api now just takes fieldName for getOffsetGap. If the
+  field is not analyzed (e.g. StringField), then the analyzer is not invoked
+  at all. If you want to tweak things like positionIncrementGap and offsetGap,
+  analyze the field with KeywordTokenizer instead. (Grant Ingersoll, Robert Muir)
+
 Optimizations

 * LUCENE-4171: Performance improvements to Packed64.
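The workaround this entry suggests looks roughly like the sketch below; KeywordTokenizer here is assumed from the analyzers-common module, and the gap values are arbitrary demo numbers, not recommendations.

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordTokenizer;

public class KeywordWithGaps {
  public static Analyzer create() {
    return new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // One untouched token per value, like StringField, but analyzed,
        // so the gap hooks below are actually consulted at indexing time.
        return new TokenStreamComponents(new KeywordTokenizer(reader));
      }

      @Override
      public int getPositionIncrementGap(String fieldName) {
        return 100; // demo value: gap between multivalued instances
      }

      @Override
      public int getOffsetGap(String fieldName) {
        return 1; // demo value
      }
    };
  }
}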
@@ -84,6 +93,9 @@ Bug Fixes
   all queries. Made Scorer.freq() abstract.
   (Koji Sekiguchi, Mike McCandless, Robert Muir)

+* LUCENE-4234: Exception when FacetsCollector is used with ScoreFacetRequest,
+  and the number of matching documents is too large. (Gilad Barkai via Shai Erera)
+
 Build

 * LUCENE-4094: Support overriding file.encoding on forked test JVMs
@@ -1234,7 +1246,60 @@ Build
   tasks) to correctly encode build file names as URIs for later processing by
   XSL. (Greg Bowyer, Uwe Schindler)

+
+======================= Lucene 3.6.1 =======================
+More information about this release, including any errata related to the
+release notes, upgrade instructions, or other changes may be found online at:
+   https://wiki.apache.org/lucene-java/Lucene3.6.1
+
+Bug Fixes
+
+* LUCENE-3969: Throw IAE on bad arguments that could cause confusing
+  errors in KeywordTokenizer.
+  (Uwe Schindler, Mike McCandless, Robert Muir)
+
+* LUCENE-3971: MappingCharFilter could return invalid final token position.
+  (Dawid Weiss, Robert Muir)
+
+* LUCENE-4023: DisjunctionMaxScorer now implements visitSubScorers().
+  (Uwe Schindler)
+
+* LUCENE-2566: + - operators allow any amount of whitespace (yonik, janhoy)
+
+* LUCENE-3590: Fix AIOOBE in BytesRef/CharsRef copyBytes/copyChars when
+  offset is nonzero, fix off-by-one in CharsRef.subSequence, and fix
+  CharsRef's CharSequence methods to throw exceptions in boundary cases
+  to properly meet the specification. (Robert Muir)
+
+* LUCENE-4222: TieredMergePolicy.getFloorSegmentMB was returning the
+  size in bytes not MB (Chris Fuller via Mike McCandless)
+
+API Changes
+
+* LUCENE-4023: Changed the visibility of Scorer#visitSubScorers() to
+  public, otherwise it's impossible to implement Scorers outside
+  the Lucene package. (Uwe Schindler)
+
+Optimizations
+
+* LUCENE-4163: Improve concurrency of MMapIndexInput.clone() by using
+  the new WeakIdentityMap on top of a ConcurrentHashMap to manage
+  the cloned instances. WeakIdentityMap was extended to support
+  iterating over its keys. (Uwe Schindler)
+
+Tests
+
+* LUCENE-3873: add MockGraphTokenFilter, testing analyzers with
+  random graph tokens. (Mike McCandless)
+
+* LUCENE-3968: factor out LookaheadTokenFilter from
+  MockGraphTokenFilter (Mike Mccandless)
+
+
 ======================= Lucene 3.6.0 =======================
 More information about this release, including any errata related to the
 release notes, upgrade instructions, or other changes may be found online at:
    https://wiki.apache.org/lucene-java/Lucene3.6

 Changes in backwards compatibility policy

@@ -1290,7 +1355,7 @@ Changes in backwards compatibility policy

 * LUCENE-3712: Removed unused and untested ReaderUtil#subReader methods.
   (Uwe Schindler)

 * LUCENE-3672: Deprecate Directory.fileModified,
   IndexCommit.getTimestamp and .getVersion and
   IndexReader.lastModified and getCurrentVersion (Andrzej Bialecki,
@@ -1313,6 +1378,10 @@ Changes in backwards compatibility policy
 * LUCENE-3738: All readXxx methods in BufferedIndexInput were made
   final. Subclasses should only override protected readInternal /
   seekInternal. (Uwe Schindler)

+* LUCENE-2599: Deprecated the spatial contrib module, which was buggy and not
+  well maintained. Lucene 4 includes a new spatial module that replaces this.
+  (David Smiley, Ryan McKinley, Chris Male)
+
 Changes in Runtime Behavior

@@ -1354,7 +1423,7 @@ API Changes
   query time, wrap your IndexReader using FilterIndexReader, overriding
   FilterIndexReader.norms(). To persist the changes on disk, copy the
   FilteredIndexReader to a new index using IndexWriter.addIndexes().
-  In Lucene 4.0, Similarity will allow you to customize scoring
+  In Lucene 4.0, SimilarityProvider will allow you to customize scoring
   using external norms, too. (Uwe Schindler, Robert Muir)

 * LUCENE-3735: PayloadProcessorProvider was changed to return a
@@ -1379,7 +1448,7 @@ API Changes
   never applying deletes). (MJB, Shai Erera, Mike McCandless)

 * LUCENE-3761: Generalize SearcherManager into an abstract ReferenceManager.
-  SearcherManager remains a concrete class, but due to the refactoring, the
+  SearcherManager remains a concrete class, but due to the refactoring, the
   method maybeReopen has been deprecated in favor of maybeRefresh().
   (Shai Erera, Mike McCandless, Simon Willnauer)

@@ -1404,7 +1473,7 @@ New Features
   queries. Literal asterisks may be represented by quoting or escaping
   (i.e. \* or "*") Custom QueryParser subclasses overriding getRangeQuery()
   will be passed null for any open endpoint. (Ingo Renner, Adriano
-  Crestani, yonik, Mike McCandless
+  Crestani, yonik, Mike McCandless)

 * LUCENE-3121: Add sugar reverse lookup (given an output, find the
   input mapping to it) for FSTs that have strictly monotonic long
@@ -1424,7 +1493,7 @@ New Features

 * LUCENE-3789: Expose MTQ TermsEnum via RewriteMethod for non package private
   access (Simon Willnauer)

 * LUCENE-3881: Added UAX29URLEmailAnalyzer: a standard analyzer that recognizes
   URLs and emails. (Steve Rowe)

@@ -629,3 +629,8 @@ you can now do this:
   instance exposing the inverted index of the one document. From
   Fields you can enumerate all fields, terms, positions, offsets.
+
+* LUCENE-4227: If you were previously using Instantiated index, you
+  may want to use DirectPostingsFormat after upgrading: it stores all
+  postings in simple arrays (byte[] for terms, int[] for docs, freqs,
+  positions, offsets). Note that this only covers postings, whereas
+  Instantiated covered all other parts of the index as well.
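A per-field variant of the opt-in sketched earlier under the CHANGES entry may suit this migration better: keep the default format everywhere except the one field that used to live in an Instantiated index. This is editorial illustration, and "hot" is a hypothetical field name.

import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene40.Lucene40Codec;

public class PerFieldDirectCodec {
  public static Codec create() {
    return new Lucene40Codec() {
      @Override
      public PostingsFormat getPostingsFormatForField(String field) {
        // Only the hypothetical "hot" field pays the RAM cost of Direct.
        return "hot".equals(field)
            ? PostingsFormat.forName("Direct")
            : super.getPostingsFormatForField(field);
      }
    };
  }
}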
@@ -29,7 +29,6 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util._TestUtil;
 import org.junit.BeforeClass;
@@ -45,7 +44,7 @@ public class AddIndexesTaskTest extends BenchmarkTestCase {

     // create a dummy index under inputDir
     inputDir = new File(testDir, "input");
-    MockDirectoryWrapper tmpDir = newFSDirectory(inputDir);
+    Directory tmpDir = newFSDirectory(inputDir);
     try {
       IndexWriter writer = new IndexWriter(tmpDir, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
       for (int i = 0; i < 10; i++) {
@@ -144,6 +144,7 @@
   <property name="javadoc.charset" value="utf-8"/>
   <property name="javadoc.dir" value="${common.dir}/build/docs"/>
   <property name="javadoc.maxmemory" value="512m" />
+  <property name="javadoc.noindex" value="true"/>
   <!-- Javadoc classpath -->
   <path id="javadoc.classpath">
     <path refid="classpath"/>
@@ -1414,7 +1415,7 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
       encoding="${build.encoding}"
       charset="${javadoc.charset}"
       docencoding="${javadoc.charset}"
-      noindex="true"
+      noindex="${javadoc.noindex}"
       includenosourcepackages="true"
       author="true"
       version="true"
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis;
  * limitations under the License.
  */

-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.CloseableThreadLocal;

@@ -114,21 +113,15 @@ public abstract class Analyzer {

   /**
    * Just like {@link #getPositionIncrementGap}, except for
-   * Token offsets instead. By default this returns 1 for
-   * tokenized fields and, as if the fields were joined
-   * with an extra space character, and 0 for un-tokenized
-   * fields. This method is only called if the field
+   * Token offsets instead. By default this returns 1.
+   * This method is only called if the field
    * produced at least one token for indexing.
    *
-   * @param field the field just indexed
+   * @param fieldName the field just indexed
    * @return offset gap, added to the next token emitted from {@link #tokenStream(String,Reader)}
    */
-  public int getOffsetGap(IndexableField field) {
-    if (field.fieldType().tokenized()) {
-      return 1;
-    } else {
-      return 0;
-    }
+  public int getOffsetGap(String fieldName) {
+    return 1;
   }

   /** Frees persistent resources used by this Analyzer */
@@ -17,8 +17,6 @@ package org.apache.lucene.analysis;
  * limitations under the License.
  */

-import org.apache.lucene.index.IndexableField;
-
 import java.io.Reader;

 /**
@@ -83,8 +81,8 @@ public abstract class AnalyzerWrapper extends Analyzer {
    * {@inheritDoc}
    */
   @Override
-  public final int getOffsetGap(IndexableField field) {
-    return getWrappedAnalyzer(field.name()).getOffsetGap(field);
+  public final int getOffsetGap(String fieldName) {
+    return getWrappedAnalyzer(fieldName).getOffsetGap(fieldName);
   }

   @Override
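Because the wrapper now delegates purely by field name, per-field analyzers keep working unchanged. A sketch for illustration, assuming PerFieldAnalyzerWrapper from the analyzers-common module; the field name "title" is hypothetical.

import java.util.Collections;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.Version;

public class PerFieldGapExample {
  public static void main(String[] args) {
    Analyzer defaultAnalyzer = new StandardAnalyzer(Version.LUCENE_40);
    Analyzer wrapper = new PerFieldAnalyzerWrapper(defaultAnalyzer,
        Collections.<String, Analyzer>singletonMap("title", defaultAnalyzer));
    // getOffsetGap("title") now resolves through getWrappedAnalyzer("title"):
    System.out.println(wrapper.getOffsetGap("title")); // 1, the new default
  }
}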
[File diff suppressed because it is too large]
@@ -1601,8 +1601,9 @@ public class CheckIndex {
         }
       }
     }
+    float vectorAvg = status.docCount == 0 ? 0 : status.totVectors / (float)status.docCount;
     msg("OK [" + status.totVectors + " total vector count; avg " +
-        format.format((((float) status.totVectors) / status.docCount)) + " term/freq vector fields per doc]");
+        format.format(vectorAvg) + " term/freq vector fields per doc]");
   } catch (Throwable e) {
     msg("ERROR [" + String.valueOf(e.getMessage()) + "]");
     status.error = e;
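For context on why the guard is needed: Java float division never throws on a zero divisor; it silently produces Infinity (or NaN for 0/0), which is what the old message would have printed for a zero-document index. A one-line demonstration:

public class FloatDivDemo {
  public static void main(String[] args) {
    System.out.println(10 / (float) 0); // Infinity
    System.out.println(0 / (float) 0);  // NaN
  }
}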
@@ -76,6 +76,7 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
     // consumer if it wants to see this particular field
     // tokenized.
     if (fieldType.indexed() && doInvert) {
+      final boolean analyzed = fieldType.tokenized() && docState.analyzer != null;

       // if the field omits norms, the boost cannot be indexed.
       if (fieldType.omitNorms() && field.boost() != 1.0f) {
@@ -88,7 +89,7 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
         int lastStartOffset = 0;

         if (i > 0) {
-          fieldState.position += docState.analyzer == null ? 0 : docState.analyzer.getPositionIncrementGap(fieldInfo.name);
+          fieldState.position += analyzed ? docState.analyzer.getPositionIncrementGap(fieldInfo.name) : 0;
         }

         final TokenStream stream = field.tokenStream(docState.analyzer);
@@ -188,7 +189,7 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
       }
     }

-    fieldState.offset += docState.analyzer == null ? 0 : docState.analyzer.getOffsetGap(field);
+    fieldState.offset += analyzed ? docState.analyzer.getOffsetGap(fieldInfo.name) : 0;
     fieldState.boost *= field.boost();
   }

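The net effect of the new analyzed flag: gaps are consulted only when the field is tokenized and an analyzer is present, and the offset gap is now looked up by field name. The following sketch mirrors the accounting for a hypothetical two-value field; the token counts and end offsets are invented for illustration.

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;

public class GapAccounting {
  public static void main(String[] args) {
    Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        throw new UnsupportedOperationException("not needed for this demo");
      }
    };
    boolean analyzed = true;          // stands in for fieldType.tokenized() && analyzer != null
    int position = 0, offset = 0;
    int[] tokensPerValue = { 2, 3 };  // hypothetical token counts per value
    int[] endOffsetPerValue = { 11, 12 }; // hypothetical final offsets per value
    for (int i = 0; i < tokensPerValue.length; i++) {
      if (i > 0) {
        position += analyzed ? a.getPositionIncrementGap("body") : 0; // default 0
      }
      position += tokensPerValue[i];
      offset += endOffsetPerValue[i];
      offset += analyzed ? a.getOffsetGap("body") : 0; // field-name based now, default 1
    }
    System.out.println("final position=" + position + " offset=" + offset);
  }
}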
@@ -42,7 +42,9 @@ public abstract class DocsAndPositionsEnum extends DocsEnum {

   /** Returns the payload at this position, or null if no
    *  payload was indexed. Only call this once per
-   *  position. */
+   *  position. You should not modify anything (neither
+   *  members of the returned BytesRef nor bytes in the
+   *  byte[]). */
   public abstract BytesRef getPayload() throws IOException;

   public abstract boolean hasPayload();
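Under the strengthened contract, callers that want to keep a payload beyond the current position should copy it first. A minimal sketch, assuming BytesRef.deepCopyOf is available in the 4.0-dev utilities:

import java.io.IOException;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.util.BytesRef;

public class PayloadCopy {
  /** Returns a private copy of the current payload, or null if none was indexed. */
  public static BytesRef copyPayload(DocsAndPositionsEnum postings) throws IOException {
    BytesRef payload = postings.getPayload(); // read-only, possibly reused by the enum
    return payload == null ? null : BytesRef.deepCopyOf(payload);
  }
}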
@@ -20,6 +20,7 @@ package org.apache.lucene.index;
 import org.apache.lucene.util.BytesRef;

 import java.io.IOException;
+import java.util.Arrays;

 /**
  * Exposes flex API, merged from flex API of sub-segments.
@@ -150,6 +151,16 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
   public final static class EnumWithSlice {
     public DocsAndPositionsEnum docsAndPositionsEnum;
     public ReaderSlice slice;
+
+    @Override
+    public String toString() {
+      return slice.toString()+":"+docsAndPositionsEnum;
+    }
   }
+
+  @Override
+  public String toString() {
+    return "MultiDocsAndPositionsEnum(" + Arrays.toString(getSubs()) + ")";
+  }
 }
@@ -21,3 +21,4 @@ org.apache.lucene.codecs.pfor.ForPostingsFormat
 org.apache.lucene.codecs.pfor.PForPostingsFormat
 org.apache.lucene.codecs.bulkvint.BulkVIntPostingsFormat
 org.apache.lucene.codecs.block.BlockPostingsFormat
+org.apache.lucene.codecs.memory.DirectPostingsFormat
@@ -59,7 +59,7 @@ public class TestExternalCodecs extends LuceneTestCase {
       System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
     }

-    MockDirectoryWrapper dir = newDirectory();
+    BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false); // we use a custom codec provider
     IndexWriter w = new IndexWriter(
         dir,
@@ -85,7 +85,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
   }

   public void testSubclassConcurrentMergeScheduler() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     dir.failOn(new FailOnlyOnMerge());

     Document doc = new Document();
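The reason this test keeps a guaranteed mock: failure injection via failOn() exists only on MockDirectoryWrapper, so tests that use it now call the new newMockDirectory() helper instead of newDirectory(), which may hand back a plain wrapper. A sketch of the pattern; the fault logic here is hypothetical.

import java.io.IOException;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;

public class FailureInjectionSketch extends LuceneTestCase {
  public void testWithInjectedFault() throws IOException {
    MockDirectoryWrapper dir = newMockDirectory(); // always a mock wrapper
    dir.failOn(new MockDirectoryWrapper.Failure() {
      @Override
      public void eval(MockDirectoryWrapper d) throws IOException {
        // hypothetical fault: a real Failure inspects the call stack and
        // throws IOException when the interesting code path is active
      }
    });
    dir.close();
  }
}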
@@ -37,6 +37,7 @@ import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
@@ -54,7 +55,7 @@ public class Test10KPulsings extends LuceneTestCase {
     Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing40PostingsFormat(1));

     File f = _TestUtil.getTempDir("10kpulsed");
-    MockDirectoryWrapper dir = newFSDirectory(f);
+    BaseDirectoryWrapper dir = newFSDirectory(f);
     dir.setCheckIndexOnClose(false); // we do this ourselves explicitly
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
       newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
@@ -105,7 +106,7 @@ public class Test10KPulsings extends LuceneTestCase {
     Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing40PostingsFormat(freqCutoff));

     File f = _TestUtil.getTempDir("10knotpulsed");
-    MockDirectoryWrapper dir = newFSDirectory(f);
+    BaseDirectoryWrapper dir = newFSDirectory(f);
     dir.setCheckIndexOnClose(false); // we do this ourselves explicitly
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
       newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
@@ -33,6 +33,7 @@ import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
@@ -84,7 +85,7 @@ public class TestPulsingReuse extends LuceneTestCase {
   public void testNestedPulsing() throws Exception {
     // we always run this test with pulsing codec.
     Codec cp = _TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat());
-    MockDirectoryWrapper dir = newDirectory();
+    BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false); // will do this ourselves, custom codec
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
       newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setCodec(cp));
@@ -25,6 +25,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
@@ -34,13 +35,15 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
  * Test indexes ~82M docs with 26 terms each, so you get > Integer.MAX_VALUE terms/docs pairs
  * @lucene.experimental
  */
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class Test2BPostings extends LuceneTestCase {

   @Nightly
   public void test() throws Exception {
-    MockDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostings"));
-    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BPostings"));
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    }
     dir.setCheckIndexOnClose(false); // don't double-checkindex

     IndexWriter w = new IndexWriter(dir,
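The recurring pattern in these test changes: newDirectory()/newFSDirectory() now return BaseDirectoryWrapper, which is only sometimes a MockDirectoryWrapper underneath, so mock-only knobs are applied behind an instanceof check. Sketched as a hypothetical helper, not part of this commit:

import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;

public final class TestDirUtil {
  private TestDirUtil() {}

  /** Disables throttling when (and only when) the wrapper is a mock. */
  public static void disableThrottling(BaseDirectoryWrapper dir) {
    if (dir instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    }
  }
}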
@@ -41,7 +41,7 @@ import java.util.Random;
 //
 //   java -server -Xmx8g -d64 -cp .:lib/junit-4.10.jar:./build/classes/test:./build/classes/test-framework:./build/classes/java -Dlucene.version=4.0-dev -Dtests.directory=MMapDirectory -DtempDir=build -ea org.junit.runner.JUnitCore org.apache.lucene.index.Test2BTerms
 //
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class Test2BTerms extends LuceneTestCase {

   private final static int TOKEN_LEN = 10;
@@ -146,9 +146,11 @@ public class Test2BTerms extends LuceneTestCase {

     List<BytesRef> savedTerms = null;

-    MockDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
+    BaseDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
     //MockDirectoryWrapper dir = newFSDirectory(new File("/p/lucene/indices/2bindex"));
-    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    }
     dir.setCheckIndexOnClose(false); // don't double-checkindex

     if (true) {
@@ -51,6 +51,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
@@ -1168,7 +1169,7 @@ public class TestAddIndexes extends LuceneTestCase {
    * simple test that ensures we getting expected exceptions
    */
   public void testAddIndexMissingCodec() throws IOException {
-    MockDirectoryWrapper toAdd = newDirectory();
+    BaseDirectoryWrapper toAdd = newDirectory();
     // Disable checkIndex, else we get an exception because
     // of the unregistered codec:
     toAdd.setCheckIndexOnClose(false);
@@ -55,6 +55,7 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
@@ -177,7 +178,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     }
     File oldIndxeDir = _TestUtil.getTempDir(unsupportedNames[i]);
     _TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndxeDir);
-    MockDirectoryWrapper dir = newFSDirectory(oldIndxeDir);
+    BaseDirectoryWrapper dir = newFSDirectory(oldIndxeDir);
     // don't checkindex, these are intentionally not supported
     dir.setCheckIndexOnClose(false);

@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;

@@ -67,7 +68,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
   // Make sure running BG merges still work fine even when
   // we are hitting exceptions during flushing.
   public void testFlushExceptions() throws IOException {
-    MockDirectoryWrapper directory = newDirectory();
+    MockDirectoryWrapper directory = newMockDirectory();
     FailOnlyOnFlush failure = new FailOnlyOnFlush();
     directory.failOn(failure);

@@ -120,7 +121,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
   // Test that deletes committed after a merge started and
   // before it finishes, are correctly merged back:
   public void testDeleteMerging() throws IOException {
-    MockDirectoryWrapper directory = newDirectory();
+    Directory directory = newDirectory();

     LogDocMergePolicy mp = new LogDocMergePolicy();
     // Force degenerate merging so we can get a mix of
@@ -164,7 +165,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
   }

   public void testNoExtraFiles() throws IOException {
-    MockDirectoryWrapper directory = newDirectory();
+    Directory directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setMaxBufferedDocs(2));
@@ -195,7 +196,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
   }

   public void testNoWaitClose() throws IOException {
-    MockDirectoryWrapper directory = newDirectory();
+    Directory directory = newDirectory();
     Document doc = new Document();
     Field idField = newStringField("id", "", Field.Store.YES);
     doc.add(idField);
@@ -30,7 +30,7 @@ import org.apache.lucene.document.Document;
 public class TestCrash extends LuceneTestCase {

   private IndexWriter initIndex(Random random, boolean initialCommit) throws IOException {
-    return initIndex(random, newDirectory(random), initialCommit);
+    return initIndex(random, newMockDirectory(random), initialCommit);
   }

   private IndexWriter initIndex(Random random, MockDirectoryWrapper dir, boolean initialCommit) throws IOException {
@@ -29,7 +29,6 @@ import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
@@ -44,7 +43,7 @@ public class TestCustomNorms extends LuceneTestCase {

   public void testFloatNorms() throws IOException {

-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random()));
     Similarity provider = new MySimProvider();
@@ -85,7 +84,7 @@ public class TestCustomNorms extends LuceneTestCase {
   }

   public void testExceptionOnRandomType() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random()));
     Similarity provider = new MySimProvider();
@@ -89,7 +89,7 @@ public class TestDocTermOrds extends LuceneTestCase {
   }

   public void testRandom() throws Exception {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();

     final int NUM_TERMS = atLeast(20);
     final Set<BytesRef> terms = new HashSet<BytesRef>();
@@ -176,7 +176,7 @@ public class TestDocTermOrds extends LuceneTestCase {
   }

   public void testRandomWithPrefix() throws Exception {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();

     final Set<String> prefixes = new HashSet<String>();
     final int numPrefix = _TestUtil.nextInt(random(), 2, 7);
@@ -26,7 +26,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -148,7 +148,7 @@ public class TestFilterAtomicReader extends LuceneTestCase {
     Directory target = newDirectory();

     // We mess with the postings so this can fail:
-    ((MockDirectoryWrapper) target).setCrossCheckTermVectorsOnClose(false);
+    ((BaseDirectoryWrapper) target).setCrossCheckTermVectorsOnClose(false);

     writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     IndexReader reader = new TestReader(DirectoryReader.open(directory));
@@ -231,7 +231,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase {
     final int numDocumentsToIndex = 50 + random().nextInt(50);
     for (int i = 0; i < numThreads.length; i++) {
       AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
-      MockDirectoryWrapper dir = newDirectory();
+      MockDirectoryWrapper dir = newMockDirectory();
       // mock a very slow harddisk sometimes here so that flushing is very slow
       dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES);
       IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT,
@@ -37,7 +37,7 @@ public class TestForTooMuchCloning extends LuceneTestCase {
     // NOTE: if we see a fail on this test with "NestedPulsing" its because its
     // reuse isnt perfect (but reasonable). see TestPulsingReuse.testNestedPulsing
     // for more details
-    final MockDirectoryWrapper dir = newDirectory();
+    final MockDirectoryWrapper dir = newMockDirectory();
     final TieredMergePolicy tmp = new TieredMergePolicy();
     tmp.setMaxMergeAtOnce(2);
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir,
@@ -39,8 +39,10 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestIndexFileDeleter extends LuceneTestCase {

   public void testDeleteLeftoverFiles() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    dir.setPreventDoubleWrite(false);
+    Directory dir = newDirectory();
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
+    }

     LogMergePolicy mergePolicy = newLogMergePolicy(true, 10);
     mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
@@ -35,6 +35,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -212,7 +213,7 @@ public class TestIndexWriter extends LuceneTestCase {


   public void testIndexNoDocuments() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     writer.commit();
     writer.close();
@@ -234,7 +235,7 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testManyFields() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(10));
     for(int j=0;j<100;j++) {
       Document doc = new Document();
@@ -264,7 +265,7 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testSmallRAMBuffer() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(
         dir,
         newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).
@@ -404,7 +405,7 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testDiverseDocs() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setRAMBufferSizeMB(0.5));
     int n = atLeast(1);
     for(int i=0;i<n;i++) {
@@ -453,7 +454,7 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testEnablingNorms() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(10));
     // Enable norms for only 1 doc, pre flush
     FieldType customType = new FieldType(TextField.TYPE_STORED);
@@ -509,7 +510,7 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testHighFreqTerm() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())).setRAMBufferSizeMB(0.01));
     // Massive doc that has 128 K a's
@@ -1270,7 +1271,7 @@ public class TestIndexWriter extends LuceneTestCase {

   public void testDeleteUnusedFiles() throws Exception {
     for(int iter=0;iter<2;iter++) {
-      Directory dir = newDirectory();
+      Directory dir = newMockDirectory(); // relies on windows semantics

       LogMergePolicy mergePolicy = newLogMergePolicy(true);
       mergePolicy.setNoCFSRatio(1); // This test expects all of its segments to be in CFS
@@ -1799,4 +1800,40 @@ public class TestIndexWriter extends LuceneTestCase {
     r.close();
     dir.close();
   }
+
+  public void testDontInvokeAnalyzerForUnAnalyzedFields() throws Exception {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        throw new IllegalStateException("don't invoke me!");
+      }
+
+      @Override
+      public int getPositionIncrementGap(String fieldName) {
+        throw new IllegalStateException("don't invoke me!");
+      }
+
+      @Override
+      public int getOffsetGap(String fieldName) {
+        throw new IllegalStateException("don't invoke me!");
+      }
+    };
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, analyzer));
+    Document doc = new Document();
+    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    Field f = newField("field", "abcd", customType);
+    doc.add(f);
+    doc.add(f);
+    Field f2 = newField("field", "", customType);
+    doc.add(f2);
+    doc.add(f);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
 }
@@ -93,7 +93,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
    * and add docs to it.
    */
   public void testCommitOnCloseAbort() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(10));
     for (int i = 0; i < 14; i++) {
       TestIndexWriter.addDoc(writer);
@@ -139,7 +139,9 @@ public class TestIndexWriterCommit extends LuceneTestCase {

     // On abort, writer in fact may write to the same
     // segments_N file:
-    dir.setPreventDoubleWrite(false);
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
+    }

     for(int i=0;i<12;i++) {
       for(int j=0;j<17;j++) {
@@ -179,7 +181,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
     final String idFormat = _TestUtil.getPostingsFormat("id");
     final String contentFormat = _TestUtil.getPostingsFormat("content");
     assumeFalse("This test cannot run with Memory codec", idFormat.equals("Memory") || contentFormat.equals("Memory"));
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     Analyzer analyzer;
     if (random().nextBoolean()) {
       // no payloads
@@ -258,11 +260,13 @@ public class TestIndexWriterCommit extends LuceneTestCase {
    * and close().
    */
   public void testCommitOnCloseForceMerge() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     // Must disable throwing exc on double-write: this
     // test uses IW.rollback which easily results in
     // writing to same file more than once
-    dir.setPreventDoubleWrite(false);
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
+    }
     IndexWriter writer = new IndexWriter(
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
@@ -543,8 +547,10 @@ public class TestIndexWriterCommit extends LuceneTestCase {

   // LUCENE-1274: test writer.prepareCommit()
   public void testPrepareCommitRollback() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    dir.setPreventDoubleWrite(false);
+    Directory dir = newDirectory();
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
+    }

     IndexWriter writer = new IndexWriter(
         dir,
@@ -426,7 +426,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     int END_COUNT = 144;

     // First build up a starting index:
-    MockDirectoryWrapper startDir = newDirectory();
+    MockDirectoryWrapper startDir = newMockDirectory();
     // TODO: find the resource leak that only occurs sometimes here.
     startDir.setNoDeleteOpenFile(false);
     IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
@@ -689,7 +689,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };

-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy()));

@@ -814,7 +814,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };

-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
     modifier.commit();
     dir.failOn(failure.reset());
@@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -221,7 +222,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     if (VERBOSE) {
       System.out.println("\nTEST: start testRandomExceptions");
     }
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();

     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
@@ -265,7 +266,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   }

   public void testRandomExceptionsThreads() throws Throwable {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
     MockIndexWriter writer = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
@@ -556,7 +557,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   // LUCENE-1072: make sure an errant exception on flushing
   // one segment only takes out those docs in that one flush
   public void testDocumentsWriterAbort() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     FailOnlyOnFlush failure = new FailOnlyOnFlush();
     failure.setDoFail();
     dir.failOn(failure);
@@ -597,7 +598,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     if (VERBOSE) {
       System.out.println("TEST: cycle i=" + i);
     }
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));

     // don't allow a sudden merge to clean up the deleted
@@ -692,7 +693,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     final int NUM_ITER = 100;

     for(int i=0;i<2;i++) {
-      MockDirectoryWrapper dir = newDirectory();
+      Directory dir = newDirectory();

       {
         final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
@@ -822,7 +823,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {

   // LUCENE-1044: test exception during sync
   public void testExceptionDuringSync() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     FailOnlyInSync failure = new FailOnlyInSync();
     dir.failOn(failure);

@@ -908,7 +909,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   };

   for (FailOnlyInCommit failure : failures) {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     dir.setFailOnCreateOutput(false);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())));
@@ -1076,7 +1077,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   // latest segments file and make sure we get an
   // IOException trying to open the index:
   public void testSimulatedCorruptIndex1() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false); // we are corrupting it!

     IndexWriter writer = null;
@@ -1124,7 +1125,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   // files and make sure we get an IOException trying to
   // open the index:
   public void testSimulatedCorruptIndex2() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false); // we are corrupting it!
     IndexWriter writer = null;

@@ -1174,8 +1175,10 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   // gracefully fallback to the previous segments file),
   // and that we can add to the index:
   public void testSimulatedCrashedWriter() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    dir.setPreventDoubleWrite(false);
+    Directory dir = newDirectory();
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
+    }

     IndexWriter writer = null;

@@ -1240,7 +1243,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   int num = atLeast(1);
   for (int j = 0; j < num; j++) {
     for (FailOnTermVectors failure : failures) {
-      MockDirectoryWrapper dir = newDirectory();
+      MockDirectoryWrapper dir = newMockDirectory();
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random())));
       dir.failOn(failure);
@@ -31,7 +31,7 @@ import org.apache.lucene.util._TestUtil;
 public class TestIndexWriterForceMerge extends LuceneTestCase {
   public void testPartialMerge() throws IOException {

-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();

     final Document doc = new Document();
     doc.add(newStringField("content", "aaa", Field.Store.NO));
@@ -72,7 +72,7 @@ public class TestIndexWriterForceMerge extends LuceneTestCase {
   }

   public void testMaxNumSegments2() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    Directory dir = newDirectory();

     final Document doc = new Document();
     doc.add(newStringField("content", "aaa", Field.Store.NO));
@@ -121,7 +121,7 @@ public class TestIndexWriterForceMerge extends LuceneTestCase {
    */
   public void testForceMergeTempSpaceUsage() throws IOException {

-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
     if (VERBOSE) {
       System.out.println("TEST: config1=" + writer.getConfig());
@@ -182,7 +182,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {

     // Now, build a starting index that has START_COUNT docs. We
     // will then try to addIndexes into a copy of this:
-    MockDirectoryWrapper startDir = newDirectory();
+    MockDirectoryWrapper startDir = newMockDirectory();
     IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
     for(int j=0;j<START_COUNT;j++) {
       addDocWithIndex(writer, j);
@@ -476,7 +476,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {

   // LUCENE-2593
   public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     //IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderPooling(true));
     IndexWriter w = new IndexWriter(
         dir,
@@ -520,7 +520,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
   // an IndexWriter (hit during DW.ThreadState.init()) is
   // OK:
   public void testImmediateDiskFull() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
     dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
@@ -27,9 +27,7 @@ import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.List;

-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util._TestUtil;

@@ -134,7 +132,7 @@ public class TestIndexWriterOnJRECrash extends TestNRTThreads {
    */
   public boolean checkIndexes(File file) throws IOException {
     if (file.isDirectory()) {
-      MockDirectoryWrapper dir = newFSDirectory(file);
+      BaseDirectoryWrapper dir = newFSDirectory(file);
       dir.setCheckIndexOnClose(false); // don't double-checkindex
       if (DirectoryReader.indexExists(dir)) {
         if (VERBOSE) {
@@ -708,7 +708,7 @@ public class TestIndexWriterReader extends LuceneTestCase {

   // Stress test reopen during addIndexes
   public void testDuringAddIndexes() throws Exception {
-    MockDirectoryWrapper dir1 = newDirectory();
+    Directory dir1 = newDirectory();
     final IndexWriter writer = new IndexWriter(
         dir1,
         newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).
@@ -781,8 +781,10 @@ public class TestIndexWriterReader extends LuceneTestCase {

     assertEquals(0, excs.size());
     r.close();
-    final Collection<String> openDeletedFiles = dir1.getOpenDeletedFiles();
-    assertEquals("openDeleted=" + openDeletedFiles, 0, openDeletedFiles.size());
+    if (dir1 instanceof MockDirectoryWrapper) {
+      final Collection<String> openDeletedFiles = ((MockDirectoryWrapper)dir1).getOpenDeletedFiles();
+      assertEquals("openDeleted=" + openDeletedFiles, 0, openDeletedFiles.size());
+    }

     writer.close();

@@ -976,7 +978,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     // Don't proceed if picked Codec is in the list of illegal ones.
     final String format = _TestUtil.getPostingsFormat("f");
     assumeFalse("Format: " + format + " does not support ReaderTermsIndexDivisor!",
-        (format.equals("SimpleText") || format.equals("Memory")));
+        (format.equals("SimpleText") || format.equals("Memory") || format.equals("Direct")));

     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, conf);
@@ -31,6 +31,7 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.Bits;
@@ -130,7 +131,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
     if (VERBOSE) {
      System.out.println("\nTEST: iter=" + iter);
     }
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();
     IndexWriter writer = new IndexWriter(
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
@@ -245,7 +246,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
     if (VERBOSE) {
       System.out.println("TEST: iter=" + iter);
     }
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();

     IndexWriter writer = new IndexWriter(
         dir,
@@ -302,7 +303,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
   // Runs test, with one thread, using the specific failure
   // to trigger an IOException
   public void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure) throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
+    MockDirectoryWrapper dir = newMockDirectory();

     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
@@ -435,7 +436,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
   // and closes before the second IndexWriter time's out trying to get the Lock,
   // we should see both documents
   public void testOpenTwoIndexWritersOnDifferentThreads() throws IOException, InterruptedException {
-    final MockDirectoryWrapper dir = newDirectory();
+    final Directory dir = newDirectory();
     CountDownLatch oneIWConstructed = new CountDownLatch(1);
     DelayedIndexAndCloseRunnable thread1 = new DelayedIndexAndCloseRunnable(
         dir, oneIWConstructed);
@@ -503,8 +504,10 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {

   // LUCENE-4147
   public void testRollbackAndCommitWithThreads() throws Exception {
-    final MockDirectoryWrapper d = newFSDirectory(_TestUtil.getTempDir("RollbackAndCommitWithThreads"));
-    d.setPreventDoubleWrite(false);
+    final BaseDirectoryWrapper d = newFSDirectory(_TestUtil.getTempDir("RollbackAndCommitWithThreads"));
+    if (d instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)d).setPreventDoubleWrite(false);
+    }

     final int threadCount = _TestUtil.nextInt(random(), 2, 6);

@@ -132,8 +132,9 @@ public class TestLazyProxSkipping extends LuceneTestCase {

   public void testLazySkipping() throws IOException {
     final String fieldFormat = _TestUtil.getPostingsFormat(this.field);
-    assumeFalse("This test cannot run with Memory codec", fieldFormat.equals("Memory"));
-    assumeFalse("This test cannot run with SimpleText codec", fieldFormat.equals("SimpleText"));
+    assumeFalse("This test cannot run with Memory postings format", fieldFormat.equals("Memory"));
+    assumeFalse("This test cannot run with Direct postings format", fieldFormat.equals("Direct"));
+    assumeFalse("This test cannot run with SimpleText postings format", fieldFormat.equals("SimpleText"));

     // test whether only the minimum amount of seeks()
     // are performed
@@ -37,7 +37,7 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util._TestUtil;

-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestLongPostings extends LuceneTestCase {

   // Produces a realistic unicode random string that
@@ -28,7 +28,7 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 //   - mix in forceMerge, addIndexes
 //   - randomoly mix in non-congruent docs

-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase {

   @Override
@@ -54,7 +54,7 @@ public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase {
     }
     r.close();
     writer.commit();
-    final Set<String> openDeletedFiles = ((MockDirectoryWrapper) dir).getOpenDeletedFiles();
+    final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
     if (openDeletedFiles.size() > 0) {
       System.out.println("OBD files: " + openDeletedFiles);
     }
@@ -80,7 +80,7 @@ public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase {
     r.close();

     //System.out.println("numDocs=" + r.numDocs() + " openDelFileCount=" + dir.openDeleteFileCount());
-    final Set<String> openDeletedFiles = ((MockDirectoryWrapper) dir).getOpenDeletedFiles();
+    final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
     if (openDeletedFiles.size() > 0) {
       System.out.println("OBD files: " + openDeletedFiles);
     }
|
@@ -24,6 +24,7 @@ import java.util.Set;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;

@@ -35,12 +36,14 @@ public class TestNeverDelete extends LuceneTestCase {
 
   public void testIndexing() throws Exception {
     final File tmpDir = _TestUtil.getTempDir("TestNeverDelete");
-    final MockDirectoryWrapper d = newFSDirectory(tmpDir);
+    final BaseDirectoryWrapper d = newFSDirectory(tmpDir);
 
     // We want to "see" files removed if Lucene removed
     // them.  This is still worth running on Windows since
     // some files the IR opens and closes.
-    d.setNoDeleteOpenFile(false);
+    if (d instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)d).setNoDeleteOpenFile(false);
+    }
     final RandomIndexWriter w = new RandomIndexWriter(random(),
                                                       d,
                                                       newIndexWriterConfig(TEST_VERSION_CURRENT,

@@ -40,7 +40,7 @@ import org.apache.lucene.util._TestUtil;
  * Test that norms info is preserved during index life - including
  * separate norms, addDocument, addIndexes, forceMerge.
  */
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 @Slow
 public class TestNorms extends LuceneTestCase {
   final String byteTestField = "normsTestByte";

@@ -35,7 +35,7 @@ public class TestRollingUpdates extends LuceneTestCase {
   @Test
   public void testRollingUpdates() throws Exception {
     Random random = new Random(random().nextLong());
-    final MockDirectoryWrapper dir = newDirectory();
+    final BaseDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false); // we use a custom codec provider
     final LineFileDocs docs = new LineFileDocs(random, true);
 

@@ -37,7 +37,7 @@ import org.apache.lucene.util.automaton.BasicAutomata;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
 
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestTermsEnum extends LuceneTestCase {
 
   public void test() throws Exception {

@@ -41,7 +41,7 @@ import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.ThreadInterruptedException;
 
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase {
 
   private final ThreadLocal<Long> lastGens = new ThreadLocal<Long>();

@@ -29,7 +29,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
 
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestSearchWithThreads extends LuceneTestCase {
   int NUM_DOCS;
   final int NUM_SEARCH_THREADS = 5;

@@ -43,7 +43,7 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.NamedThreadFactory;
 import org.apache.lucene.util._TestUtil;
 
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
 
   boolean warmCalled;

@@ -41,7 +41,7 @@ import org.apache.lucene.util._TestUtil;
 //   - test pulling docs in 2nd round trip...
 //   - filter too
 
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestShardSearching extends ShardSearchingTestBase {
 
   private static class PreviousSearchState {
@@ -18,9 +18,11 @@ package org.apache.lucene.store;
  */
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 
+import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 

@@ -41,6 +43,89 @@ public class TestDirectory extends LuceneTestCase {
       }
     }
   }
 
+  // test is occasionally very slow, i dont know why
+  // try this seed: 7D7E036AD12927F5:93333EF9E6DE44DE
+  @Nightly
+  public void testThreadSafety() throws Exception {
+    final BaseDirectoryWrapper dir = newDirectory();
+    dir.setCheckIndexOnClose(false); // we arent making an index
+    if (dir instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)dir).setThrottling(Throttling.NEVER); // makes this test really slow
+    }
+
+    if (VERBOSE) {
+      System.out.println(dir);
+    }
+
+    class TheThread extends Thread {
+      private String name;
+
+      public TheThread(String name) {
+        this.name = name;
+      }
+
+      public void run() {
+        for (int i = 0; i < 3000; i++) {
+          String fileName = this.name + i;
+          try {
+            //System.out.println("create:" + fileName);
+            IndexOutput output = dir.createOutput(fileName, newIOContext(random()));
+            output.close();
+            assertTrue(dir.fileExists(fileName));
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }
+      }
+    };
+
+    class TheThread2 extends Thread {
+      private String name;
+
+      public TheThread2(String name) {
+        this.name = name;
+      }
+
+      public void run() {
+        for (int i = 0; i < 10000; i++) {
+          try {
+            String[] files = dir.listAll();
+            for (String file : files) {
+              //System.out.println("file:" + file);
+              try {
+                IndexInput input = dir.openInput(file, newIOContext(random()));
+                input.close();
+              } catch (FileNotFoundException e) {
+                // ignore
+              } catch (IOException e) {
+                if (e.getMessage().contains("still open for writing")) {
+                  // ignore
+                } else {
+                  throw new RuntimeException(e);
+                }
+              }
+              if (random().nextBoolean()) {
+                break;
+              }
+            }
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }
+      }
+    };
+
+    TheThread theThread = new TheThread("t1");
+    TheThread2 theThread2 = new TheThread2("t2");
+    theThread.start();
+    theThread2.start();
+
+    theThread.join();
+    theThread2.join();
+
+    dir.close();
+  }
+
 
   // Test that different instances of FSDirectory can coexist on the same
@@ -67,7 +67,7 @@ import org.apache.lucene.util.fst.FST.BytesReader;
 import org.apache.lucene.util.fst.PairOutputs.Pair;
 import org.apache.lucene.util.packed.PackedInts;
 
-@SuppressCodecs({ "SimpleText", "Memory" })
+@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 @Slow
 public class TestFSTs extends LuceneTestCase {
 

@@ -76,7 +76,7 @@ public class TestFSTs extends LuceneTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    dir = newDirectory();
+    dir = newMockDirectory();
     dir.setPreventDoubleWrite(false);
   }
 

@@ -1107,7 +1107,7 @@ public class TestFSTs extends LuceneTestCase {
     final int RUN_TIME_MSEC = atLeast(500);
     final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64);
     final File tempDir = _TestUtil.getTempDir("fstlines");
-    final MockDirectoryWrapper dir = newFSDirectory(tempDir);
+    final Directory dir = newFSDirectory(tempDir);
     final IndexWriter writer = new IndexWriter(dir, conf);
     final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
     Document doc;
@@ -72,12 +72,14 @@ public class FacetsCollector extends Collector {
   protected ScoredDocIdCollector initScoredDocCollector(
       FacetSearchParams facetSearchParams, IndexReader indexReader,
       TaxonomyReader taxonomyReader) {
+    boolean scoresNeeded = false;
     for (FacetRequest frq : facetSearchParams.getFacetRequests()) {
       if (frq.requireDocumentScore()) {
-        return ScoredDocIdCollector.create(1000, true);
+        scoresNeeded = true;
+        break;
       }
     }
-    return ScoredDocIdCollector.create(indexReader.maxDoc(), false);
+    return ScoredDocIdCollector.create(indexReader.maxDoc(), scoresNeeded);
   }
 
   /**
@@ -9,7 +9,7 @@ import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.util.FixedBitSet;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more

@@ -52,7 +52,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
     @Override
     public void collect(int doc) {
-      docIds.fastSet(docBase + doc);
+      docIds.set(docBase + doc);
       ++numDocIds;
     }
 

@@ -103,7 +103,9 @@ public abstract class ScoredDocIdCollector extends Collector {
     @SuppressWarnings("synthetic-access")
     public ScoringDocIdCollector(int maxDoc) {
       super(maxDoc);
-      scores = new float[maxDoc];
+      // only matching documents have an entry in the scores array. Therefore start with
+      // a small array and grow when needed.
+      scores = new float[64];
     }
 
     @Override
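The constructor change above sets up a grow-on-demand buffer. As a standalone sketch of the idiom (illustrative code, not part of the patch; the real growth happens in collect() below):

    import java.util.Arrays;

    // Start small and grow geometrically: n appends cost O(n) copies in total,
    // and memory stays proportional to the number of matching docs, not maxDoc.
    class ScoreBuffer {
      private float[] scores = new float[64];
      private int size = 0;

      void append(float score) {
        if (size >= scores.length) {
          scores = Arrays.copyOf(scores, scores.length * 2);
        }
        scores[size++] = score;
      }
    }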
@@ -111,7 +113,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
     @Override
     public void collect(int doc) throws IOException {
-      docIds.fastSet(docBase + doc);
+      docIds.set(docBase + doc);
 
       float score = this.scorer.score();
       if (numDocIds >= scores.length) {

@@ -167,7 +169,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
   protected int numDocIds;
   protected int docBase;
-  protected final OpenBitSet docIds;
+  protected final FixedBitSet docIds;
 
   /**
    * Creates a new {@link ScoredDocIdCollector} with the given parameters.

@@ -187,7 +189,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
   private ScoredDocIdCollector(int maxDoc) {
     numDocIds = 0;
-    docIds = new OpenBitSet(maxDoc);
+    docIds = new FixedBitSet(maxDoc);
   }
 
   /** Returns the default score used when scoring is disabled. */
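The OpenBitSet-to-FixedBitSet switch above trades the unchecked fastSet() call for a fixed-size, bounds-checked set(), a good fit now that the collector always sizes the set by maxDoc. A minimal standalone illustration (not from the patch):

    import org.apache.lucene.util.FixedBitSet;

    public class DocIdSetDemo {
      public static void main(String[] args) {
        int maxDoc = 1000;                         // one bit per document in the segment
        FixedBitSet docIds = new FixedBitSet(maxDoc);
        docIds.set(42);                            // mark doc 42 as collected
        System.out.println(docIds.get(42));        // true
        System.out.println(docIds.cardinality());  // 1 = number of collected docs
      }
    }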
@@ -375,7 +375,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
    * returning the category's ordinal, or a negative number in case the
    * category does not yet exist in the taxonomy.
    */
-  protected int findCategory(CategoryPath categoryPath) throws IOException {
+  protected synchronized int findCategory(CategoryPath categoryPath) throws IOException {
     // If we can find the category in the cache, or we know the cache is
     // complete, we can return the response directly from it
     int res = cache.get(categoryPath);

@@ -474,12 +474,11 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
   @Override
   public int addCategory(CategoryPath categoryPath) throws IOException {
     ensureOpen();
-    // If the category is already in the cache and/or the taxonomy, we
-    // should return its existing ordinal
-    int res = findCategory(categoryPath);
+    // check the cache outside the synchronized block. this results in better
+    // concurrency when categories are there.
+    int res = cache.get(categoryPath);
     if (res < 0) {
-      // the category is neither in the cache nor in the index - following code
-      // cannot be executed in parallel.
+      // the category is not in the cache - following code cannot be executed in parallel.
       synchronized (this) {
         res = findCategory(categoryPath);
         if (res < 0) {

@@ -494,7 +493,6 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
       }
     }
     return res;
-
   }
 
   /**
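The rewritten addCategory() above is the classic check / lock / re-check idiom: probe the cache without a lock on the hot path, then re-run the (now synchronized) findCategory() under the lock so two threads that both miss cannot create the same category twice. A standalone sketch of the idiom with hypothetical names, plain JDK only:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Hypothetical cache demonstrating check-then-lock-then-recheck.
    class OrdinalCache {
      private final ConcurrentMap<String,Integer> cache = new ConcurrentHashMap<String,Integer>();
      private int nextOrdinal = 0;

      int addCategory(String path) {
        Integer res = cache.get(path);   // lock-free probe on the hot path
        if (res == null) {
          synchronized (this) {
            res = cache.get(path);       // re-check: another thread may have won the race
            if (res == null) {
              res = nextOrdinal++;       // guarded by the lock, so created exactly once
              cache.put(path, res);
            }
          }
        }
        return res;
      }
    }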
@@ -22,22 +22,25 @@ import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 
 /**
  * TaxonomyWriterCache is a relatively simple interface for a cache of
- * category->ordinal mappings, used in TaxonomyWriter implementations
- * (such as {@link DirectoryTaxonomyWriter}).
- * <P>
- * It basically has put() methods for adding a mapping, and get() for looking
- * a mapping up the cache. The cache does <B>not</B> guarantee to hold
- * everything that has been put into it, and might in fact selectively
- * delete some of the mappings (e.g., the ones least recently used).
- * This means that if get() returns a negative response, it does not
- * necessarily mean that the category doesn't exist - just that it is not
- * in the cache. The caller can only infer that the category doesn't exist
- * if it knows the cache to be complete (because all the categories were
- * loaded into the cache, and since then no put() returned true).
- * <P> However,
- * if it does so, it should clear out large parts of the cache at once, because
- * the user will typically need to work hard to recover from every cache
+ * category->ordinal mappings, used in TaxonomyWriter implementations (such as
+ * {@link DirectoryTaxonomyWriter}).
+ * <p>
+ * It basically has put() methods for adding a mapping, and get() for looking a
+ * mapping up the cache. The cache does <B>not</B> guarantee to hold everything
+ * that has been put into it, and might in fact selectively delete some of the
+ * mappings (e.g., the ones least recently used). This means that if get()
+ * returns a negative response, it does not necessarily mean that the category
+ * doesn't exist - just that it is not in the cache. The caller can only infer
+ * that the category doesn't exist if it knows the cache to be complete (because
+ * all the categories were loaded into the cache, and since then no put()
+ * returned true).
+ * <p>
+ * However, if it does so, it should clear out large parts of the cache at once,
+ * because the user will typically need to work hard to recover from every cache
  * cleanup (see {@link #put(CategoryPath, int)}'s return value).
+ * <p>
+ * <b>NOTE:</b> the cache may be accessed concurrently by multiple threads,
+ * therefore cache implementations should take this into consideration.
  *
  * @lucene.experimental
  */
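Put differently: a negative get() only proves absence when the caller knows the cache is complete. A hedged sketch of a caller honoring that contract (the class and its fallback method are hypothetical; import paths assume the 4.0 facet module layout):

    import java.io.IOException;

    import org.apache.lucene.facet.taxonomy.CategoryPath;
    import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;

    class CacheLookupSketch {
      int ordinalOf(TaxonomyWriterCache cache, CategoryPath cp, boolean cacheComplete) throws IOException {
        int ord = cache.get(cp);
        if (ord < 0 && !cacheComplete) {
          ord = findInTaxonomyIndex(cp);  // hypothetical slow path against the taxonomy index
          if (ord >= 0) {
            cache.put(cp, ord);           // may evict other entries; see put()'s return value
          }
        }
        return ord;                       // still negative here => category really absent
      }

      private int findInTaxonomyIndex(CategoryPath cp) throws IOException {
        return -1;                        // stub for the illustration
      }
    }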
@@ -0,0 +1,88 @@
+package org.apache.lucene.facet.search;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.facet.index.CategoryDocumentBuilder;
+import org.apache.lucene.facet.search.params.FacetSearchParams;
+import org.apache.lucene.facet.search.params.ScoreFacetRequest;
+import org.apache.lucene.facet.search.results.FacetResult;
+import org.apache.lucene.facet.taxonomy.CategoryPath;
+import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestFacetsCollector extends LuceneTestCase {
+
+  @Test
+  public void testFacetsWithDocScore() throws Exception {
+    Directory indexDir = newDirectory();
+    Directory taxoDir = newDirectory();
+
+    TaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxoDir);
+    IndexWriter iw = new IndexWriter(indexDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+
+    CategoryDocumentBuilder cdb = new CategoryDocumentBuilder(taxonomyWriter);
+    Iterable<CategoryPath> cats = Arrays.asList(new CategoryPath("a"));
+    for(int i = atLeast(2000); i > 0; --i) {
+      Document doc = new Document();
+      doc.add(new StringField("f", "v", Store.NO));
+      cdb.setCategoryPaths(cats);
+      iw.addDocument(cdb.build(doc));
+    }
+
+    taxonomyWriter.close();
+    iw.close();
+
+    FacetSearchParams sParams = new FacetSearchParams();
+    sParams.addFacetRequest(new ScoreFacetRequest(new CategoryPath("a"), 10));
+
+    DirectoryReader r = DirectoryReader.open(indexDir);
+    DirectoryTaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);
+
+    FacetsCollector fc = new FacetsCollector(sParams, r, taxo);
+    TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, false);
+    new IndexSearcher(r).search(new MatchAllDocsQuery(), MultiCollector.wrap(fc, topDocs));
+
+    List<FacetResult> res = fc.getFacetResults();
+    double value = res.get(0).getFacetResultNode().getValue();
+    double expected = topDocs.topDocs().getMaxScore() * r.numDocs();
+    assertEquals(expected, value, 1E-10);
+
+    IOUtils.close(taxo, taxoDir, r, indexDir);
+  }
+
+}
@@ -376,4 +376,17 @@ public class AssertingAtomicReader extends FilterAtomicReader {
       return super.hasPayload();
     }
   }
+
+  // this is the same hack as FCInvisible
+  @Override
+  public Object getCoreCacheKey() {
+    return cacheKey;
+  }
+
+  @Override
+  public Object getCombinedCoreAndDeletesKey() {
+    return cacheKey;
+  }
+
+  private final Object cacheKey = new Object();
 }
@@ -32,6 +32,7 @@ import org.apache.lucene.codecs.asserting.AssertingPostingsFormat;
 import org.apache.lucene.codecs.lucene40.Lucene40Codec;
 import org.apache.lucene.codecs.lucene40.Lucene40PostingsFormat;
 import org.apache.lucene.codecs.lucene40ords.Lucene40WithOrds;
+import org.apache.lucene.codecs.memory.DirectPostingsFormat;
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
 import org.apache.lucene.codecs.mockintblock.MockFixedIntBlockPostingsFormat;
 import org.apache.lucene.codecs.mockintblock.MockVariableIntBlockPostingsFormat;

@@ -40,6 +41,7 @@ import org.apache.lucene.codecs.mocksep.MockSepPostingsFormat;
 import org.apache.lucene.codecs.nestedpulsing.NestedPulsingPostingsFormat;
 import org.apache.lucene.codecs.pulsing.Pulsing40PostingsFormat;
 import org.apache.lucene.codecs.simpletext.SimpleTextPostingsFormat;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 
 /**

@@ -87,9 +89,12 @@ public class RandomCodec extends Lucene40Codec {
     // block via CL:
     int minItemsPerBlock = _TestUtil.nextInt(random, 2, 100);
     int maxItemsPerBlock = 2*(Math.max(2, minItemsPerBlock-1)) + random.nextInt(100);
+    int lowFreqCutoff = _TestUtil.nextInt(random, 2, 100);
 
     add(avoidCodecs,
         new Lucene40PostingsFormat(minItemsPerBlock, maxItemsPerBlock),
+        new DirectPostingsFormat(LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock),
+                                 LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : lowFreqCutoff)),
         new Pulsing40PostingsFormat(1 + random.nextInt(20), minItemsPerBlock, maxItemsPerBlock),
         // add pulsing again with (usually) different parameters
         new Pulsing40PostingsFormat(1 + random.nextInt(20), minItemsPerBlock, maxItemsPerBlock),
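Randomizing the two DirectPostingsFormat constructor arguments between 1, a typical value, and Integer.MAX_VALUE exercises both its low- and high-frequency code paths. For reference, outside the randomized test codec, steering one field to DirectPostingsFormat works by overriding the same per-field hook that RandomCodec overrides; a sketch (field name illustrative, default DirectPostingsFormat constructor assumed):

    import org.apache.lucene.codecs.PostingsFormat;
    import org.apache.lucene.codecs.lucene40.Lucene40Codec;
    import org.apache.lucene.codecs.memory.DirectPostingsFormat;

    // Route one hot field to DirectPostingsFormat, everything else to the defaults.
    public class PerFieldDemoCodec extends Lucene40Codec {
      private final PostingsFormat direct = new DirectPostingsFormat();

      @Override
      public PostingsFormat getPostingsFormatForField(String field) {
        return "id".equals(field) ? direct : super.getPostingsFormatForField(field);
      }
    }

An IndexWriterConfig would then pick it up via setCodec(new PerFieldDemoCodec()).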
@@ -60,7 +60,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCase {
   protected final AtomicInteger delCount = new AtomicInteger();
   protected final AtomicInteger packCount = new AtomicInteger();
 
-  protected Directory dir;
+  protected MockDirectoryWrapper dir;
   protected IndexWriter writer;
 
   private static class SubDocs {

@@ -432,8 +432,8 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCase {
     Random random = new Random(random().nextLong());
     final LineFileDocs docs = new LineFileDocs(random, true);
     final File tempDir = _TestUtil.getTempDir(testName);
-    dir = newFSDirectory(tempDir);
-    ((MockDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
+    dir = newMockFSDirectory(tempDir); // some subclasses rely on this being MDW
+    dir.setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
     final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random())).setInfoStream(new FailOnNonBulkMergesInfoStream());
 
@@ -0,0 +1,174 @@
+package org.apache.lucene.store;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.util._TestUtil;
+
+/**
+ * Calls check index on close.
+ */
+// do NOT make any methods in this class synchronized, volatile
+// do NOT import anything from the concurrency package.
+// no randoms, no nothing.
+public class BaseDirectoryWrapper extends Directory {
+  /** our in directory */
+  protected final Directory delegate;
+  /** best effort: base on in Directory is volatile */
+  protected boolean open;
+
+  private boolean checkIndexOnClose = true;
+  private boolean crossCheckTermVectorsOnClose = true;
+
+  public BaseDirectoryWrapper(Directory delegate) {
+    this.delegate = delegate;
+  }
+
+  @Override
+  public void close() throws IOException {
+    open = false;
+    if (checkIndexOnClose && indexPossiblyExists()) {
+      _TestUtil.checkIndex(this, crossCheckTermVectorsOnClose);
+    }
+    delegate.close();
+  }
+
+  public boolean isOpen() {
+    return open;
+  }
+
+  /**
+   * don't rely upon DirectoryReader.fileExists to determine if we should
+   * checkIndex() or not. It might mask real problems, where we silently
+   * don't checkindex at all. instead we look for a segments file.
+   */
+  protected boolean indexPossiblyExists() {
+    String files[];
+    try {
+      files = listAll();
+    } catch (IOException ex) {
+      // this means directory doesn't exist, which is ok. return false
+      return false;
+    }
+    for (String f : files) {
+      if (f.startsWith("segments_")) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Set whether or not checkindex should be run
+   * on close
+   */
+  public void setCheckIndexOnClose(boolean value) {
+    this.checkIndexOnClose = value;
+  }
+
+  public boolean getCheckIndexOnClose() {
+    return checkIndexOnClose;
+  }
+
+  public void setCrossCheckTermVectorsOnClose(boolean value) {
+    this.crossCheckTermVectorsOnClose = value;
+  }
+
+  public boolean getCrossCheckTermVectorsOnClose() {
+    return crossCheckTermVectorsOnClose;
+  }
+
+  // directory methods: delegate
+
+  @Override
+  public String[] listAll() throws IOException {
+    return delegate.listAll();
+  }
+
+  @Override
+  public boolean fileExists(String name) throws IOException {
+    return delegate.fileExists(name);
+  }
+
+  @Override
+  public void deleteFile(String name) throws IOException {
+    delegate.deleteFile(name);
+  }
+
+  @Override
+  public long fileLength(String name) throws IOException {
+    return delegate.fileLength(name);
+  }
+
+  @Override
+  public IndexOutput createOutput(String name, IOContext context) throws IOException {
+    return delegate.createOutput(name, context);
+  }
+
+  @Override
+  public void sync(Collection<String> names) throws IOException {
+    delegate.sync(names);
+  }
+
+  @Override
+  public IndexInput openInput(String name, IOContext context) throws IOException {
+    return delegate.openInput(name, context);
+  }
+
+  @Override
+  public Lock makeLock(String name) {
+    return delegate.makeLock(name);
+  }
+
+  @Override
+  public void clearLock(String name) throws IOException {
+    delegate.clearLock(name);
+  }
+
+  @Override
+  public void setLockFactory(LockFactory lockFactory) throws IOException {
+    delegate.setLockFactory(lockFactory);
+  }
+
+  @Override
+  public LockFactory getLockFactory() {
+    return delegate.getLockFactory();
+  }
+
+  @Override
+  public String getLockID() {
+    return delegate.getLockID();
+  }
+
+  @Override
+  public String toString() {
+    return "BaseDirectoryWrapper(" + delegate.toString() + ")";
+  }
+
+  @Override
+  public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
+    delegate.copy(to, src, dest, context);
+  }
+
+  @Override
+  public IndexInputSlicer createSlicer(String name, IOContext context) throws IOException {
+    return delegate.createSlicer(name, context);
+  }
+}
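Subclasses inherit the delegation boilerplate and the close-time CheckIndex and only override what they need; MockDirectoryWrapper, refactored in the next hunks, is now exactly such a subclass. A minimal hypothetical subclass for illustration:

    import java.io.IOException;

    import org.apache.lucene.store.BaseDirectoryWrapper;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexOutput;

    // Hypothetical: count file creations, inherit everything else.
    class CountingDirectoryWrapper extends BaseDirectoryWrapper {
      int created;

      public CountingDirectoryWrapper(Directory delegate) {
        super(delegate);
      }

      @Override
      public IndexOutput createOutput(String name, IOContext context) throws IOException {
        created++;
        return super.createOutput(name, context);
      }
    }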
|
@ -57,8 +57,7 @@ import org.apache.lucene.util._TestUtil;
|
|||
* </ul>
|
||||
*/
|
||||
|
||||
public class MockDirectoryWrapper extends Directory {
|
||||
final Directory delegate;
|
||||
public class MockDirectoryWrapper extends BaseDirectoryWrapper {
|
||||
long maxSize;
|
||||
|
||||
// Max actual bytes used. This is set by MockRAMOutputStream:
|
||||
|
@ -67,8 +66,6 @@ public class MockDirectoryWrapper extends Directory {
|
|||
Random randomState;
|
||||
boolean noDeleteOpenFile = true;
|
||||
boolean preventDoubleWrite = true;
|
||||
boolean checkIndexOnClose = true;
|
||||
boolean crossCheckTermVectorsOnClose = true;
|
||||
boolean trackDiskUsage = false;
|
||||
private Set<String> unSyncedFiles;
|
||||
private Set<String> createdFiles;
|
||||
|
@ -109,7 +106,7 @@ public class MockDirectoryWrapper extends Directory {
|
|||
}
|
||||
|
||||
public MockDirectoryWrapper(Random random, Directory delegate) {
|
||||
this.delegate = delegate;
|
||||
super(delegate);
|
||||
// must make a private random since our methods are
|
||||
// called from different threads; else test failures may
|
||||
// not be reproducible from the original seed
|
||||
|
@ -251,19 +248,19 @@ public class MockDirectoryWrapper extends Directory {
|
|||
}
|
||||
}
|
||||
final IndexOutput tempOut = delegate.createOutput(tempFileName, LuceneTestCase.newIOContext(randomState));
|
||||
IndexInput in = delegate.openInput(name, LuceneTestCase.newIOContext(randomState));
|
||||
tempOut.copyBytes(in, in.length()/2);
|
||||
IndexInput ii = delegate.openInput(name, LuceneTestCase.newIOContext(randomState));
|
||||
tempOut.copyBytes(ii, ii.length()/2);
|
||||
tempOut.close();
|
||||
in.close();
|
||||
ii.close();
|
||||
|
||||
// Delete original and copy bytes back:
|
||||
deleteFile(name, true);
|
||||
|
||||
final IndexOutput out = delegate.createOutput(name, LuceneTestCase.newIOContext(randomState));
|
||||
in = delegate.openInput(tempFileName, LuceneTestCase.newIOContext(randomState));
|
||||
out.copyBytes(in, in.length());
|
||||
ii = delegate.openInput(tempFileName, LuceneTestCase.newIOContext(randomState));
|
||||
out.copyBytes(ii, ii.length());
|
||||
out.close();
|
||||
in.close();
|
||||
ii.close();
|
||||
deleteFile(tempFileName, true);
|
||||
} else if (damage == 3) {
|
||||
// The file survived intact:
|
||||
|
@ -316,26 +313,6 @@ public class MockDirectoryWrapper extends Directory {
|
|||
return noDeleteOpenFile;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set whether or not checkindex should be run
|
||||
* on close
|
||||
*/
|
||||
public void setCheckIndexOnClose(boolean value) {
|
||||
this.checkIndexOnClose = value;
|
||||
}
|
||||
|
||||
public boolean getCheckIndexOnClose() {
|
||||
return checkIndexOnClose;
|
||||
}
|
||||
|
||||
public void setCrossCheckTermVectorsOnClose(boolean value) {
|
||||
this.crossCheckTermVectorsOnClose = value;
|
||||
}
|
||||
|
||||
public boolean getCrossCheckTermVectorsOnClose() {
|
||||
return crossCheckTermVectorsOnClose;
|
||||
}
|
||||
|
||||
/**
|
||||
* If 0.0, no exceptions will be thrown. Else this should
|
||||
* be a double 0.0 - 1.0. We will randomly throw an
|
||||
|
@ -575,8 +552,8 @@ public class MockDirectoryWrapper extends Directory {
|
|||
throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks);
|
||||
}
|
||||
open = false;
|
||||
if (checkIndexOnClose) {
|
||||
if (indexPossiblyExists(this)) {
|
||||
if (getCheckIndexOnClose()) {
|
||||
if (indexPossiblyExists()) {
|
||||
if (LuceneTestCase.VERBOSE) {
|
||||
System.out.println("\nNOTE: MockDirectoryWrapper: now crash");
|
||||
}
|
||||
|
@ -584,7 +561,7 @@ public class MockDirectoryWrapper extends Directory {
|
|||
if (LuceneTestCase.VERBOSE) {
|
||||
System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex");
|
||||
}
|
||||
_TestUtil.checkIndex(this, crossCheckTermVectorsOnClose);
|
||||
_TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose());
|
||||
|
||||
if (assertNoUnreferencedFilesOnClose) {
|
||||
// now look for unreferenced files:
|
||||
|
@ -612,26 +589,6 @@ public class MockDirectoryWrapper extends Directory {
|
|||
}
|
||||
delegate.close();
|
||||
}
|
||||
|
||||
/** don't rely upon DirectoryReader.fileExists to determine if we should
|
||||
* checkIndex() or not. It might mask real problems, where we silently
|
||||
* don't checkindex at all. instead we look for a segments file.
|
||||
*/
|
||||
private boolean indexPossiblyExists(Directory d) {
|
||||
String files[];
|
||||
try {
|
||||
files = d.listAll();
|
||||
} catch (IOException ex) {
|
||||
// this means directory doesn't exist, which is ok. return false
|
||||
return false;
|
||||
}
|
||||
for (String f : files) {
|
||||
if (f.startsWith("segments_")) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
synchronized void removeOpenFile(Closeable c, String name) {
|
||||
Integer v = openFiles.get(name);
|
||||
|
@ -658,8 +615,7 @@ public class MockDirectoryWrapper extends Directory {
|
|||
removeOpenFile(in, name);
|
||||
}
|
||||
|
||||
boolean open = true;
|
||||
|
||||
@Override
|
||||
public synchronized boolean isOpen() {
|
||||
return open;
|
||||
}
|
||||
|
|
|
@@ -2,6 +2,7 @@ package org.apache.lucene.util;
 
 import java.io.Closeable;
 
+import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.junit.Assert;
 

@@ -23,15 +24,15 @@ import org.junit.Assert;
  */
 
 /**
- * Attempts to close a {@link MockDirectoryWrapper}.
+ * Attempts to close a {@link BaseDirectoryWrapper}.
  *
  * @see LuceneTestCase#newDirectory(java.util.Random)
  */
 final class CloseableDirectory implements Closeable {
-  private final MockDirectoryWrapper dir;
+  private final BaseDirectoryWrapper dir;
   private final TestRuleMarkFailure failureMarker;
 
-  public CloseableDirectory(MockDirectoryWrapper dir,
+  public CloseableDirectory(BaseDirectoryWrapper dir,
       TestRuleMarkFailure failureMarker) {
     this.dir = dir;
     this.failureMarker = failureMarker;
@@ -789,48 +789,56 @@ public abstract class LuceneTestCase extends Assert {
    * Returns a new Directory instance. Use this when the test does not
    * care about the specific Directory implementation (most tests).
    * <p>
-   * The Directory is wrapped with {@link MockDirectoryWrapper}.
-   * By default this means it will be picky, such as ensuring that you
+   * The Directory is wrapped with {@link BaseDirectoryWrapper}.
+   * this means usually it will be picky, such as ensuring that you
    * properly close it and all open files in your test. It will emulate
    * some features of Windows, such as not allowing open files to be
    * overwritten.
    */
-  public static MockDirectoryWrapper newDirectory() {
+  public static BaseDirectoryWrapper newDirectory() {
     return newDirectory(random());
   }
 
 
  /**
   * Returns a new Directory instance, using the specified random.
   * See {@link #newDirectory()} for more information.
   */
-  public static MockDirectoryWrapper newDirectory(Random r) {
-    Directory impl = newDirectoryImpl(r, TEST_DIRECTORY);
-    MockDirectoryWrapper dir = new MockDirectoryWrapper(r, maybeNRTWrap(r, impl));
-    closeAfterSuite(new CloseableDirectory(dir, suiteFailureMarker));
-
-    dir.setThrottling(TEST_THROTTLING);
-    if (VERBOSE) {
-      System.out.println("NOTE: LuceneTestCase.newDirectory: returning " + dir);
-    }
-    return dir;
-  }
+  public static BaseDirectoryWrapper newDirectory(Random r) {
+    return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), rarely(r));
+  }
+
+  public static MockDirectoryWrapper newMockDirectory() {
+    return newMockDirectory(random());
+  }
+
+  public static MockDirectoryWrapper newMockDirectory(Random r) {
+    return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false);
+  }
+
+  public static MockDirectoryWrapper newMockFSDirectory(File f) {
+    return (MockDirectoryWrapper) newFSDirectory(f, null, false);
+  }
 
   /**
    * Returns a new Directory instance, with contents copied from the
    * provided directory. See {@link #newDirectory()} for more
   * information.
   */
-  public static MockDirectoryWrapper newDirectory(Directory d) throws IOException {
+  public static BaseDirectoryWrapper newDirectory(Directory d) throws IOException {
     return newDirectory(random(), d);
   }
 
   /** Returns a new FSDirectory instance over the given file, which must be a folder. */
-  public static MockDirectoryWrapper newFSDirectory(File f) {
+  public static BaseDirectoryWrapper newFSDirectory(File f) {
     return newFSDirectory(f, null);
   }
 
   /** Returns a new FSDirectory instance over the given file, which must be a folder. */
-  public static MockDirectoryWrapper newFSDirectory(File f, LockFactory lf) {
+  public static BaseDirectoryWrapper newFSDirectory(File f, LockFactory lf) {
+    return newFSDirectory(f, lf, rarely());
+  }
+
+  private static BaseDirectoryWrapper newFSDirectory(File f, LockFactory lf, boolean bare) {
     String fsdirClass = TEST_DIRECTORY;
     if (fsdirClass.equals("random")) {
       fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);

@@ -847,14 +855,11 @@ public abstract class LuceneTestCase extends Assert {
     }
 
     Directory fsdir = newFSDirectoryImpl(clazz, f);
-    MockDirectoryWrapper dir = new MockDirectoryWrapper(
-      random(), maybeNRTWrap(random(), fsdir));
+    BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare);
     if (lf != null) {
-      dir.setLockFactory(lf);
+      wrapped.setLockFactory(lf);
     }
-    closeAfterSuite(new CloseableDirectory(dir, suiteFailureMarker));
-    dir.setThrottling(TEST_THROTTLING);
-    return dir;
+    return wrapped;
   } catch (Exception e) {
     throw new RuntimeException(e);
   }

@@ -865,22 +870,27 @@ public abstract class LuceneTestCase extends Assert {
    * with contents copied from the provided directory. See
    * {@link #newDirectory()} for more information.
    */
-  public static MockDirectoryWrapper newDirectory(Random r, Directory d) throws IOException {
+  public static BaseDirectoryWrapper newDirectory(Random r, Directory d) throws IOException {
     Directory impl = newDirectoryImpl(r, TEST_DIRECTORY);
     for (String file : d.listAll()) {
       d.copy(impl, file, file, newIOContext(r));
     }
-    MockDirectoryWrapper dir = new MockDirectoryWrapper(r, maybeNRTWrap(r, impl));
-    closeAfterSuite(new CloseableDirectory(dir, suiteFailureMarker));
-    dir.setThrottling(TEST_THROTTLING);
-    return dir;
+    return wrapDirectory(r, impl, rarely(r));
   }
 
-  private static Directory maybeNRTWrap(Random random, Directory directory) {
+  private static BaseDirectoryWrapper wrapDirectory(Random random, Directory directory, boolean bare) {
     if (rarely(random)) {
-      return new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
+      directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
     }
-    return directory;
+    if (bare) {
+      BaseDirectoryWrapper base = new BaseDirectoryWrapper(directory);
+      closeAfterSuite(new CloseableDirectory(base, suiteFailureMarker));
+      return base;
+    } else {
+      MockDirectoryWrapper mock = new MockDirectoryWrapper(random, directory);
+      mock.setThrottling(TEST_THROTTLING);
+      closeAfterSuite(new CloseableDirectory(mock, suiteFailureMarker));
+      return mock;
+    }
   }
 
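The practical consequence for tests: newDirectory() can now return a "bare" BaseDirectoryWrapper instead of a MockDirectoryWrapper, so MDW-only switches need an instanceof guard, while newMockDirectory()/newMockFSDirectory() keep a hard guarantee for tests that require the mock hooks. An illustrative test body (not from the patch):

    // inside a LuceneTestCase subclass
    public void testDirectoryFlavors() throws Exception {
      BaseDirectoryWrapper dir = newDirectory();        // usually a mock, rarely bare
      if (dir instanceof MockDirectoryWrapper) {
        ((MockDirectoryWrapper) dir).setPreventDoubleWrite(false);  // MDW-only knob, guarded
      }
      dir.setCheckIndexOnClose(false);                  // available on the base wrapper
      dir.close();

      MockDirectoryWrapper mock = newMockDirectory();   // guaranteed mock when needed
      mock.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
      mock.close();
    }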
@@ -1055,9 +1065,9 @@ public abstract class LuceneTestCase extends Assert {
       // QueryUtils' reader with a fake cache key, so insanity checker cannot walk
       // along our reader:
       if (r instanceof AtomicReader) {
-        r = new FCInvisibleMultiReader(new AssertingAtomicReader((AtomicReader)r));
+        r = new AssertingAtomicReader((AtomicReader)r);
       } else if (r instanceof DirectoryReader) {
-        r = new FCInvisibleMultiReader((DirectoryReader)r);
+        r = new AssertingDirectoryReader((DirectoryReader)r);
       }
       break;
     default:
@@ -112,6 +112,25 @@ Bug Fixes
   file name using the "config" attribute prevented the override file from being
   used. (Ryan Zezeski, hossman)
 
+* SOLR-3642: Correct broken check for multivalued fields in stats.facet
+  (Yandong Yao, hossman)
+
+* SOLR-3660: Velocity: Link to admin page broken (janhoy)
+
+* SOLR-3658: Adding thousands of docs with one UpdateProcessorChain instance can briefly create
+  spikes of threads in the thousands. (yonik, Mark Miller)
+
+* SOLR-3656: A core reload now always uses the same dataDir. (Mark Miller, yonik)
+
+* SOLR-3662: Core reload bugs: a reload always obtained a non-NRT searcher, which
+  could go back in time with respect to the previous core's NRT searcher. Versioning
+  did not work correctly across a core reload, and update handler synchronization
+  was changed to synchronize on core state since more than one update handler
+  can coexist for a single index during a reload. (yonik)
+
+* SOLR-3663: There are a couple of bugs in the sync process when a leader goes down and a
+  new leader is elected. (Mark Miller)
+
 
 Other Changes
 ----------------------

@@ -795,7 +814,36 @@ Documentation
 * SOLR-2232: Improved README info on solr.solr.home in examples
   (Eric Pugh and hossman)
 
+==================  3.6.1 ==================
+More information about this release, including any errata related to the
+release notes, upgrade instructions, or other changes may be found online at:
+https://wiki.apache.org/solr/Solr3.6.1
+
+Bug Fixes:
+
+* LUCENE-3969: Throw IAE on bad arguments that could cause confusing errors in
+  PatternTokenizer. CommonGrams populates PositionLengthAttribute correctly.
+  (Uwe Schindler, Mike McCandless, Robert Muir)
+
+* SOLR-3361: ReplicationHandler "maxNumberOfBackups" doesn't work if backups are triggered on commit
+  (James Dyer, Tomas Fernandez Lobbe)
+
+* SOLR-3375: Fix charset problems with HttpSolrServer (Roger Håkansson, yonik, siren)
+
+* SOLR-3436: Group count incorrect when not all shards are queried in the second
+  pass. (Francois Perron, Martijn van Groningen)
+
+* SOLR-3454: Exception when using result grouping with main=true and using
+  wt=javabin. (Ludovic Boutros, Martijn van Groningen)
+
+* SOLR-3489: Config file replication less error prone (Jochen Just via janhoy)
+
+* SOLR-3477: SOLR does not start up when no cores are defined (Tomás Fernández Löbbe via tommaso)
+
 ==================  3.6.0 ==================
 More information about this release, including any errata related to the
 release notes, upgrade instructions, or other changes may be found online at:
 https://wiki.apache.org/solr/Solr3.6
 
 Upgrading from Solr 3.5
 ----------------------

@@ -829,16 +877,16 @@ Upgrading from Solr 3.5
 * SOLR-3161: Don't use the 'qt' parameter with a leading '/'. It probably won't work in 4.0
   and it's now limited in 3.6 to SearchHandler subclasses that aren't lazy-loaded.
 
-* SOLR-2724: Specifying <defaultSearchField> and <solrQueryParser defaultOperator="..."/> in
-  schema.xml is now considered deprecated. Instead you are encouraged to specify these via the "df"
-  and "q.op" parameters in your request handler definition. (David Smiley)
-
 * Bugs found and fixed in the SignatureUpdateProcessor that previously caused
   some documents to produce the same signature even when the configured fields
   contained distinct (non-String) values. Users of SignatureUpdateProcessor
   are strongly advised that they should re-index as document signatures may
   have now changed. (see SOLR-3200 & SOLR-3226 for details)
 
+* SOLR-2724: Specifying <defaultSearchField> and <solrQueryParser defaultOperator="..."/> in
+  schema.xml is now considered deprecated. Instead you are encouraged to specify these via the "df"
+  and "q.op" parameters in your request handler definition. (David Smiley)
+
 New Features
 ----------------------
 * SOLR-2020: Add Java client that uses Apache Http Components http client (4.x).

@@ -853,13 +901,13 @@ New Features
 
 * SOLR-1565: StreamingUpdateSolrServer supports RequestWriter API and therefore, javabin update
   format (shalin)
 
 * SOLR-2438 added MultiTermAwareComponent to the various classes to allow automatic lowercasing
   for multiterm queries (wildcards, regex, prefix, range, etc). You can now optionally specify a
   "multiterm" analyzer in our schema.xml, but Solr should "do the right thing" if you don't
-  specify <analyzer type="multiterm"> (Pete Sturge Erick Erickson, Mentoring from Seeley and Muir)
+  specify <fieldType="multiterm"> (Pete Sturge Erick Erickson, Mentoring from Seeley and Muir)
 
-* SOLR-2919: Added support for localized range queries when the analysis chain uses
+* SOLR-2919: Added support for localized range queries when the analysis chain uses
   CollationKeyFilter or ICUCollationKeyFilter. (Michael Sokolov, rmuir)
 
 * SOLR-2982: Added BeiderMorseFilterFactory for Beider-Morse (BMPM) phonetic encoder. Upgrades

@@ -962,7 +1010,7 @@ Bug Fixes
 
 * SOLR-2819: Improved speed of parsing hex entities in HTMLStripCharFilter
   (Bernhard Berger, hossman)
-
+
 * SOLR-2509: StringIndexOutOfBoundsException in the spellchecker collate when the term contains
   a hyphen. (Thomas Gambier caught the bug, Steffen Godskesen did the patch, via Erick Erickson)
 

@@ -982,17 +1030,18 @@ Bug Fixes
 
 * SOLR-2542: Fixed DIH Context variables which were broken for all scopes other
   than SCOPE_ENTITY (Linbin Chen & Frank Wesemann via hossman)
-
+
 * SOLR-3042: Fixed Maven Jetty plugin configuration.
   (David Smiley via Steve Rowe)
 
 * SOLR-2970: CSV ResponseWriter returns fields defined as stored=false in schema (janhoy)
 
 * LUCENE-3690, LUCENE-2208, SOLR-882, SOLR-42: Re-implemented
-  HTMLStripCharFilter as a JFlex-generated scanner. See below for a list
-  of bug fixes and other changes. To get the same behavior as
-  HTMLStripCharFilter in Solr version 3.5 and earlier (including the bugs),
-  use LegacyHTMLStripCharFilter, which is the previous implementation.
+  HTMLStripCharFilter as a JFlex-generated scanner and moved it to
+  lucene/contrib/analyzers/common/. See below for a list of bug fixes and
+  other changes. To get the same behavior as HTMLStripCharFilter in Solr
+  version 3.5 and earlier (including the bugs), use LegacyHTMLStripCharFilter,
+  which is the previous implementation.
 
 Behavior changes from the previous version:
 

@@ -1006,7 +1055,7 @@ Bug Fixes
     from Unicode character classes [:ID_Start:] and [:ID_Continue:].
   - Uppercase character entities "&QUOT;", "&COPY;", "&GT;", "&LT;", "&REG;",
     and "&AMP;" are now recognized and handled as if they were in lowercase.
-  - The REPLACEMENT CHARACTER U+FFFD is now used to replace numeric character
+  - The REPLACEMENT CHARACTER U+FFFD is now used to replace numeric character
     entities for unpaired UTF-16 low and high surrogates (in the range
     [U+D800-U+DFFF]).
   - Properly paired numeric character entities for UTF-16 surrogates are now

@@ -1072,6 +1121,9 @@ Bug Fixes
   and was fundamentally broken/bizarre.
   (hossman, Ahmet Arslan)
 
+* SOLR-2291: fix JSONWriter to respect field list when writing SolrDocuments
+  (Ahmet Arslan via hossman)
+
 * SOLR-3264: Fix CoreContainer and SolrResourceLoader logging to be more
   clear about when SolrCores are being created, and stop misleading people
   about SolrCore instanceDir's being the "Solr Home Dir" (hossman)

@@ -1087,9 +1139,9 @@ Bug Fixes
 * SOLR-3200: Fix SignatureUpdateProcessor "all fields" mode to use all
   fields of each document instead of the fields specified by the first
   document indexed (Spyros Kapnissis via hossman)
-
-* SOLR-3316: Distributed grouping failed when rows parameter was set to 0 and sometimes returned a wrong
-  hit count as matches. (Cody Young, Martijn van Groningen)
+
+* SOLR-3316: Distributed grouping failed when rows parameter was set to 0 and
+  sometimes returned a wrong hit count as matches. (Cody Young, Martijn van Groningen)
 
 Other Changes
 ----------------------

@@ -1098,7 +1150,7 @@ Other Changes
 * SOLR-2920: Refactor frequent conditional use of DefaultSolrParams and
   AppendedSolrParams into factory methods.
   (David Smiley via hossman)
-
+
 * SOLR-3032: Deprecate logOnce from SolrException logOnce and all the supporting
   structure will disappear in 4.0. Errors should be caught and logged at the
   top-most level or logged and NOT propagated up the chain. (Erick Erickson)

@@ -1122,7 +1174,7 @@ Other Changes
 * SOLR-3077: Better error messages when attempting to use "blank" field names
   (Antony Stubbs via hossman)
 
-* SOLR-2712: expecting fl=score to return all fields is now deprecated.
+* SOLR-2712: expecting fl=score to return all fields is now deprecated.
   In solr 4.0, this will only return the score. (ryan)
 
 * SOLR-3156: Check for Lucene directory locks at startup. In previous versions

@@ -1135,6 +1187,13 @@ Other Changes
   the effect of NOT specifying <indexConfig> at all gives same result as the
   example config used to give in 3.5 (janhoy, gsingers)
 
+* SOLR-3294: In contrib/clustering/lib/, replaced the manually retrowoven
+  Java 1.5-compatible carrot2-core-3.5.0.jar (which is not publicly available,
+  except from the Solr Subversion repository), with newly released Java
+  1.5-compatible carrot2-core-3.5.0.1.jar (hosted on the Maven Central
+  repository). Also updated dependencies jackson-core-asl and
+  jackson-mapper-asl (both v1.5.2 -> v1.7.4). (Dawid Weiss, Steve Rowe)
+
 Build
 ----------------------
 * SOLR-2487: Add build target to package war without slf4j jars (janhoy)
@@ -21,6 +21,10 @@ $Id$
   the Solr 3.x ICUCollationKeyFilterFactory, and also supports
   Locale-sensitive range queries. (rmuir)
 
+==================  3.6.1 ==================
+
+(No Changes)
+
 ==================  3.6.0 ==================
 
 * SOLR-2919: Added parametric tailoring options to ICUCollationKeyFilterFactory.

@@ -13,6 +13,11 @@ $Id$
 
 ================== Release 4.0.0-ALPHA ==============
 
+* SOLR-3470: Bug fix: custom Carrot2 tokenizer and stemmer factories are
+  respected now (Stanislaw Osinski, Dawid Weiss)
+
+================== Release 3.6.1 ==================
+
 * SOLR-3470: Bug fix: custom Carrot2 tokenizer and stemmer factories are
   respected now (Stanislaw Osinski, Dawid Weiss)
 

@@ -32,7 +32,12 @@ Other Changes
 
 Bug Fixes
 ----------------------
-* SOLR-3336: SolrEntityProcessor substitutes most variables at query time
+* SOLR-3360: More bug fixes for the deprecated "threads" parameter. (Mikhail Khludnev, Claudio R, via James Dyer)
+
+* SOLR-3430: Added a new test against a real SQL database. Fixed problems revealed by this new test
+  related to the expanded cache support added to 3.6/SOLR-2382 (James Dyer)
+
+* SOLR-3336: SolrEntityProcessor substitutes most variables at query time.
   (Michael Kroh, Lance Norskog, via Martijn van Groningen)
 
 ==================  3.6.0 ==================
@@ -17,6 +17,7 @@
 package org.apache.solr.handler.dataimport;
 
 import org.junit.Test;
+import org.junit.Ignore;
 
 import java.util.*;
 

@@ -28,6 +29,7 @@ import java.util.*;
  *
  * @since solr 1.3
  */
+@Ignore("FIXME: I fail so often it makes me ill!")
 public class TestSqlEntityProcessor extends AbstractDataImportHandlerTestCase {
   private static ThreadLocal<Integer> local = new ThreadLocal<Integer>();
 

@@ -35,6 +35,7 @@ import java.text.ParseException;
  *
  * @since solr 1.3
  */
+@Ignore("FIXME: I fail so often it makes me ill!")
 public class TestSqlEntityProcessor2 extends AbstractDataImportHandlerTestCase {
   @BeforeClass
   public static void beforeClass() throws Exception {

@@ -19,6 +19,7 @@ package org.apache.solr.handler.dataimport;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;

@@ -35,6 +36,7 @@ import java.util.List;
  *
  * @since solr 1.3
  */
+@Ignore("FIXME: I fail so often it makes me ill!")
 public class TestSqlEntityProcessorDelta extends AbstractDataImportHandlerTestCase {
   private static final String FULLIMPORT_QUERY = "select * from x";
 

@@ -18,6 +18,7 @@ package org.apache.solr.handler.dataimport;
 
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.ArrayList;

@@ -33,6 +34,7 @@ import java.util.List;
  *
  * @since solr 1.3
  */
+@Ignore("FIXME: I fail so often it makes me ill!")
 public class TestSqlEntityProcessorDelta2 extends AbstractDataImportHandlerTestCase {
   private static final String FULLIMPORT_QUERY = "select * from x";
 

@@ -18,12 +18,14 @@ package org.apache.solr.handler.dataimport;
 
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+@Ignore("FIXME: I fail so often it makes me ill!")
 public class TestSqlEntityProcessorDelta3 extends AbstractDataImportHandlerTestCase {
   private static final String P_FULLIMPORT_QUERY = "select * from parent";
   private static final String P_DELTA_QUERY = "select parent_id from parent where last_modified > NOW";

@@ -18,6 +18,7 @@ package org.apache.solr.handler.dataimport;
 
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.ArrayList;

@@ -34,6 +35,7 @@ import java.util.logging.*;
  *
  * @since solr 3.1
  */
+@Ignore("FIXME: I fail so often it makes me ill!")
 public class TestSqlEntityProcessorDeltaPrefixedPk extends AbstractDataImportHandlerTestCase {
   private static final String FULLIMPORT_QUERY = "select * from x";
 
@@ -32,6 +32,10 @@ $Id$
 
 * SOLR-3254: Upgrade Solr to Tika 1.1 (janhoy)
 
+==================  Release 3.6.1  ==================
+
+(No Changes)
+
 ==================  Release 3.6.0  ==================
 
 * SOLR-2346: Add a chance to set content encoding explicitly via content type of stream.
@@ -15,6 +15,10 @@ $Id$
 
 (No changes)
 
+==================  Release 3.6.1  ==================
+
+(No Changes)
+
 ==================  Release 3.6.0  ==================
 
 * SOLR-3107: When using the LangDetect implementation of langid, set the random
@@ -30,6 +30,10 @@ $Id$
 
 (No Changes)
 
+==================  3.6.1  ==================
+
+(No Changes)
+
 ==================  3.6.0  ==================
 
 (No Changes)
@@ -23,12 +23,12 @@ import java.util.NoSuchElementException;
 import java.util.TreeMap;
 import java.util.concurrent.CountDownLatch;
 
+import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.ACL;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,12 +42,12 @@ public class DistributedQueue {
 
   private final String dir;
 
-  private ZooKeeper zookeeper;
+  private SolrZkClient zookeeper;
   private List<ACL> acl = ZooDefs.Ids.OPEN_ACL_UNSAFE;
 
   private final String prefix = "qn-";
 
-  public DistributedQueue(ZooKeeper zookeeper, String dir, List<ACL> acl) {
+  public DistributedQueue(SolrZkClient zookeeper, String dir, List<ACL> acl) {
     this.dir = dir;
 
     if (acl != null) {
@@ -70,7 +70,7 @@ public class DistributedQueue {
 
     List<String> childNames = null;
     try {
-      childNames = zookeeper.getChildren(dir, watcher);
+      childNames = zookeeper.getChildren(dir, watcher, true);
     } catch (KeeperException.NoNodeException e) {
       throw e;
     }
@@ -124,7 +124,7 @@ public class DistributedQueue {
     for (String headNode : orderedChildren.values()) {
       if (headNode != null) {
         try {
-          return zookeeper.getData(dir + "/" + headNode, false, null);
+          return zookeeper.getData(dir + "/" + headNode, null, null, true);
         } catch (KeeperException.NoNodeException e) {
           // Another client removed the node first, try next
         }
@@ -156,8 +156,8 @@ public class DistributedQueue {
     for (String headNode : orderedChildren.values()) {
       String path = dir + "/" + headNode;
       try {
-        byte[] data = zookeeper.getData(path, false, null);
-        zookeeper.delete(path, -1);
+        byte[] data = zookeeper.getData(path, null, null, true);
+        zookeeper.delete(path, -1, true);
         return data;
       } catch (KeeperException.NoNodeException e) {
         // Another client deleted the node first.
@@ -202,7 +202,7 @@ public class DistributedQueue {
       try {
         orderedChildren = orderedChildren(childWatcher);
       } catch (KeeperException.NoNodeException e) {
-        zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
+        zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT, true);
         continue;
       }
       if (orderedChildren.size() == 0) {
@@ -213,8 +213,8 @@ public class DistributedQueue {
       for (String headNode : orderedChildren.values()) {
         String path = dir + "/" + headNode;
         try {
-          byte[] data = zookeeper.getData(path, false, null);
-          zookeeper.delete(path, -1);
+          byte[] data = zookeeper.getData(path, null, null, true);
+          zookeeper.delete(path, -1, true);
           return data;
         } catch (KeeperException.NoNodeException e) {
           // Another client deleted the node first.
@@ -234,11 +234,11 @@ public class DistributedQueue {
     for (;;) {
       try {
         zookeeper.create(dir + "/" + prefix, data, acl,
-            CreateMode.PERSISTENT_SEQUENTIAL);
+            CreateMode.PERSISTENT_SEQUENTIAL, true);
         return true;
       } catch (KeeperException.NoNodeException e) {
         try {
-          zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
+          zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT, true);
         } catch (KeeperException.NodeExistsException ne) {
           //someone created it
         }
@@ -284,7 +284,7 @@ public class DistributedQueue {
       try {
         orderedChildren = orderedChildren(childWatcher);
       } catch (KeeperException.NoNodeException e) {
-        zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT);
+        zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT, true);
         continue;
       }
       if (orderedChildren.size() == 0) {
@@ -295,7 +295,7 @@ public class DistributedQueue {
       for (String headNode : orderedChildren.values()) {
         String path = dir + "/" + headNode;
         try {
-          byte[] data = zookeeper.getData(path, false, null);
+          byte[] data = zookeeper.getData(path, null, null, true);
           return data;
         } catch (KeeperException.NoNodeException e) {
           // Another client deleted the node first.
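Every raw ZooKeeper call above gains a trailing boolean, which routes the operation through SolrZkClient so it can be retried on connection loss rather than failing the queue operation outright. A minimal sketch of that retry idea, with a simple linear backoff; the names and the backoff policy here are illustrative assumptions, not Solr's actual internals:

```java
// Hypothetical sketch of the retry-on-connection-loss idea; illustrative only.
import java.util.concurrent.Callable;

import org.apache.zookeeper.KeeperException;

public class ZkRetrySketch {
  /** Retries an operation a few times when ZooKeeper reports connection loss. */
  static <T> T retryOnConnLoss(Callable<T> op, int maxRetries) throws Exception {
    KeeperException.ConnectionLossException last = null;
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        return op.call();
      } catch (KeeperException.ConnectionLossException e) {
        last = e;                          // transient: the session may still be alive
        Thread.sleep(50L * (attempt + 1)); // simple linear backoff (assumed policy)
      }
    }
    throw last;
  }
}
```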
@@ -17,6 +17,8 @@ import org.apache.solr.handler.component.ShardHandler;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -63,7 +65,7 @@ public abstract class ElectionContext {
 }
 
 class ShardLeaderElectionContextBase extends ElectionContext {
 
   private static Logger log = LoggerFactory.getLogger(ShardLeaderElectionContextBase.class);
   protected final SolrZkClient zkClient;
   protected String shardId;
   protected String collection;
@@ -111,6 +113,8 @@ class ShardLeaderElectionContextBase extends ElectionContext {
 
 // add core container and stop passing core around...
 final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
   private static Logger log = LoggerFactory.getLogger(ShardLeaderElectionContext.class);
 
   private ZkController zkController;
   private CoreContainer cc;
   private SyncStrategy syncStrategy = new SyncStrategy();
@@ -131,8 +135,6 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
     String coreName = leaderProps.get(ZkStateReader.CORE_NAME_PROP);
     SolrCore core = null;
     try {
-      // the first time we are run, we will get a startupCore - after
-      // we will get null and must use cc.getCore
 
       core = cc.getCore(coreName);
 
@@ -181,7 +183,8 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
       // remove our ephemeral and re join the election
       // System.out.println("sync failed, delete our election node:"
       // + leaderSeqPath);
 
+      log.info("There is a better leader candidate than us - going back into recovery");
 
       zkController.publish(core.getCoreDescriptor(), ZkStateReader.DOWN);
 
       cancelElection();
@@ -422,19 +422,19 @@ public class Overseer {
    */
   public static DistributedQueue getInQueue(final SolrZkClient zkClient) {
     createOverseerNode(zkClient);
-    return new DistributedQueue(zkClient.getSolrZooKeeper(), "/overseer/queue", null);
+    return new DistributedQueue(zkClient, "/overseer/queue", null);
   }
 
   /* Internal queue, not to be used outside of Overseer */
   static DistributedQueue getInternalQueue(final SolrZkClient zkClient) {
     createOverseerNode(zkClient);
-    return new DistributedQueue(zkClient.getSolrZooKeeper(), "/overseer/queue-work", null);
+    return new DistributedQueue(zkClient, "/overseer/queue-work", null);
   }
 
   /* Collection creation queue */
   static DistributedQueue getCollectionQueue(final SolrZkClient zkClient) {
     createOverseerNode(zkClient);
-    return new DistributedQueue(zkClient.getSolrZooKeeper(), "/overseer/collection-queue-work", null);
+    return new DistributedQueue(zkClient, "/overseer/collection-queue-work", null);
   }
 
   private static void createOverseerNode(final SolrZkClient zkClient) {
@@ -69,6 +69,7 @@ public class SyncStrategy {
 
   public boolean sync(ZkController zkController, SolrCore core,
       ZkNodeProps leaderProps) {
+    log.info("Sync replicas to " + ZkCoreNodeProps.getCoreUrl(leaderProps));
     // TODO: look at our state usage of sync
     // zkController.publish(core, ZkStateReader.SYNC);
 
@@ -208,7 +209,7 @@ public class SyncStrategy {
         // System.out
         // .println("try and ask " + node.getCoreUrl() + " to sync");
         log.info("try and ask " + node.getCoreUrl() + " to sync");
-        requestSync(zkLeader.getCoreUrl(), node.getCoreName());
+        requestSync(node.getCoreUrl(), zkLeader.getCoreUrl(), node.getCoreName());
 
       } catch (Exception e) {
         SolrException.log(log, "Error syncing replica to leader", e);
@@ -224,12 +225,15 @@ public class SyncStrategy {
       if (!success) {
         try {
           log.info("Sync failed - asking replica to recover.");
           //System.out.println("Sync failed - asking replica to recover.");
 
           // TODO: do this in background threads
           RequestRecovery recoverRequestCmd = new RequestRecovery();
           recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
           recoverRequestCmd.setCoreName(((SyncShardRequest)srsp.getShardRequest()).coreName);
 
-          HttpSolrServer server = new HttpSolrServer(zkLeader.getBaseUrl());
+          HttpSolrServer server = new HttpSolrServer(srsp.getShardAddress());
           server.setConnectionTimeout(45000);
           server.setSoTimeout(45000);
           server.request(recoverRequestCmd);
         } catch (Exception e) {
           log.info("Could not tell a replica to recover", e);
@@ -251,7 +255,7 @@ public class SyncStrategy {
     return success;
   }
 
-  private void requestSync(String replica, String coreName) {
+  private void requestSync(String replica, String leaderUrl, String coreName) {
     SyncShardRequest sreq = new SyncShardRequest();
     sreq.coreName = coreName;
     sreq.purpose = 1;
@@ -264,7 +268,7 @@ public class SyncStrategy {
     sreq.params.set("qt","/get");
     sreq.params.set("distrib",false);
     sreq.params.set("getVersions",Integer.toString(100));
-    sreq.params.set("sync",replica);
+    sreq.params.set("sync",leaderUrl);
 
     shardHandler.submit(sreq, replica, sreq.params);
   }
@@ -163,7 +163,7 @@ public class ZkCLI {
     }
     SolrZkClient zkClient = null;
     try {
-      zkClient = new SolrZkClient(zkServerAddress, 15000, 5000,
+      zkClient = new SolrZkClient(zkServerAddress, 30000, 30000,
           new OnReconnect() {
             @Override
             public void command() {}
@@ -177,29 +177,15 @@ public final class ZkController {
             overseerElector.joinElection(context);
             zkStateReader.createClusterStateWatchersAndUpdate();
 
-            List<CoreDescriptor> descriptors = registerOnReconnect
-                .getCurrentDescriptors();
-            if (descriptors != null) {
-              // before registering as live, make sure everyone is in a
-              // down state
-              for (CoreDescriptor descriptor : descriptors) {
-                final String coreZkNodeName = getNodeName() + "_"
-                    + descriptor.getName();
-                try {
-                  publish(descriptor, ZkStateReader.DOWN);
-                  waitForLeaderToSeeDownState(descriptor, coreZkNodeName);
-                } catch (Exception e) {
-                  SolrException.log(log, "", e);
-                }
-              }
-            }
+            registerAllCoresAsDown(registerOnReconnect);
 
 
             // we have to register as live first to pick up docs in the buffer
             createEphemeralLiveNode();
 
+            List<CoreDescriptor> descriptors = registerOnReconnect.getCurrentDescriptors();
             // re register all descriptors
-            if (descriptors != null) {
+            if (descriptors != null) {
               for (CoreDescriptor descriptor : descriptors) {
                 // TODO: we need to think carefully about what happens when it was
                 // a leader that was expired - as well as what to do about leaders/overseers
@@ -228,7 +214,28 @@ public final class ZkController {
     cmdExecutor = new ZkCmdExecutor();
     leaderElector = new LeaderElector(zkClient);
     zkStateReader = new ZkStateReader(zkClient);
-    init();
+    init(registerOnReconnect);
   }
 
+  private void registerAllCoresAsDown(
+      final CurrentCoreDescriptorProvider registerOnReconnect) {
+    List<CoreDescriptor> descriptors = registerOnReconnect
+        .getCurrentDescriptors();
+    if (descriptors != null) {
+      // before registering as live, make sure everyone is in a
+      // down state
+      for (CoreDescriptor descriptor : descriptors) {
+        final String coreZkNodeName = getNodeName() + "_"
+            + descriptor.getName();
+        try {
+          publish(descriptor, ZkStateReader.DOWN);
+          waitForLeaderToSeeDownState(descriptor, coreZkNodeName);
+        } catch (Exception e) {
+          SolrException.log(log, "", e);
+        }
+      }
+    }
+  }
 
   /**
@@ -338,8 +345,9 @@ public final class ZkController {
     return zkServerAddress;
   }
 
-  private void init() {
-
+  private void init(CurrentCoreDescriptorProvider registerOnReconnect) {
+    registerAllCoresAsDown(registerOnReconnect);
+
     try {
       // makes nodes zkNode
       cmdExecutor.ensureExists(ZkStateReader.LIVE_NODES_ZKNODE, zkClient);
@@ -1126,13 +1134,12 @@ public final class ZkController {
    */
   public static void bootstrapConf(SolrZkClient zkClient, Config cfg, String solrHome) throws IOException,
       KeeperException, InterruptedException {
 
     log.info("bootstraping config into ZooKeeper using solr.xml");
     NodeList nodes = (NodeList)cfg.evaluate("solr/cores/core", XPathConstants.NODESET);
 
     for (int i=0; i<nodes.getLength(); i++) {
       Node node = nodes.item(i);
       String rawName = DOMUtil.substituteProperty(DOMUtil.getAttr(node, "name", null), new Properties());
 
       String instanceDir = DOMUtil.getAttr(node, "instanceDir", null);
       File idir = new File(instanceDir);
       if (!idir.isAbsolute()) {
@@ -1143,7 +1150,7 @@ public final class ZkController {
       confName = rawName;
     }
     File udir = new File(idir, "conf");
-    SolrException.log(log, "Uploading directory " + udir + " with name " + confName + " for SolrCore " + rawName);
+    log.info("Uploading directory " + udir + " with name " + confName + " for SolrCore " + rawName);
     ZkController.uploadConfigDir(zkClient, udir, confName);
   }
 }
@@ -945,7 +945,7 @@ public class CoreContainer
       }
     }
 
-    SolrCore newCore = core.reload(solrLoader);
+    SolrCore newCore = core.reload(solrLoader, core);
     // keep core to orig name link
     String origName = coreToOrigName.remove(core);
     if (origName != null) {
@@ -357,7 +357,7 @@ public final class SolrCore implements SolrInfoMBean {
     return responseWriters.put(name, responseWriter);
   }
 
-  public SolrCore reload(SolrResourceLoader resourceLoader) throws IOException,
+  public SolrCore reload(SolrResourceLoader resourceLoader, SolrCore prev) throws IOException,
       ParserConfigurationException, SAXException {
     // TODO - what if indexwriter settings have changed
 
@@ -368,8 +368,8 @@ public final class SolrCore implements SolrInfoMBean {
         getSchema().getResourceName(), null);
 
     updateHandler.incref();
-    SolrCore core = new SolrCore(getName(), null, config,
-        schema, coreDescriptor, updateHandler);
+    SolrCore core = new SolrCore(getName(), getDataDir(), config,
+        schema, coreDescriptor, updateHandler, prev);
     return core;
   }
 
@@ -548,7 +548,7 @@ public final class SolrCore implements SolrInfoMBean {
    * @since solr 1.3
    */
   public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd) {
-    this(name, dataDir, config, schema, cd, null);
+    this(name, dataDir, config, schema, cd, null, null);
   }
 
   /**
@@ -561,7 +561,7 @@ public final class SolrCore implements SolrInfoMBean {
    *
    *@since solr 1.3
    */
-  public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd, UpdateHandler updateHandler) {
+  public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd, UpdateHandler updateHandler, SolrCore prev) {
     coreDescriptor = cd;
     this.setName( name );
     resourceLoader = config.getResourceLoader();
@@ -640,10 +640,31 @@ public final class SolrCore implements SolrInfoMBean {
       }
     });
 
+    // use the (old) writer to open the first searcher
+    RefCounted<IndexWriter> iwRef = null;
+    if (prev != null) {
+      iwRef = prev.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
+      if (iwRef != null) {
+        final IndexWriter iw = iwRef.get();
+        newReaderCreator = new Callable<DirectoryReader>() {
+          @Override
+          public DirectoryReader call() throws Exception {
+            return DirectoryReader.open(iw, true);
+          }
+        };
+      }
+    }
+
     // Open the searcher *before* the update handler so we don't end up opening
     // one in the middle.
     // With lockless commits in Lucene now, this probably shouldn't be an issue anymore
-    getSearcher(false,false,null);
+    try {
+      getSearcher(false,false,null,true);
+    } finally {
+      newReaderCreator = null;
+      if (iwRef != null) iwRef.decref();
+    }
 
     String updateHandlerClass = solrConfig.getUpdateHandlerInfo().className;
 
@@ -1057,7 +1078,7 @@ public final class SolrCore implements SolrInfoMBean {
   private final int maxWarmingSearchers;  // max number of on-deck searchers allowed
 
   private RefCounted<SolrIndexSearcher> realtimeSearcher;
+  private Callable<DirectoryReader> newReaderCreator;
 
   /**
   * Return a registered {@link RefCounted}<{@link SolrIndexSearcher}> with
@@ -1208,9 +1229,20 @@ public final class SolrCore implements SolrInfoMBean {
         tmp = new SolrIndexSearcher(this, schema, (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
 
       } else {
+        // newestSearcher == null at this point
+
+        if (newReaderCreator != null) {
+          // this is set in the constructor if there is a currently open index writer
+          // so that we pick up any uncommitted changes and so we don't go backwards
+          // in time on a core reload
+          DirectoryReader newReader = newReaderCreator.call();
+          tmp = new SolrIndexSearcher(this, schema, (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
+        } else {
           // normal open that happens at startup
           // verbose("non-reopen START:");
           tmp = new SolrIndexSearcher(this, newIndexDir, schema, getSolrConfig().indexConfig, "main", true, directoryFactory);
           // verbose("non-reopen DONE: searcher=",tmp);
+        }
       }
 
       List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
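The `newReaderCreator` path lets a reloaded core open its first searcher against the previous core's still-open IndexWriter, so it sees uncommitted changes instead of going backwards in time. A minimal, self-contained sketch of what `DirectoryReader.open(writer, true)` provides, assuming the Lucene 4.x API of this era:

```java
// Sketch: a near-real-time reader opened from a live IndexWriter sees
// documents that have not been committed yet (assumes Lucene 4.x APIs).
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class NrtReaderSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter iw = new IndexWriter(dir,
        new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40)));
    Document doc = new Document();
    doc.add(new StringField("id", "1", Field.Store.YES));
    iw.addDocument(doc); // not committed yet

    // true == apply deletes; the reader still sees the uncommitted document
    DirectoryReader nrt = DirectoryReader.open(iw, true);
    System.out.println("numDocs=" + nrt.numDocs()); // prints 1
    nrt.close();
    iw.close();
    dir.close();
  }
}
```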
@@ -17,9 +17,17 @@ package org.apache.solr.handler.admin;
  * limitations under the License.
  */
 
 import java.io.IOException;
 
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.HttpSolrServer;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.OverseerCollectionProcessor;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.CloudState;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
@@ -103,6 +111,10 @@ public class CollectionsHandler extends RequestHandlerBase {
         this.handleReloadAction(req, rsp);
         break;
       }
+      case SYNCSHARD: {
+        this.handleSyncShardAction(req, rsp);
+        break;
+      }
 
       default: {
         throw new RuntimeException("Unknown action: " + action);
@@ -123,6 +135,24 @@ public class CollectionsHandler extends RequestHandlerBase {
     // TODO: what if you want to block until the collection is available?
     coreContainer.getZkController().getOverseerCollectionQueue().offer(ZkStateReader.toJSON(m));
   }
 
+  private void handleSyncShardAction(SolrQueryRequest req, SolrQueryResponse rsp) throws KeeperException, InterruptedException, SolrServerException, IOException {
+    log.info("Syncing shard : " + req.getParamString());
+    String collection = req.getParams().required().get("collection");
+    String shard = req.getParams().required().get("shard");
+
+    CloudState cloudState = coreContainer.getZkController().getCloudState();
+
+    ZkNodeProps leaderProps = cloudState.getLeader(collection, shard);
+    ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+    HttpSolrServer server = new HttpSolrServer(nodeProps.getBaseUrl());
+    RequestSyncShard reqSyncShard = new CoreAdminRequest.RequestSyncShard();
+    reqSyncShard.setCollection(collection);
+    reqSyncShard.setShard(shard);
+    reqSyncShard.setCoreName(nodeProps.getCoreName());
+    server.request(reqSyncShard);
+  }
+
 
   private void handleDeleteAction(SolrQueryRequest req, SolrQueryResponse rsp) throws KeeperException, InterruptedException {
@@ -20,7 +20,9 @@ package org.apache.solr.handler.admin;
 import java.io.File;
 import java.io.IOException;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Properties;
 
 import org.apache.commons.io.FileUtils;
@@ -28,6 +30,8 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.apache.solr.cloud.CloudDescriptor;
+import org.apache.solr.cloud.SyncStrategy;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.CloudState;
@@ -47,8 +51,6 @@ import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.DirectoryFactory;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.RequestHandlerBase;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -69,8 +71,6 @@ import org.slf4j.LoggerFactory;
 public class CoreAdminHandler extends RequestHandlerBase {
   protected static Logger log = LoggerFactory.getLogger(CoreAdminHandler.class);
   protected final CoreContainer coreContainer;
-  private ShardHandlerFactory shardHandlerFactory;
-  private ShardHandler shardHandler;
 
   public CoreAdminHandler() {
     super();
@@ -87,8 +87,6 @@ public class CoreAdminHandler extends RequestHandlerBase {
   */
   public CoreAdminHandler(final CoreContainer coreContainer) {
     this.coreContainer = coreContainer;
-    shardHandlerFactory = coreContainer.getShardHandlerFactory();
-    shardHandler = shardHandlerFactory.getShardHandler();
   }
 
 
@@ -182,6 +180,11 @@ public class CoreAdminHandler extends RequestHandlerBase {
           break;
         }
 
+        case REQUESTSYNCSHARD: {
+          this.handleRequestSyncAction(req, rsp);
+          break;
+        }
+
         default: {
           doPersist = this.handleCustomAction(req, rsp);
           break;
@@ -655,7 +658,7 @@ public class CoreAdminHandler extends RequestHandlerBase {
   protected void handleRequestRecoveryAction(SolrQueryRequest req,
       SolrQueryResponse rsp) throws IOException {
     final SolrParams params = req.getParams();
 
     log.info("The leader requested that we recover");
     String cname = params.get(CoreAdminParams.CORE);
     if (cname == null) {
       cname = "";
@@ -676,6 +679,48 @@ public class CoreAdminHandler extends RequestHandlerBase {
     }
   }
 
+  protected void handleRequestSyncAction(SolrQueryRequest req,
+      SolrQueryResponse rsp) throws IOException {
+    final SolrParams params = req.getParams();
+
+    log.info("I have been requested to sync up my shard");
+    ZkController zkController = coreContainer.getZkController();
+    if (zkController == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Only valid for SolrCloud");
+    }
+
+    String cname = params.get(CoreAdminParams.CORE);
+    if (cname == null) {
+      throw new IllegalArgumentException(CoreAdminParams.CORE + " is required");
+    }
+    SolrCore core = null;
+    try {
+      core = coreContainer.getCore(cname);
+      if (core != null) {
+        SyncStrategy syncStrategy = new SyncStrategy();
+
+        Map<String,String> props = new HashMap<String,String>();
+        props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl());
+        props.put(ZkStateReader.CORE_NAME_PROP, cname);
+        props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName());
+
+        boolean success = syncStrategy.sync(zkController, core, new ZkNodeProps(props));
+        if (!success) {
+          throw new SolrException(ErrorCode.SERVER_ERROR, "Sync Failed");
+        }
+      } else {
+        SolrException.log(log, "Cound not find core to call sync:" + cname);
+      }
+    } finally {
+      // no recoveryStrat close for now
+      if (core != null) {
+        core.close();
+      }
+    }
+
+  }
+
   protected void handleWaitForStateAction(SolrQueryRequest req,
       SolrQueryResponse rsp) throws IOException, InterruptedException {
     final SolrParams params = req.getParams();
@@ -256,13 +256,12 @@ class SimpleStats {
     FieldCache.DocTermsIndex facetTermsIndex;
     for( String facetField : facet ) {
       SchemaField fsf = searcher.getSchema().getField(facetField);
-      FieldType facetFieldType = fsf.getType();
-
-      if (facetFieldType.isTokenized() || facetFieldType.isMultiValued()) {
+      if ( fsf.multiValued()) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Stats can only facet on single-valued fields, not: " + facetField
-          + "[" + facetFieldType + "]");
+          "Stats can only facet on single-valued fields, not: " + facetField );
       }
 
       try {
         facetTermsIndex = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), facetField);
       }
@@ -55,12 +55,19 @@ public final class DefaultSolrCoreState extends SolrCoreState {
   @Override
   public synchronized RefCounted<IndexWriter> getIndexWriter(SolrCore core)
       throws IOException {
+
+    if (core == null) {
+      // core == null is a signal to just return the current writer, or null if none.
+      if (refCntWriter != null) refCntWriter.incref();
+      return refCntWriter;
+    }
+
     while (pauseWriter) {
       try {
         wait();
       } catch (InterruptedException e) {}
     }
 
     if (indexWriter == null) {
       indexWriter = createMainIndexWriter(core, "DirectUpdateHandler2", false,
           false);
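Callers of this `core == null` path receive the live writer wrapped in a `RefCounted` whose count was just bumped, and must balance that with a `decref()`, exactly as the `SolrCore` constructor above does in its `finally` block. A minimal sketch of the ref-counting pattern itself (illustrative, not Solr's actual class):

```java
// Sketch of the RefCounted discipline: the resource is shared, and only the
// last holder's decref() releases it.
import java.util.concurrent.atomic.AtomicInteger;

abstract class RefCountedSketch<T> {
  private final T resource;
  private final AtomicInteger refCount = new AtomicInteger(1);

  RefCountedSketch(T resource) { this.resource = resource; }

  T get() { return resource; }          // borrow; never close directly
  void incref() { refCount.incrementAndGet(); }
  void decref() {
    if (refCount.decrementAndGet() == 0) {
      close();                          // last holder releases the resource
    }
  }
  protected abstract void close();
}
```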
@@ -343,7 +343,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
 
     // currently for testing purposes.  Do a delete of complete index w/o worrying about versions, don't log, clean up most state in update log, etc
     if (delAll && cmd.getVersion() == -Long.MAX_VALUE) {
-      synchronized (this) {
+      synchronized (solrCoreState) {
         deleteAll();
         ulog.deleteAll();
         return;
@@ -356,7 +356,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
     // a realtime view of the index.  When a new searcher is opened after a DBQ, that
     // flag can be cleared.  If those thing happen concurrently, it's not thread safe.
     //
-    synchronized (this) {
+    synchronized (solrCoreState) {
       if (delAll) {
         deleteAll();
       } else {
@@ -392,7 +392,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
     Term idTerm = new Term(idField.getName(), cmd.getIndexedId());
 
     // see comment in deleteByQuery
-    synchronized (this) {
+    synchronized (solrCoreState) {
       RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
       try {
         IndexWriter writer = iw.get();
@@ -518,7 +518,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
     }
 
     if (!cmd.softCommit) {
-      synchronized (this) { // sync is currently needed to prevent preCommit
+      synchronized (solrCoreState) { // sync is currently needed to prevent preCommit
                             // from being called between preSoft and
                             // postSoft... see postSoft comments.
         if (ulog != null) ulog.preCommit(cmd);
@@ -547,14 +547,14 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
 
     if (cmd.softCommit) {
       // ulog.preSoftCommit();
-      synchronized (this) {
+      synchronized (solrCoreState) {
         if (ulog != null) ulog.preSoftCommit(cmd);
         core.getSearcher(true, false, waitSearcher, true);
         if (ulog != null) ulog.postSoftCommit(cmd);
       }
       // ulog.postSoftCommit();
     } else {
-      synchronized (this) {
+      synchronized (solrCoreState) {
         if (ulog != null) ulog.preSoftCommit(cmd);
         if (cmd.openSearcher) {
           core.getSearcher(true, false, waitSearcher);
@@ -705,7 +705,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
     // TODO: keep other commit callbacks from being called?
     // this.commit(cmd); // too many test failures using this method... is it because of callbacks?
 
-    synchronized (this) {
+    synchronized (solrCoreState) {
       ulog.preCommit(cmd);
     }
 
@@ -714,7 +714,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
     commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis()));
     writer.commit(commitData);
 
-    synchronized (this) {
+    synchronized (solrCoreState) {
       ulog.postCommit(cmd);
     }
   }
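The switch from `synchronized (this)` to `synchronized (solrCoreState)` moves the monitor to the object that survives a core reload: two update-handler instances locking on themselves would never exclude each other, while locking on the shared core state gives one mutual-exclusion scope across the old and new handler. A tiny illustrative sketch of that distinction (names hypothetical):

```java
// Sketch: any number of instances constructed around the same sharedState
// object serialize through one monitor; synchronized(this) would not.
public class SharedLockSketch {
  private final Object sharedState; // stands in for solrCoreState

  public SharedLockSketch(Object sharedState) {
    this.sharedState = sharedState;
  }

  public void criticalUpdate(Runnable work) {
    synchronized (sharedState) { // all instances sharing this state serialize here
      work.run();
    }
  }
}
```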
@@ -165,7 +165,7 @@ public class PeerSync {
     String myURL = "";
 
     if (zkController != null) {
-      myURL = zkController.getZkServerAddress();
+      myURL = zkController.getBaseUrl();
     }
 
     // TODO: core name turns up blank in many tests - find URL if cloud enabled?
@@ -24,12 +24,16 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Future;
-import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
@@ -49,17 +53,13 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
 public class SolrCmdDistributor {
   private static final int MAX_RETRIES_ON_FORWARD = 6;
   public static Logger log = LoggerFactory.getLogger(SolrCmdDistributor.class);
 
   // TODO: shut this thing down
   // TODO: this cannot be per instance...
-  static ThreadPoolExecutor commExecutor = new ThreadPoolExecutor(0,
-      Integer.MAX_VALUE, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
-      new DefaultSolrThreadFactory("cmdDistribExecutor"));
+  static BoundedExecutor commExecutor;
 
   static final HttpClient client;
 
@@ -91,8 +91,22 @@ public class SolrCmdDistributor {
     ModifiableSolrParams params;
   }
 
-  public SolrCmdDistributor() {
+  public SolrCmdDistributor(int numHosts) {
+
+    BoundedExecutor executor = null;
+    synchronized (SolrCmdDistributor.class) {
+      if (commExecutor == null || commExecutor.getMaximumPoolSize() != numHosts) {
+        // we don't shutdown the previous because all it's threads will die
+        int maxPoolSize = Math.max(8, (numHosts-1) * 8);
+        commExecutor = new BoundedExecutor(0, maxPoolSize, 5,
+            TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(maxPoolSize * 2),
+            new DefaultSolrThreadFactory("cmdDistribExecutor"));
+      }
+      executor = commExecutor;
+    }
+
+    completionService = new ExecutorCompletionService<Request>(executor);
+    pending = new HashSet<Future<Request>>();
   }
 
   public void finish() {
@@ -297,10 +311,7 @@ public class SolrCmdDistributor {
   }
 
   public void submit(final Request sreq) {
-    if (completionService == null) {
-      completionService = new ExecutorCompletionService<Request>(commExecutor);
-      pending = new HashSet<Future<Request>>();
-    }
     final String url = sreq.node.getUrl();
 
     Callable<Request> task = new Callable<Request>() {
@@ -502,4 +513,40 @@ public class SolrCmdDistributor {
       return nodeProps;
     }
   }
+
+  public class BoundedExecutor extends ThreadPoolExecutor {
+    private final Semaphore semaphore;
+
+    public BoundedExecutor(int corePoolSize,
+        int maximumPoolSize, long keepAliveTime, TimeUnit unit,
+        BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
+      super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
+      this.semaphore = new Semaphore(maximumPoolSize);
+    }
+
+    @Override
+    public void execute(final Runnable command) {
+      try {
+        semaphore.acquire();
+      } catch (InterruptedException e1) {
+        throw new RuntimeException();
+      }
+      try {
+        super.execute(new Runnable() {
+          public void run() {
+            try {
+              command.run();
+            } finally {
+              semaphore.release();
+            }
+          }
+        });
+      } catch (RejectedExecutionException e) {
+        semaphore.release();
+        throw e;
+      }
+    }
+  }
 }
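A self-contained demo of the bounded-executor pattern above (a local copy with a small driver, not Solr's nested class): the semaphore makes `execute()` block the submitter once `maximumPoolSize` tasks are in flight, instead of growing an unbounded backlog or rejecting work.

```java
// Demo of the semaphore-bounded executor pattern shown in the diff.
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedExecutorDemo extends ThreadPoolExecutor {
  private final Semaphore semaphore;

  public BoundedExecutorDemo(int maxPoolSize) {
    super(0, maxPoolSize, 5, TimeUnit.SECONDS,
        new ArrayBlockingQueue<Runnable>(maxPoolSize * 2));
    this.semaphore = new Semaphore(maxPoolSize);
  }

  @Override
  public void execute(final Runnable command) {
    try {
      semaphore.acquire(); // blocks the submitter when the pool is saturated
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    }
    try {
      super.execute(new Runnable() {
        public void run() {
          try { command.run(); } finally { semaphore.release(); }
        }
      });
    } catch (RuntimeException e) {
      semaphore.release(); // includes RejectedExecutionException
      throw e;
    }
  }

  public static void main(String[] args) throws Exception {
    BoundedExecutorDemo pool = new BoundedExecutorDemo(2);
    for (int i = 0; i < 6; i++) { // submissions beyond 2 in flight wait for a permit
      final int n = i;
      pool.execute(new Runnable() {
        public void run() {
          try { Thread.sleep(100); } catch (InterruptedException ignored) {}
          System.out.println("task " + n + " done");
        }
      });
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}
```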
@@ -138,7 +138,7 @@ public class UpdateLog implements PluginInfoInitialized {
 
   private SyncLevel defaultSyncLevel = SyncLevel.FLUSH;
 
-  private volatile UpdateHandler uhandler;    // a core reload can change this reference!
+  volatile UpdateHandler uhandler;    // a core reload can change this reference!
   private volatile boolean cancelApplyBufferUpdate;
   List<Long> startingVersions;
   int startingOperation;  // last operation in the logs on startup
@@ -177,6 +177,9 @@ public class UpdateLog implements PluginInfoInitialized {
       if (debug) {
         log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", next id=" + id, " this is a reopen... nothing else to do.");
       }
+
+      versionInfo.reload();
+
       // on a normal reopen, we currently shouldn't have to do anything
       return;
     }
@@ -209,7 +212,7 @@ public class UpdateLog implements PluginInfoInitialized {
       if (newestLogsOnStartup.size() >= 2) break;
     }
 
-    versionInfo = new VersionInfo(uhandler, 256);
+    versionInfo = new VersionInfo(this, 256);
 
     // TODO: these startingVersions assume that we successfully recover from all non-complete tlogs.
     UpdateLog.RecentUpdates startingUpdates = getRecentUpdates();
@@ -19,7 +19,6 @@ package org.apache.solr.update;
 
 import java.io.IOException;
-import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -36,16 +35,15 @@ import org.apache.solr.util.RefCounted;
 public class VersionInfo {
   public static final String VERSION_FIELD="_version_";
 
-  private SolrCore core;
-  private UpdateHandler updateHandler;
+  private final UpdateLog ulog;
   private final VersionBucket[] buckets;
   private SchemaField versionField;
   private SchemaField idField;
   final ReadWriteLock lock = new ReentrantReadWriteLock(true);
 
-  public VersionInfo(UpdateHandler updateHandler, int nBuckets) {
-    this.updateHandler = updateHandler;
-    this.core = updateHandler.core;
+  public VersionInfo(UpdateLog ulog, int nBuckets) {
+    this.ulog = ulog;
+    SolrCore core = ulog.uhandler.core;
     versionField = core.getSchema().getFieldOrNull(VERSION_FIELD);
     idField = core.getSchema().getUniqueKeyField();
     buckets = new VersionBucket[ BitUtil.nextHighestPowerOfTwo(nBuckets) ];
@@ -54,6 +52,10 @@ public class VersionInfo {
     }
   }
 
+  public void reload() {
+
+  }
+
   public SchemaField getVersionField() {
     return versionField;
   }
@@ -143,14 +145,14 @@ public class VersionInfo {
   }
 
   public Long lookupVersion(BytesRef idBytes) {
-    return updateHandler.ulog.lookupVersion(idBytes);
+    return ulog.lookupVersion(idBytes);
   }
 
   public Long getVersionFromIndex(BytesRef idBytes) {
     // TODO: we could cache much of this and invalidate during a commit.
     // TODO: most DocValues classes are threadsafe - expose which.
 
-    RefCounted<SolrIndexSearcher> newestSearcher = core.getRealtimeSearcher();
+    RefCounted<SolrIndexSearcher> newestSearcher = ulog.uhandler.core.getRealtimeSearcher();
     try {
       SolrIndexSearcher searcher = newestSearcher.get();
       long lookup = searcher.lookupId(idBytes);
@@ -19,9 +19,12 @@ package org.apache.solr.update.processor;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
@@ -130,6 +133,8 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
   private boolean forwardToLeader = false;
   private List<Node> nodes;
 
+  private int numNodes;
+
 
   public DistributedUpdateProcessor(SolrQueryRequest req,
       SolrQueryResponse rsp, UpdateRequestProcessor next) {
@@ -164,7 +169,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
       collection = cloudDesc.getCollectionName();
     }
 
-    cmdDistrib = new SolrCmdDistributor();
+    cmdDistrib = new SolrCmdDistributor(numNodes);
   }
 
   private List<Node> setupRequest(int hash) {
@@ -172,6 +177,9 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 
     // if we are in zk mode...
     if (zkEnabled) {
+      // set num nodes
+      numNodes = zkController.getCloudState().getLiveNodes().size();
+
       // the leader is...
       // TODO: if there is no leader, wait and look again
       // TODO: we are reading the leader from zk every time - we should cache
@@ -204,8 +212,22 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
             coreName, null, ZkStateReader.DOWN);
         if (replicaProps != null) {
           nodes = new ArrayList<Node>(replicaProps.size());
+          // check for test param that lets us miss replicas
+          String[] skipList = req.getParams().getParams("test.distrib.skip.servers");
+          Set<String> skipListSet = null;
+          if (skipList != null) {
+            skipListSet = new HashSet<String>(skipList.length);
+            skipListSet.addAll(Arrays.asList(skipList));
+          }
+
           for (ZkCoreNodeProps props : replicaProps) {
-            nodes.add(new StdNode(props));
+            if (skipList != null) {
+              if (!skipListSet.contains(props.getCoreUrl())) {
+                nodes.add(new StdNode(props));
+              }
+            } else {
+              nodes.add(new StdNode(props));
+            }
           }
         }
 
@@ -110,9 +110,15 @@ public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearc
     waitForRecoveriesToFinish(collection, zkStateReader, verbose, true);
   }
 
-  protected void waitForRecoveriesToFinish(String collection,
-      ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout)
+  protected void waitForRecoveriesToFinish(String collection, ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout)
       throws Exception {
+    waitForRecoveriesToFinish(collection, zkStateReader, verbose, failOnTimeout, 120 * (TEST_NIGHTLY ? 2 : 1) * RANDOM_MULTIPLIER);
+  }
+
+  protected void waitForRecoveriesToFinish(String collection,
+      ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout, int timeoutSeconds)
+      throws Exception {
+    log.info("Wait for recoveries to finish - collection: " + collection + " failOnTimeout:" + failOnTimeout + " timeout (sec):" + timeoutSeconds);
     boolean cont = true;
     int cnt = 0;
 
@@ -139,7 +145,7 @@ public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearc
         }
       }
     }
-    if (!sawLiveRecovering || cnt == 520) {
+    if (!sawLiveRecovering || cnt == timeoutSeconds) {
       if (!sawLiveRecovering) {
         if (verbose) System.out.println("no one is recoverying");
       } else {
@@ -36,6 +36,8 @@ import org.apache.solr.core.CoreContainer;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.zookeeper.KeeperException;
 import org.eclipse.jetty.servlet.FilterHolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The monkey can stop random or specific jetties used with SolrCloud.
@@ -45,7 +47,8 @@ import org.eclipse.jetty.servlet.FilterHolder;
  *
  */
 public class ChaosMonkey {
+  private static Logger log = LoggerFactory.getLogger(ChaosMonkey.class);
 
   private static final int CONLOSS_PERCENT = 3; //30%
   private static final int EXPIRE_PERCENT = 4; //40%
   private Map<String,List<CloudJettyRunner>> shardToJetty;
@@ -82,9 +85,12 @@ public class ChaosMonkey {
     Random random = LuceneTestCase.random();
     expireSessions = random.nextBoolean();
     causeConnectionLoss = random.nextBoolean();
+    monkeyLog("init - expire sessions:" + expireSessions
+        + " cause connection loss:" + causeConnectionLoss);
   }
 
   public void expireSession(JettySolrRunner jetty) {
+    monkeyLog("expire session for " + jetty.getLocalPort() + " !");
     SolrDispatchFilter solrDispatchFilter = (SolrDispatchFilter) jetty.getDispatchFilter().getFilter();
     if (solrDispatchFilter != null) {
       CoreContainer cores = solrDispatchFilter.getCores();
@@ -106,8 +112,9 @@ public class ChaosMonkey {
   }
 
   public void randomConnectionLoss() throws KeeperException, InterruptedException {
-    String sliceName = getRandomSlice();
+    monkeyLog("cause connection loss!");
 
+    String sliceName = getRandomSlice();
     JettySolrRunner jetty = getRandomJetty(sliceName, aggressivelyKillLeaders);
     if (jetty != null) {
       causeConnectionLoss(jetty);
@@ -145,7 +152,7 @@ public class ChaosMonkey {
   }
 
   public static void stop(JettySolrRunner jetty) throws Exception {
-
+    monkeyLog("stop shard! " + jetty.getLocalPort());
     // get a clean shutdown so that no dirs are left open...
     FilterHolder fh = jetty.getDispatchFilter();
     if (fh != null) {
@@ -162,6 +169,7 @@ public class ChaosMonkey {
   }
 
   public static void kill(JettySolrRunner jetty) throws Exception {
+    monkeyLog("kill shard! " + jetty.getLocalPort());
     FilterHolder fh = jetty.getDispatchFilter();
     SolrDispatchFilter sdf = null;
     if (fh != null) {
@@ -288,6 +296,7 @@ public class ChaosMonkey {
 
     if (numActive < 2) {
       // we cannot kill anyone
+      monkeyLog("only one active node in shard - monkey cannot kill :(");
       return null;
     }
     Random random = LuceneTestCase.random();
@@ -306,17 +315,19 @@ public class ChaosMonkey {
       boolean isLeader = leader.get(ZkStateReader.NODE_NAME_PROP).equals(jetties.get(index).nodeName);
       if (!aggressivelyKillLeaders && isLeader) {
         // we don't kill leaders...
+        monkeyLog("abort! I don't kill leaders");
         return null;
       }
     }
 
     if (jetty.getLocalPort() == -1) {
       // we can't kill the dead
+      monkeyLog("abort! This guy is already dead");
       return null;
     }
 
     //System.out.println("num active:" + numActive + " for " + slice + " sac:" + jetty.getLocalPort());
 
     monkeyLog("chose a victim! " + jetty.getLocalPort());
     return jetty;
   }
 
@@ -335,6 +346,7 @@ public class ChaosMonkey {
   // synchronously starts and stops shards randomly, unless there is only one
   // active shard up for a slice or if there is one active and others recovering
   public void startTheMonkey(boolean killLeaders, final int roundPause) {
+    monkeyLog("starting");
     this.aggressivelyKillLeaders = killLeaders;
     startTime = System.currentTimeMillis();
     // TODO: when kill leaders is on, lets kill a higher percentage of leaders
@@ -409,14 +421,18 @@ public class ChaosMonkey {
             e.printStackTrace();
           }
         }
-        System.out.println("I ran for " + (System.currentTimeMillis() - startTime)/1000.0f + "sec. I stopped " + stops + " and I started " + starts
+        monkeyLog("finished");
+        monkeyLog("I ran for " + (System.currentTimeMillis() - startTime)/1000.0f + "sec. I stopped " + stops + " and I started " + starts
             + ". I also expired " + expires.get() + " and caused " + connloss
            + " connection losses");
       }
     }.start();
   }
 
+  public static void monkeyLog(String msg) {
+    log.info("monkey: " + msg);
+  }
+
   public void stopTheMonkey() {
     stop = true;
   }
@@ -20,7 +20,6 @@ package org.apache.solr.cloud;
 import java.net.ConnectException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.http.client.HttpClient;
 import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -35,10 +34,15 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Slow
+@Ignore("ignore while investigating jenkins fails")
 public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
+  public static Logger log = LoggerFactory.getLogger(ChaosMonkeyNothingIsSafeTest.class);
+
+  private static final int BASE_RUN_LENGTH = 180000;
+
   @BeforeClass
   public static void beforeSuperClass() {
@@ -53,7 +57,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
   public void setUp() throws Exception {
     super.setUp();
     // TODO use @Noisy annotation as we expect lots of exceptions
-    ignoreException(".*");
+    //ignoreException(".*");
     System.setProperty("numShards", Integer.toString(sliceCount));
   }
 
@@ -67,8 +71,8 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
 
   public ChaosMonkeyNothingIsSafeTest() {
     super();
-    sliceCount = atLeast(2);
-    shardCount = atLeast(sliceCount * 2);
+    sliceCount = 2;
+    shardCount = 6;
   }
 
   @Override
@@ -99,8 +103,9 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
       ftIndexThread.start();
 
       chaosMonkey.startTheMonkey(true, 1500);
+      int runLength = atLeast(BASE_RUN_LENGTH);
       try {
-        Thread.sleep(180000);
+        Thread.sleep(runLength);
       } finally {
        chaosMonkey.stopTheMonkey();
       }
@@ -124,7 +129,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
       Thread.sleep(2000);
 
       // wait until there are no recoveries...
-      waitForThingsToLevelOut();
+      waitForThingsToLevelOut(Math.round((runLength / 1000.0f / 5.0f)));
 
       // make sure we again have leaders for each shard
       for (int j = 1; j < sliceCount; j++) {
@@ -156,35 +161,6 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest {
       }
     }
   }
-
-  private void waitForThingsToLevelOut() throws Exception {
-    int cnt = 0;
-    boolean retry = false;
-    do {
-      waitForRecoveriesToFinish(VERBOSE);
-
-      try {
-        commit();
-      } catch (Exception e) {
-        // we don't care if this commit fails on some nodes
-      }
-
-      updateMappingsFromZk(jettys, clients);
-
-      Set<String> theShards = shardToClient.keySet();
-      String failMessage = null;
-      for (String shard : theShards) {
-        failMessage = checkShardConsistency(shard, false);
-      }
-
-      if (failMessage != null) {
-        retry = true;
-      }
-      cnt++;
-      if (cnt > 10) break;
-      Thread.sleep(4000);
-    } while (retry);
-  }
-
   // skip the randoms - they can deadlock...
   protected void indexr(Object... fields) throws Exception {
@@ -19,7 +19,6 @@ package org.apache.solr.cloud;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.common.SolrInputDocument;
@@ -32,6 +31,8 @@ import org.junit.Ignore;
 @Ignore("SOLR-3126")
 public class ChaosMonkeySafeLeaderTest extends FullSolrCloudTest {
 
+  private static final int BASE_RUN_LENGTH = 120000;
+
   @BeforeClass
   public static void beforeSuperClass() {
 
@@ -66,7 +67,7 @@ public class ChaosMonkeySafeLeaderTest extends FullSolrCloudTest {
   public ChaosMonkeySafeLeaderTest() {
     super();
     sliceCount = atLeast(2);
-    shardCount = atLeast(sliceCount);
+    shardCount = atLeast(sliceCount*2);
   }
 
   @Override
@@ -89,8 +90,8 @@ public class ChaosMonkeySafeLeaderTest extends FullSolrCloudTest {
     }
 
     chaosMonkey.startTheMonkey(false, 500);
-
-    Thread.sleep(atLeast(8000));
+    int runLength = atLeast(BASE_RUN_LENGTH);
+    Thread.sleep(runLength);
 
     chaosMonkey.stopTheMonkey();
 
@@ -109,40 +110,12 @@ public class ChaosMonkeySafeLeaderTest extends FullSolrCloudTest {
 
     // try and wait for any replications and what not to finish...
 
-    waitForThingsToLevelOut();
+    waitForThingsToLevelOut(Math.round((runLength / 1000.0f / 5.0f)));
 
     checkShardConsistency(true, true);
 
     if (VERBOSE) System.out.println("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
   }
-
-  private void waitForThingsToLevelOut() throws Exception {
-    int cnt = 0;
-    boolean retry = false;
-    do {
-      waitForRecoveriesToFinish(false);
-
-      commit();
-
-      updateMappingsFromZk(jettys, clients);
-
-      Set<String> theShards = shardToClient.keySet();
-      String failMessage = null;
-      for (String shard : theShards) {
-        failMessage = checkShardConsistency(shard, false);
-      }
-
-      if (failMessage != null) {
-        retry = true;
-      } else {
-        retry = false;
-      }
-
-      cnt++;
-      if (cnt > 10) break;
-      Thread.sleep(2000);
-    } while (retry);
-  }
-
   // skip the randoms - they can deadlock...
   protected void indexr(Object... fields) throws Exception {
@@ -55,6 +55,8 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -64,6 +66,8 @@ import org.junit.BeforeClass;
  */
 @Slow
 public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
+  private static Logger log = LoggerFactory.getLogger(FullSolrCloudTest.class);
+
   @BeforeClass
   public static void beforeFullSolrCloudTest() {
     // shorten the log output more for this test type
@@ -103,12 +107,13 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
   protected volatile ZkStateReader zkStateReader;
 
   private Map<String,SolrServer> shardToLeaderClient = new HashMap<String,SolrServer>();
-  private Map<String,CloudJettyRunner> shardToLeaderJetty = new HashMap<String,CloudJettyRunner>();
+  protected Map<String,CloudJettyRunner> shardToLeaderJetty = new HashMap<String,CloudJettyRunner>();
 
   class CloudJettyRunner {
     JettySolrRunner jetty;
     String nodeName;
    String coreNodeName;
+    String url;
   }
 
   static class CloudSolrServerClient {
@@ -403,6 +408,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
         cjr.jetty = jetty;
         cjr.nodeName = shard.getValue().get(ZkStateReader.NODE_NAME_PROP);
         cjr.coreNodeName = shard.getKey();
+        cjr.url = shard.getValue().get(ZkStateReader.BASE_URL_PROP) + "/" + shard.getValue().get(ZkStateReader.CORE_NAME_PROP);
         list.add(cjr);
         if (isLeader) {
           shardToLeaderJetty.put(slice.getKey(), cjr);
@@ -538,6 +544,9 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
 
     indexAbunchOfDocs();
 
+    // check again
+    waitForRecoveriesToFinish(false);
+
     commit();
 
     assertDocCounts(VERBOSE);
@@ -653,6 +662,11 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
     super.waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, verbose);
   }
 
+  protected void waitForRecoveriesToFinish(boolean verbose, int timeoutSeconds)
+      throws Exception {
+    super.waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, verbose, true, timeoutSeconds);
+  }
+
   private void brindDownShardIndexSomeDocsAndRecover() throws Exception {
     SolrQuery query = new SolrQuery("*:*");
     query.set("distrib", false);
@@ -660,7 +674,6 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
     commit();
 
     long deadShardCount = shardToClient.get(SHARD2).get(0).query(query).getResults().getNumFound();
-    System.err.println("dsc:" + deadShardCount);
 
     query("q", "*:*", "sort", "n_tl1 desc");
 
@@ -698,7 +711,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
     // to talk to a downed node causes grief
     tries = 0;
     while (((SolrDispatchFilter) shardToJetty.get(SHARD2).get(1).jetty.getDispatchFilter().getFilter()).getCores().getZkController().getZkStateReader().getCloudState().liveNodesContain(clientToInfo.get(new CloudSolrServerClient(shardToClient.get(SHARD2).get(0))).get(ZkStateReader.NODE_NAME_PROP))) {
-      if (tries++ == 60) {
+      if (tries++ == 120) {
        fail("Shard still reported as live in zk");
      }
      Thread.sleep(1000);
@@ -1320,6 +1333,36 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
 
   };
 
+  protected void waitForThingsToLevelOut(int waitForRecTimeSeconds) throws Exception {
+    log.info("Wait for recoveries to finish - wait " + waitForRecTimeSeconds + " for each attempt");
+    int cnt = 0;
+    boolean retry = false;
+    do {
+      waitForRecoveriesToFinish(VERBOSE, waitForRecTimeSeconds);
+
+      try {
+        commit();
+      } catch (Exception e) {
+        // we don't care if this commit fails on some nodes
+      }
+
+      updateMappingsFromZk(jettys, clients);
+
+      Set<String> theShards = shardToClient.keySet();
+      String failMessage = null;
+      for (String shard : theShards) {
+        failMessage = checkShardConsistency(shard, false);
+      }
+
+      if (failMessage != null) {
+        retry = true;
+      }
+      cnt++;
+      if (cnt > 2) break;
+      Thread.sleep(4000);
+    } while (retry);
+  }
+
   @Override
   @After
   public void tearDown() throws Exception {
@@ -1364,7 +1407,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase {
         + DEFAULT_COLLECTION;
     HttpSolrServer s = new HttpSolrServer(url);
     s.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
-    s.setSoTimeout(15000);
+    s.setSoTimeout(20000);
     s.setDefaultMaxConnectionsPerHost(100);
     s.setMaxTotalConnections(100);
     return s;
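The shared `waitForThingsToLevelOut` pulled into the base class is the standard shape for checking an eventually-consistent cluster: repeat the consistency check with a pause, bounded by a fixed number of rounds. A generic sketch of that loop (names illustrative, not Solr's API):

```java
// Sketch: retry an eventually-consistent check a bounded number of times.
import java.util.concurrent.Callable;

public class LevelOutSketch {
  /** Returns true if check() eventually reports consistency. */
  static boolean waitUntilConsistent(Callable<Boolean> check, int maxRounds,
      long pauseMs) throws Exception {
    for (int round = 0; round < maxRounds; round++) {
      if (check.call()) {
        return true;         // consistent: stop retrying
      }
      Thread.sleep(pauseMs); // give recoveries time to finish
    }
    return false;            // still inconsistent after maxRounds
  }
}
```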
@@ -26,6 +26,7 @@ import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.common.SolrInputDocument;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;