mirror of https://github.com/apache/lucene.git
LUCENE-1979: remove more deprecations in the index package.
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@825288 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
  parent 83ab2542db
  commit 663adad4d7
@@ -73,7 +73,7 @@ API Changes
 * LUCENE-944: Remove deprecated methods in BooleanQuery. (Michael Busch)

 * LUCENE-1979: Remove remaining deprecations from indexer package.
-  (Michael Busch)
+  (Uwe Schindler, Michael Busch)

 Bug fixes
@@ -77,36 +77,6 @@ public class InstantiatedTermEnum
   public void close() {
   }
 
-  public boolean skipTo(Term target) throws IOException {
-
-    // this method is not known to be used by anything
-    // in lucene for many years now, so there is
-    // very to gain by optimizing this method more,
-
-    if (reader.getIndex().getOrderedTerms().length == 0) {
-      return false;
-    }
-
-    InstantiatedTerm term = reader.getIndex().findTerm(target);
-    if (term != null) {
-      this.term = term;
-      nextTermIndex = term.getTermIndex() + 1;
-      return true;
-    } else {
-      int pos = Arrays.binarySearch(reader.getIndex().getOrderedTerms(), target, InstantiatedTerm.termComparator);
-      if (pos < 0) {
-        pos = -1 - pos;
-      }
-
-      if (pos > reader.getIndex().getOrderedTerms().length) {
-        return false;
-      }
-      this.term = reader.getIndex().getOrderedTerms()[pos];
-      nextTermIndex = pos + 1;
-      return true;
-    }
-  }
 }

@@ -105,7 +105,6 @@ public class TestEmptyIndex extends TestCase {
 
     assertNull(terms.term());
     assertFalse(terms.next());
-    assertFalse(terms.skipTo(new Term("foo", "bar")));
 
   }
@@ -391,42 +391,6 @@ public class TestIndicesEquals extends TestCase {
       }
     }
 
-    // compare term enumeration seeking
-
-    aprioriTermEnum = aprioriReader.terms();
-
-    TermEnum aprioriTermEnumSeeker = aprioriReader.terms();
-    TermEnum testTermEnumSeeker = testReader.terms();
-
-    while (aprioriTermEnum.next()) {
-      if (aprioriTermEnumSeeker.skipTo(aprioriTermEnum.term())) {
-        assertTrue(testTermEnumSeeker.skipTo(aprioriTermEnum.term()));
-        assertEquals(aprioriTermEnumSeeker.term(), testTermEnumSeeker.term());
-      } else {
-        assertFalse(testTermEnumSeeker.skipTo(aprioriTermEnum.term()));
-      }
-    }
-
-    aprioriTermEnum.close();
-    aprioriTermEnumSeeker.close();
-    testTermEnumSeeker.close();
-
-    // skip to non existing terms
-
-    aprioriTermEnumSeeker = aprioriReader.terms();
-    testTermEnumSeeker = testReader.terms();
-
-    aprioriTermEnum = aprioriReader.terms();
-    aprioriTermEnum.next();
-    Term nonExistingTerm = new Term(aprioriTermEnum.term().field(), "bzzzzoo993djdj380sdf");
-    aprioriTermEnum.close();
-
-    assertEquals(aprioriTermEnumSeeker.skipTo(nonExistingTerm), testTermEnumSeeker.skipTo(nonExistingTerm));
-    assertEquals(aprioriTermEnumSeeker.term(), testTermEnumSeeker.term());
-
-    aprioriTermEnumSeeker.close();
-    testTermEnumSeeker.close();
-
     // compare term vectors and position vectors
 
     for (int documentNumber = 0; documentNumber < aprioriReader.numDocs(); documentNumber++) {
@@ -99,9 +99,8 @@ public class TermVectorAccessor {
       positions.clear();
     }
 
-    TermEnum termEnum = indexReader.terms();
-    if (termEnum.skipTo(new Term(field, ""))) {
-
+    TermEnum termEnum = indexReader.terms(new Term(field, ""));
+    if (termEnum.term() != null) {
       while (termEnum.term().field() == field) {
         TermPositions termPositions = indexReader.termPositions(termEnum.term());
         if (termPositions.skipTo(documentNumber)) {

@@ -125,13 +124,11 @@ public class TermVectorAccessor {
             break;
           }
         }
 
         mapper.setDocumentNumber(documentNumber);
         mapper.setExpectations(field, tokens.size(), false, !mapper.isIgnoringPositions());
         for (int i = 0; i < tokens.size(); i++) {
           mapper.map((String) tokens.get(i), ((Integer) frequencies.get(i)).intValue(), (TermVectorOffsetInfo[]) null, (int[]) positions.get(i));
         }
 
       }
       termEnum.close();
@@ -45,7 +45,7 @@ public class IndexMergeTool {
     }
 
     System.out.println("Merging...");
-    writer.addIndexes(indexes);
+    writer.addIndexesNoOptimize(indexes);
 
     System.out.println("Optimizing...");
     writer.optimize();
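Migration note (not part of the diff): callers coming off the deprecated addIndexes(Directory[]) follow the same pattern IndexMergeTool uses above, addIndexesNoOptimize plus an explicit optimize() when a fully merged index is wanted. A minimal sketch against the 2.9-era API; the paths and analyzer below are placeholder choices, not taken from this commit:

    import java.io.File;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class MergeExample {
      public static void main(String[] args) throws Exception {
        // Placeholder index locations.
        Directory target = FSDirectory.open(new File("merged-index"));
        Directory[] sources = {
            FSDirectory.open(new File("part1")),
            FSDirectory.open(new File("part2")) };

        IndexWriter writer = new IndexWriter(target, new WhitespaceAnalyzer(),
            true, IndexWriter.MaxFieldLength.UNLIMITED);
        writer.addIndexesNoOptimize(sources); // merge the source segments in
        writer.optimize();                    // optional: collapse to one segment
        writer.close();
      }
    }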
@@ -48,11 +48,6 @@ import java.util.Map;
  */
 public class CheckIndex {
 
-  /** Default PrintStream for all CheckIndex instances.
-   *  @deprecated Use {@link #setInfoStream} per instance,
-   *  instead. */
-  public static PrintStream out = null;
-
   private PrintStream infoStream;
   private Directory dir;

@@ -257,7 +252,7 @@ public class CheckIndex {
   /** Create a new CheckIndex on the directory. */
   public CheckIndex(Directory dir) {
     this.dir = dir;
-    infoStream = out;
+    infoStream = null;
   }
 
   /** Set infoStream where messages should go. If null, no
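Migration note (not part of the diff): with the static CheckIndex.out gone, diagnostics are configured per instance via setInfoStream. A short fragment sketching the replacement; 'dir' is assumed to point at an existing index Directory and checkIndex() throws IOException:

    // was: CheckIndex.out = System.out;
    CheckIndex checker = new CheckIndex(dir);
    checker.setInfoStream(System.out);              // per-instance message stream
    CheckIndex.Status status = checker.checkIndex();
    if (!status.clean) {
      System.out.println("index has problems");
    }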
@@ -129,9 +129,6 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
 
     final int startLength = fieldState.length;
 
-    // deprecated
-    final boolean allowMinus1Position = docState.allowMinus1Position;
-
     try {
       int offsetEnd = fieldState.offset-1;

@@ -157,7 +154,7 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
 
         final int posIncr = posIncrAttribute.getPositionIncrement();
         fieldState.position += posIncr;
-        if (allowMinus1Position || fieldState.position > 0) {
+        if (fieldState.position > 0) {
           fieldState.position--;
         }
 

@@ -150,9 +150,6 @@ final class DocumentsWriter {
     Document doc;
     String maxTermPrefix;
 
-    // deprecated
-    boolean allowMinus1Position;
-
     // Only called by asserts
     public boolean testPoint(String name) {
       return docWriter.writer.testPoint(name);

@@ -299,11 +296,6 @@ final class DocumentsWriter {
       threadStates[i].docState.similarity = similarity;
   }
 
-  synchronized void setAllowMinus1Position() {
-    for(int i=0;i<threadStates.length;i++)
-      threadStates[i].docState.allowMinus1Position = true;
-  }
-
   /** Set how much RAM we can use before flushing. */
   synchronized void setRAMBufferSizeMB(double mb) {
     if (mb == IndexWriter.DISABLE_AUTO_FLUSH) {

@@ -40,7 +40,6 @@ final class DocumentsWriterThreadState {
     docState.infoStream = docWriter.infoStream;
     docState.similarity = docWriter.similarity;
     docState.docWriter = docWriter;
-    docState.allowMinus1Position = docWriter.writer.getAllowMinus1Position();
     consumer = docWriter.consumer.addThread(this);
   }
 

@@ -211,9 +211,6 @@ public class FilterIndexReader extends IndexReader {
 
   protected void doDelete(int n) throws CorruptIndexException, IOException { in.deleteDocument(n); }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException { doCommit(null); }
-
   protected void doCommit(Map commitUserData) throws IOException { in.commit(commitUserData); }
 
   protected void doClose() throws IOException { in.close(); }
@@ -487,37 +487,6 @@ public abstract class IndexReader implements Cloneable {
     throw new UnsupportedOperationException("This reader does not support this method.");
   }
 
-  /**<p>For IndexReader implementations that use
-   * TermInfosReader to read terms, this sets the
-   * indexDivisor to subsample the number of indexed terms
-   * loaded into memory.  This has the same effect as {@link
-   * IndexWriter#setTermIndexInterval} except that setting
-   * must be done at indexing time while this setting can be
-   * set per reader.  When set to N, then one in every
-   * N*termIndexInterval terms in the index is loaded into
-   * memory.  By setting this to a value > 1 you can reduce
-   * memory usage, at the expense of higher latency when
-   * loading a TermInfo.  The default value is 1.</p>
-   *
-   * <b>NOTE:</b> you must call this before the term
-   * index is loaded.  If the index is already loaded,
-   * an IllegalStateException is thrown.
-   * @throws IllegalStateException if the term index has already been loaded into memory
-   * @deprecated Please use {@link IndexReader#open(Directory, IndexDeletionPolicy, boolean, int)} to specify the required TermInfos index divisor instead.
-   */
-  public void setTermInfosIndexDivisor(int indexDivisor) throws IllegalStateException {
-    throw new UnsupportedOperationException("Please pass termInfosIndexDivisor up-front when opening IndexReader");
-  }
-
-  /** <p>For IndexReader implementations that use
-   * TermInfosReader to read terms, this returns the
-   * current indexDivisor as specified when the reader was
-   * opened.
-   */
-  public int getTermInfosIndexDivisor() {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
   /**
    * Check whether this IndexReader is still using the
    * current (i.e., most recently committed) version of the
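Migration note (not part of the diff): as the removed javadoc says, the term-infos index divisor is now passed up front when the reader is opened rather than set afterwards. A one-line sketch using the open overload the deprecation message points to; 'dir' is a placeholder for an existing index Directory:

    // was: reader.setTermInfosIndexDivisor(2); after opening
    // now: null deletion policy, read-only reader, divisor of 2
    IndexReader reader = IndexReader.open(dir, null, true, 2);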
@@ -180,12 +180,6 @@ public class IndexWriter {
    */
   public static final String WRITE_LOCK_NAME = "write.lock";
 
-  /**
-   * @deprecated
-   * @see LogMergePolicy#DEFAULT_MERGE_FACTOR
-   */
-  public final static int DEFAULT_MERGE_FACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR;
-
   /**
    * Value to denote a flush trigger is disabled
    */

@@ -209,12 +203,6 @@ public class IndexWriter {
    */
   public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
 
-  /**
-   * @deprecated
-   * @see LogDocMergePolicy#DEFAULT_MAX_MERGE_DOCS
-   */
-  public final static int DEFAULT_MAX_MERGE_DOCS = LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS;
-
   /**
    * Default value is 10,000. Change using {@link #setMaxFieldLength(int)}.
   */
@@ -1790,16 +1778,6 @@ public class IndexWriter {
     return analyzer;
   }
 
-  /** Returns the number of documents currently in this
-   * index, not counting deletions.
-   * @deprecated Please use {@link #maxDoc()} (same as this
-   * method) or {@link #numDocs()} (also takes deletions
-   * into account), instead. */
-  public synchronized int docCount() {
-    ensureOpen();
-    return maxDoc();
-  }
-
   /** Returns total number of docs in this index, including
    * docs not yet flushed (still in the RAM buffer),
    * not counting deletions.
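Migration note (not part of the diff): the removed docCount() simply returned maxDoc(), which is why the test updates later in this commit swap the calls one-for-one. A two-line fragment of the distinction callers now make explicitly ('writer' is a placeholder IndexWriter):

    int ignoringDeletions  = writer.maxDoc();   // what docCount() used to return
    int countingDeletions  = writer.numDocs();  // also subtracts deletions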
@@ -1994,14 +1972,14 @@ public class IndexWriter {
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException {
+  public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
       if (doFlush)
         flush(true, false, false);
     } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "deleteDocuments(Term[])");
+      handleOOM(oom, "deleteDocuments(Term..)");
     }
   }
 

@@ -2036,7 +2014,7 @@ public class IndexWriter {
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void deleteDocuments(Query[] queries) throws CorruptIndexException, IOException {
+  public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
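Migration note (not part of the diff): turning the array overloads into varargs keeps existing array-passing callers compiling while letting new callers list terms directly. A small fragment; the field and values are placeholders:

    // Both forms resolve to the same varargs method after this change:
    writer.deleteDocuments(new Term("id", "12"), new Term("id", "13"));
    writer.deleteDocuments(new Term[] { new Term("id", "12"), new Term("id", "13") });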
@@ -2692,13 +2670,6 @@ public class IndexWriter {
     finishAddIndexes();
   }
 
-  /**
-   * @deprecated Please use {@link #rollback} instead.
-   */
-  public void abort() throws IOException {
-    rollback();
-  }
-
   /**
    * Close the <code>IndexWriter</code> without committing
    * any changes that have occurred since the last commit
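Migration note (not part of the diff): abort() was already just a call to rollback(), so the replacement is a rename at the call site, as the test updates below show:

    // rollback() closes the writer and discards everything since the last commit.
    writer.rollback();   // replaces the deprecated writer.abort(); no close() needed afterwards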
@@ -2946,84 +2917,12 @@ public class IndexWriter {
       releaseRead();
   }
 
-  /** Merges all segments from an array of indexes into this index.
-   *
-   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See <a
-   * href="#OOME">above</a> for details.</p>
-   *
-   * @deprecated Use {@link #addIndexesNoOptimize} instead,
-   * then separately call {@link #optimize} afterwards if
-   * you need to.
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public void addIndexes(Directory[] dirs)
-    throws CorruptIndexException, IOException {
-
-    ensureOpen();
-
-    noDupDirs(dirs);
-
-    // Do not allow add docs or deletes while we are running:
-    docWriter.pauseAllThreads();
-
-    try {
-
-      if (infoStream != null)
-        message("flush at addIndexes");
-      flush(true, false, true);
-
-      boolean success = false;
-
-      startTransaction(false);
-
-      try {
-
-        int docCount = 0;
-        synchronized(this) {
-          ensureOpen();
-          for (int i = 0; i < dirs.length; i++) {
-            SegmentInfos sis = new SegmentInfos(); // read infos from dir
-            sis.read(dirs[i]);
-            for (int j = 0; j < sis.size(); j++) {
-              final SegmentInfo info = sis.info(j);
-              docCount += info.docCount;
-              assert !segmentInfos.contains(info);
-              segmentInfos.add(info); // add each info
-            }
-          }
-        }
-
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-
-        optimize();
-
-        success = true;
-      } finally {
-        if (success) {
-          commitTransaction();
-        } else {
-          rollbackTransaction();
-        }
-      }
-    } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "addIndexes(Directory[])");
-    } finally {
-      if (docWriter != null) {
-        docWriter.resumeAllThreads();
-      }
-    }
-  }
-
   private synchronized void resetMergeExceptions() {
     mergeExceptions = new ArrayList();
     mergeGen++;
   }
 
-  private void noDupDirs(Directory[] dirs) {
+  private void noDupDirs(Directory... dirs) {
     HashSet dups = new HashSet();
     for(int i=0;i<dirs.length;i++) {
       if (dups.contains(dirs[i]))

@@ -3084,7 +2983,7 @@ public class IndexWriter {
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void addIndexesNoOptimize(Directory[] dirs)
+  public void addIndexesNoOptimize(Directory... dirs)
     throws CorruptIndexException, IOException {
 
     ensureOpen();

@@ -3247,7 +3146,7 @@ public class IndexWriter {
    * add or delete documents (with another thread) will be
    * paused until this method completes.
    *
-   * <p>See {@link #addIndexesNoOptimize(Directory[])} for
+   * <p>See {@link #addIndexesNoOptimize} for
    * details on transactional semantics, temporary free
    * space required in the Directory, and non-CFS segments
    * on an Exception.</p>

@@ -3259,7 +3158,7 @@ public class IndexWriter {
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void addIndexes(IndexReader[] readers)
+  public void addIndexes(IndexReader... readers)
     throws CorruptIndexException, IOException {
 
     ensureOpen();

@@ -3326,7 +3225,7 @@ public class IndexWriter {
         segmentInfos.clear();                      // pop old infos & add new
         info = new SegmentInfo(mergedName, docCount, directory, false, true,
                                -1, null, false, merger.hasProx());
-        setDiagnostics(info, "addIndexes(IndexReader[])");
+        setDiagnostics(info, "addIndexes(IndexReader...)");
         segmentInfos.add(info);
       }
 

@@ -3395,7 +3294,7 @@ public class IndexWriter {
         }
       }
     } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "addIndexes(IndexReader[])");
+      handleOOM(oom, "addIndexes(IndexReader...)");
     } finally {
       if (docWriter != null) {
         docWriter.resumeAllThreads();
@@ -4930,22 +4829,6 @@ public class IndexWriter {
     throw oom;
   }
 
-  // deprecated
-  private boolean allowMinus1Position;
-
-  /** Deprecated: emulates IndexWriter's buggy behavior when
-   *  first token(s) have positionIncrement==0 (ie, prior to
-   *  fixing LUCENE-1542) */
-  public void setAllowMinus1Position() {
-    allowMinus1Position = true;
-    docWriter.setAllowMinus1Position();
-  }
-
-  // deprecated
-  boolean getAllowMinus1Position() {
-    return allowMinus1Position;
-  }
-
   // Used only by assert for testing.  Current points:
   //   startDoFlush
   //   startCommitMerge
@@ -172,20 +172,12 @@ public abstract class MergePolicy {
    * executing a merge. */
   public static class MergeException extends RuntimeException {
     private Directory dir;
-    /** @deprecated
-     *  Use {@link #MergePolicy.MergeException(String,Directory)} instead */
-    public MergeException(String message) {
-      super(message);
-    }
+
     public MergeException(String message, Directory dir) {
       super(message);
       this.dir = dir;
     }
-    /** @deprecated
-     *  Use {@link #MergePolicy.MergeException(Throwable,Directory)} instead */
-    public MergeException(Throwable exc) {
-      super(exc);
-    }
+
     public MergeException(Throwable exc, Directory dir) {
       super(exc);
       this.dir = dir;
@@ -49,7 +49,7 @@ public class MultiReader extends IndexReader implements Cloneable {
    * @param subReaders set of (sub)readers
    * @throws IOException
    */
-  public MultiReader(IndexReader[] subReaders) {
+  public MultiReader(IndexReader... subReaders) {
     initialize(subReaders, true);
   }
 
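Migration note (not part of the diff): with the varargs constructor, composing readers no longer requires building an array by hand. A small fragment; r1 and r2 are placeholder IndexReader instances:

    // Equivalent after this change:
    MultiReader multi          = new MultiReader(r1, r2);
    MultiReader multiFromArray = new MultiReader(new IndexReader[] { r1, r2 });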
@@ -352,11 +352,6 @@ public class MultiReader extends IndexReader implements Cloneable {
     return new MultiTermPositions(this, subReaders, starts);
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     for (int i = 0; i < subReaders.length; i++)
       subReaders[i].commit(commitUserData);

@@ -435,11 +435,6 @@ public class ParallelReader extends IndexReader {
     return (IndexReader[]) readers.toArray(new IndexReader[readers.size()]);
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     for (int i = 0; i < readers.size(); i++)
       ((IndexReader)readers.get(i)).commit(commitUserData);

@@ -792,11 +792,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
     return clone;
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     if (hasChanges) {
       if (deletedDocsDirty) {               // re-write deleted
@@ -36,29 +36,4 @@ public abstract class TermEnum {
 
   /** Closes the enumeration to further activity, freeing resources. */
   public abstract void close() throws IOException;
-
-  /** Skips terms to the first beyond the current whose value is
-   * greater or equal to <i>target</i>. <p>Returns true iff there is such
-   * an entry.  <p>Behaves as if written: <pre>
-   *   public boolean skipTo(Term target) {
-   *     do {
-   *       if (!next())
-   *         return false;
-   *     } while (target > term());
-   *     return true;
-   *   }
-   * </pre>
-   * Some implementations *could* be considerably more efficient than a linear scan.
-   * Check the implementation to be sure.
-   * @deprecated This method is not performant and will be removed in Lucene 3.0.
-   * Use {@link IndexReader#terms(Term)} to create a new TermEnum positioned at a
-   * given term.
-   */
-  public boolean skipTo(Term target) throws IOException {
-    do {
-      if (!next())
-        return false;
-    } while (target.compareTo(term()) > 0);
-    return true;
-  }
 }
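Migration note (not part of the diff): the removed javadoc documents the old linear-scan semantics; the replacement is to ask IndexReader for an enumeration that is already positioned at the first term greater than or equal to the target, as the TermVectorAccessor change earlier in this commit does. A hedged sketch; reader, field and text are placeholders:

    // Before (deprecated):
    //   TermEnum te = reader.terms();
    //   if (te.skipTo(new Term(field, text))) { ... }
    // After:
    TermEnum te = reader.terms(new Term(field, text)); // positioned at first term >= target
    try {
      if (te.term() != null) {
        // found a term at or after (field, text); iterate with te.next() as needed
      }
    } finally {
      te.close();
    }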
@@ -43,27 +43,27 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer = newWriter(dir, true);
     // add 100 documents
     addDocs(writer, 100);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();
 
     writer = newWriter(aux, true);
     writer.setUseCompoundFile(false); // use one without a compound file
     // add 40 documents in separate files
     addDocs(writer, 40);
-    assertEquals(40, writer.docCount());
+    assertEquals(40, writer.maxDoc());
     writer.close();
 
     writer = newWriter(aux2, true);
     // add 40 documents in compound files
     addDocs2(writer, 50);
-    assertEquals(50, writer.docCount());
+    assertEquals(50, writer.maxDoc());
     writer.close();
 
     // test doc count before segments are merged
     writer = newWriter(dir, false);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
-    assertEquals(190, writer.docCount());
+    assertEquals(190, writer.maxDoc());
     writer.close();
 
     // make sure the old index is correct

@@ -77,14 +77,14 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer = newWriter(aux3, true);
     // add 40 documents
     addDocs(writer, 40);
-    assertEquals(40, writer.docCount());
+    assertEquals(40, writer.maxDoc());
     writer.close();
 
     // test doc count before segments are merged/index is optimized
     writer = newWriter(dir, false);
-    assertEquals(190, writer.docCount());
+    assertEquals(190, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux3 });
-    assertEquals(230, writer.docCount());
+    assertEquals(230, writer.maxDoc());
     writer.close();
 
     // make sure the new index is correct

@@ -113,9 +113,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer.close();
 
     writer = newWriter(dir, false);
-    assertEquals(230, writer.docCount());
+    assertEquals(230, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux4 });
-    assertEquals(231, writer.docCount());
+    assertEquals(231, writer.maxDoc());
     writer.close();
 
     verifyNumDocs(dir, 231);

@@ -250,7 +250,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer = newWriter(dir, true);
     // add 100 documents
     addDocs(writer, 100);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();
 
     writer = newWriter(aux, true);

@@ -272,7 +272,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
       assertTrue(false);
     }
     catch (IllegalArgumentException e) {
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
     }
     writer.close();
 

@@ -297,7 +297,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     addDocs(writer, 10);
 
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(1040, writer.docCount());
+    assertEquals(1040, writer.maxDoc());
     assertEquals(2, writer.getSegmentCount());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();

@@ -321,7 +321,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     addDocs(writer, 2);
 
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(1032, writer.docCount());
+    assertEquals(1032, writer.maxDoc());
     assertEquals(2, writer.getSegmentCount());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();

@@ -344,7 +344,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer.setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
-    assertEquals(1060, writer.docCount());
+    assertEquals(1060, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
 

@@ -373,7 +373,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer.setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
-    assertEquals(1020, writer.docCount());
+    assertEquals(1020, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
 

@@ -395,7 +395,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer.setMaxBufferedDocs(100);
     writer.setMergeFactor(10);
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(30, writer.docCount());
+    assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
     writer.close();
 

@@ -418,7 +418,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer.setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
-    assertEquals(1025, writer.docCount());
+    assertEquals(1025, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
 

@@ -476,7 +476,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
     writer.setMaxBufferedDocs(1000);
     // add 1000 documents in 1 segment
     addDocs(writer, 1000);
-    assertEquals(1000, writer.docCount());
+    assertEquals(1000, writer.maxDoc());
     assertEquals(1, writer.getSegmentCount());
     writer.close();
 

@@ -493,7 +493,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
       writer.setMaxBufferedDocs(100);
       writer.setMergeFactor(10);
     }
-    assertEquals(30, writer.docCount());
+    assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
     writer.close();
   }
@ -82,7 +82,7 @@ public class TestCrash extends LuceneTestCase {
|
|||
MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
|
||||
writer.close();
|
||||
writer = initIndex(dir);
|
||||
assertEquals(314, writer.docCount());
|
||||
assertEquals(314, writer.maxDoc());
|
||||
crash(writer);
|
||||
|
||||
/*
|
||||
|
|
|
@ -1762,7 +1762,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
}
|
||||
assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());
|
||||
|
||||
assertEquals(-1, r.getTermInfosIndexDivisor());
|
||||
assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
|
|
@ -119,7 +119,8 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
|
|||
IndexWriter.MaxFieldLength.LIMITED);
|
||||
iw.setMaxBufferedDocs(5);
|
||||
iw.setMergeFactor(3);
|
||||
iw.addIndexes(new Directory[] { dir1, dir2 });
|
||||
iw.addIndexesNoOptimize(new Directory[] { dir1, dir2 });
|
||||
iw.optimize();
|
||||
iw.close();
|
||||
|
||||
norms1.addAll(norms);
|
||||
|
|
|
@@ -96,7 +96,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     for (i = 0; i < 100; i++) {
       addDoc(writer);
     }
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();
 
     // delete 40 documents

@@ -108,7 +108,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
     // test doc count before segments are merged/index is optimized
     writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();
 
     reader = IndexReader.open(dir, true);

@@ -156,7 +156,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
     /*
       Test: make sure when we run out of disk space or hit
-      random IOExceptions in any of the addIndexes(*) calls
+      random IOExceptions in any of the addIndexesNoOptimize(*) calls
       that 1) index is not corrupt (searcher can open/search
       it) and 2) transactional semantics are followed:
       either all or none of the incoming documents were in

@@ -171,7 +171,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     boolean debug = false;
 
     // Build up a bunch of dirs that have indexes which we
-    // will then merge together by calling addIndexes(*):
+    // will then merge together by calling addIndexesNoOptimize(*):
     Directory[] dirs = new Directory[NUM_DIR];
     long inputDiskUsage = 0;
     for(int i=0;i<NUM_DIR;i++) {

@@ -188,7 +188,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     }
 
     // Now, build a starting index that has START_COUNT docs.  We
-    // will then try to addIndexes into a copy of this:
+    // will then try to addIndexesNoOptimize into a copy of this:
     RAMDirectory startDir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     for(int j=0;j<START_COUNT;j++) {

@@ -209,12 +209,12 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
     // Iterate with larger and larger amounts of free
     // disk space.  With little free disk space,
-    // addIndexes will certainly run out of space &
+    // addIndexesNoOptimize will certainly run out of space &
     // fail.  Verify that when this happens, index is
     // not corrupt and index in fact has added no
     // documents.  Then, we increase disk space by 2000
     // bytes each iteration.  At some point there is
-    // enough free disk space and addIndexes should
+    // enough free disk space and addIndexesNoOptimize should
     // succeed and index should show all documents were
     // added.
 

@@ -242,7 +242,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
       String methodName;
       if (0 == method) {
-        methodName = "addIndexes(Directory[])";
+        methodName = "addIndexes(Directory[]) + optimize()";
       } else if (1 == method) {
        methodName = "addIndexes(IndexReader[])";
       } else {

@@ -306,7 +306,8 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
         try {
 
           if (0 == method) {
-            writer.addIndexes(dirs);
+            writer.addIndexesNoOptimize(dirs);
+            writer.optimize();
           } else if (1 == method) {
             IndexReader readers[] = new IndexReader[dirs.length];
             for(int i=0;i<dirs.length;i++) {

@@ -488,7 +489,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
         if (hitError) {
           if (doAbort) {
-            writer.abort();
+            writer.rollback();
           } else {
             try {
               writer.close();

@@ -739,7 +740,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
       // now open index for create:
       writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-      assertEquals("should be zero documents", writer.docCount(), 0);
+      assertEquals("should be zero documents", writer.maxDoc(), 0);
       addDoc(writer);
       writer.close();
 

@@ -999,9 +1000,9 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       searcher.close();
 
       // Now, close the writer:
-      writer.abort();
+      writer.rollback();
 
-      assertNoUnreferencedFiles(dir, "unreferenced files remain after abort()");
+      assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
 
       searcher = new IndexSearcher(dir, false);
       hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;

@@ -1083,7 +1084,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
 
     /*
      * Verify that calling optimize when writer is open for
-     * "commit on close" works correctly both for abort()
+     * "commit on close" works correctly both for rollback()
      * and close().
      */
     public void testCommitOnCloseOptimize() throws IOException {

@@ -1107,7 +1108,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       reader.close();
 
       // Abort the writer:
-      writer.abort();
+      writer.rollback();
       assertNoUnreferencedFiles(dir, "aborted writer after optimize");
 
       // Open a reader after aborting writer:

@@ -2473,25 +2474,25 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
   }
 
   // LUCENE-1130: make sure initial IOException, and then 2nd
-  // IOException during abort(), is OK:
+  // IOException during rollback(), is OK:
   public void testIOExceptionDuringAbort() throws IOException {
     _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
   }
 
   // LUCENE-1130: make sure initial IOException, and then 2nd
-  // IOException during abort(), is OK:
+  // IOException during rollback(), is OK:
   public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
     _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
   }
 
   // LUCENE-1130: make sure initial IOException, and then 2nd
-  // IOException during abort(), with multiple threads, is OK:
+  // IOException during rollback(), with multiple threads, is OK:
   public void testIOExceptionDuringAbortWithThreads() throws Exception {
     _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
   }
 
   // LUCENE-1130: make sure initial IOException, and then 2nd
-  // IOException during abort(), with multiple threads, is OK:
+  // IOException during rollback(), with multiple threads, is OK:
   public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
     _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
   }

@@ -2771,7 +2772,8 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       writer.setMergePolicy(new LogDocMergePolicy(writer));
 
       Directory[] indexDirs = {new MockRAMDirectory(dir)};
-      writer.addIndexes(indexDirs);
+      writer.addIndexesNoOptimize(indexDirs);
+      writer.optimize();
       writer.close();
     }
     dir.close();

@@ -3224,7 +3226,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       // Expected
     }
     assertTrue(failure.fail1 && failure.fail2);
-    w.abort();
+    w.rollback();
     dir.close();
   }
 

@@ -3713,7 +3715,8 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       void doBody(int j, Directory[] dirs) throws Throwable {
         switch(j%4) {
           case 0:
-            writer2.addIndexes(dirs);
+            writer2.addIndexesNoOptimize(dirs);
+            writer2.optimize();
             break;
           case 1:
             writer2.addIndexesNoOptimize(dirs);

@@ -3799,7 +3802,8 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       void doBody(int j, Directory[] dirs) throws Throwable {
         switch(j%5) {
           case 0:
-            writer2.addIndexes(dirs);
+            writer2.addIndexesNoOptimize(dirs);
+            writer2.optimize();
            break;
           case 1:
             writer2.addIndexesNoOptimize(dirs);
@@ -198,7 +198,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
     writer.commit();
     checkInvariants(writer);
-    assertEquals(10, writer.docCount());
+    assertEquals(10, writer.maxDoc());
 
     writer.close();
   }

@@ -29,7 +29,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
 {
 
   /**
-   * Tests that index merging (specifically addIndexes()) doesn't
+   * Tests that index merging (specifically addIndexesNoOptimize()) doesn't
    * change the index order of documents.
    */
   public void testLucene() throws IOException

@@ -59,7 +59,8 @@ public class TestIndexWriterMerging extends LuceneTestCase
     IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);
 
-    writer.addIndexes(new Directory[]{indexA, indexB});
+    writer.addIndexesNoOptimize(new Directory[]{indexA, indexB});
+    writer.optimize();
     writer.close();
 
     fail = verifyIndex(merged, 0);

@@ -428,7 +428,8 @@ public class TestIndexWriterReader extends LuceneTestCase {
       void doBody(int j, Directory[] dirs) throws Throwable {
         switch (j % 4) {
           case 0:
-            mainWriter.addIndexes(dirs);
+            mainWriter.addIndexesNoOptimize(dirs);
+            mainWriter.optimize();
             break;
           case 1:
             mainWriter.addIndexesNoOptimize(dirs);
@@ -36,7 +36,7 @@ import java.util.ArrayList;
 
 /**
  * Test that norms info is preserved during index life - including
- * separate norms, addDocument, addIndexes, optimize.
+ * separate norms, addDocument, addIndexesNoOptimize, optimize.
  */
 public class TestNorms extends LuceneTestCase {
 

@@ -112,7 +112,8 @@ public class TestNorms extends LuceneTestCase {
     IndexWriter iw = new IndexWriter(dir3,anlzr,false, IndexWriter.MaxFieldLength.LIMITED);
     iw.setMaxBufferedDocs(5);
     iw.setMergeFactor(3);
-    iw.addIndexes(new Directory[]{dir1,dir2});
+    iw.addIndexesNoOptimize(new Directory[]{dir1,dir2});
+    iw.optimize();
     iw.close();
 
     norms1.addAll(norms);

@@ -114,7 +114,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
 
         // System.out.println("TEST: now index=" + writer.segString());
 
-        assertEquals(expectedDocCount, writer.docCount());
+        assertEquals(expectedDocCount, writer.maxDoc());
 
         writer.close();
         writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
@@ -231,14 +231,10 @@ public class TestPositionIncrement extends BaseTokenStreamTestCase {
   }
 
   public void testPayloadsPos0() throws Exception {
-    for(int x=0;x<2;x++) {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
                                          new TestPayloadAnalyzer(), true,
                                          IndexWriter.MaxFieldLength.LIMITED);
-    if (x == 1) {
-      writer.setAllowMinus1Position();
-    }
     Document doc = new Document();
     doc.add(new Field("content",
                       new StringReader("a a b c d e a f g h i j a b k k")));

@@ -251,16 +247,8 @@ public class TestPositionIncrement extends BaseTokenStreamTestCase {
     assertTrue(tp.next());
     // "a" occurs 4 times
     assertEquals(4, tp.freq());
-    int expected;
-    if (x == 1) {
-      expected = Integer.MAX_VALUE;
-    } else {
-      expected = 0;
-    }
+    int expected = 0;
     assertEquals(expected, tp.nextPosition());
-    if (x == 1) {
-      continue;
-    }
     assertEquals(1, tp.nextPosition());
     assertEquals(3, tp.nextPosition());
     assertEquals(6, tp.nextPosition());

@@ -322,7 +310,6 @@ public class TestPositionIncrement extends BaseTokenStreamTestCase {
     dir.close();
   }
-  }
 }
 
 class TestPayloadAnalyzer extends Analyzer {
 
@@ -62,7 +62,7 @@ public class TestRAMDirectory extends LuceneTestCase {
       doc.add(new Field("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
-    assertEquals(docsToAdd, writer.docCount());
+    assertEquals(docsToAdd, writer.maxDoc());
     writer.close();
     dir.close();
   }