From 663adad4d7ea0e079ff0c83e9483c83883af7180 Mon Sep 17 00:00:00 2001
From: Michael Busch
Date: Wed, 14 Oct 2009 21:21:05 +0000
Subject: [PATCH] LUCENE-1979: remove more deprecations in the index package.

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@825288 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt                                    |   2 +-
 .../instantiated/InstantiatedTermEnum.java     |  30 ----
 .../store/instantiated/TestEmptyIndex.java     |   1 -
 .../store/instantiated/TestIndicesEquals.java  |  36 -----
 .../lucene/index/TermVectorAccessor.java       |  13 +-
 .../apache/lucene/misc/IndexMergeTool.java     |   2 +-
 .../org/apache/lucene/index/CheckIndex.java    |   7 +-
 .../lucene/index/DocInverterPerField.java      |   5 +-
 .../apache/lucene/index/DocumentsWriter.java   |   8 -
 .../index/DocumentsWriterThreadState.java      |   1 -
 .../lucene/index/FilterIndexReader.java        |   3 -
 .../org/apache/lucene/index/IndexReader.java   |  31 ----
 .../org/apache/lucene/index/IndexWriter.java   | 135 ++--------------
 .../org/apache/lucene/index/MergePolicy.java   |  12 +-
 .../org/apache/lucene/index/MultiReader.java   |   7 +-
 .../apache/lucene/index/ParallelReader.java    |   5 -
 .../apache/lucene/index/SegmentReader.java     |   5 -
 .../org/apache/lucene/index/TermEnum.java      |  25 ---
 .../index/TestAddIndexesNoOptimize.java        |  40 ++---
 .../org/apache/lucene/index/TestCrash.java     |   2 +-
 .../apache/lucene/index/TestIndexReader.java   |   2 +-
 .../index/TestIndexReaderCloneNorms.java       |   3 +-
 .../apache/lucene/index/TestIndexWriter.java   |  50 +++---
 .../index/TestIndexWriterMergePolicy.java      |   2 +-
 .../lucene/index/TestIndexWriterMerging.java   |   5 +-
 .../lucene/index/TestIndexWriterReader.java    |   3 +-
 .../org/apache/lucene/index/TestNorms.java     |   5 +-
 .../lucene/index/TestThreadedOptimize.java     |   2 +-
 .../lucene/search/TestPositionIncrement.java   | 151 ++++++++----------
 .../apache/lucene/store/TestRAMDirectory.java  |   2 +-
 30 files changed, 152 insertions(+), 443 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index 713f78d79e2..60ab4ec9c02 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -73,7 +73,7 @@ API Changes
 * LUCENE-944: Remove deprecated methods in BooleanQuery. (Michael Busch)
 
 * LUCENE-1979: Remove remaining deprecations from indexer package.
- (Michael Busch) + (Uwe Schindler, Michael Busch) Bug fixes diff --git a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java index b21d8f6f173..9d008b8a8b0 100644 --- a/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java +++ b/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java @@ -77,36 +77,6 @@ public class InstantiatedTermEnum public void close() { } - - public boolean skipTo(Term target) throws IOException { - - // this method is not known to be used by anything - // in lucene for many years now, so there is - // very to gain by optimizing this method more, - - if (reader.getIndex().getOrderedTerms().length == 0) { - return false; - } - - InstantiatedTerm term = reader.getIndex().findTerm(target); - if (term != null) { - this.term = term; - nextTermIndex = term.getTermIndex() + 1; - return true; - } else { - int pos = Arrays.binarySearch(reader.getIndex().getOrderedTerms(), target, InstantiatedTerm.termComparator); - if (pos < 0) { - pos = -1 - pos; - } - - if (pos > reader.getIndex().getOrderedTerms().length) { - return false; - } - this.term = reader.getIndex().getOrderedTerms()[pos]; - nextTermIndex = pos + 1; - return true; - } - } } diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java index 2fd8f3574fe..a980f0d2941 100644 --- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java +++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java @@ -105,7 +105,6 @@ public class TestEmptyIndex extends TestCase { assertNull(terms.term()); assertFalse(terms.next()); - assertFalse(terms.skipTo(new Term("foo", "bar"))); } diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java index d35fdb8fda5..9e53ebc7513 100644 --- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java +++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java @@ -391,42 +391,6 @@ public class TestIndicesEquals extends TestCase { } } - // compare term enumeration seeking - - aprioriTermEnum = aprioriReader.terms(); - - TermEnum aprioriTermEnumSeeker = aprioriReader.terms(); - TermEnum testTermEnumSeeker = testReader.terms(); - - while (aprioriTermEnum.next()) { - if (aprioriTermEnumSeeker.skipTo(aprioriTermEnum.term())) { - assertTrue(testTermEnumSeeker.skipTo(aprioriTermEnum.term())); - assertEquals(aprioriTermEnumSeeker.term(), testTermEnumSeeker.term()); - } else { - assertFalse(testTermEnumSeeker.skipTo(aprioriTermEnum.term())); - } - } - - aprioriTermEnum.close(); - aprioriTermEnumSeeker.close(); - testTermEnumSeeker.close(); - - // skip to non existing terms - - aprioriTermEnumSeeker = aprioriReader.terms(); - testTermEnumSeeker = testReader.terms(); - - aprioriTermEnum = aprioriReader.terms(); - aprioriTermEnum.next(); - Term nonExistingTerm = new Term(aprioriTermEnum.term().field(), "bzzzzoo993djdj380sdf"); - aprioriTermEnum.close(); - - assertEquals(aprioriTermEnumSeeker.skipTo(nonExistingTerm), testTermEnumSeeker.skipTo(nonExistingTerm)); - assertEquals(aprioriTermEnumSeeker.term(), 
testTermEnumSeeker.term()); - - aprioriTermEnumSeeker.close(); - testTermEnumSeeker.close(); - // compare term vectors and position vectors for (int documentNumber = 0; documentNumber < aprioriReader.numDocs(); documentNumber++) { diff --git a/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java b/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java index e8e3adcf6a7..fe3630f7863 100644 --- a/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java +++ b/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java @@ -99,17 +99,16 @@ public class TermVectorAccessor { positions.clear(); } - TermEnum termEnum = indexReader.terms(); - if (termEnum.skipTo(new Term(field, ""))) { - + TermEnum termEnum = indexReader.terms(new Term(field, "")); + if (termEnum.term() != null) { while (termEnum.term().field() == field) { TermPositions termPositions = indexReader.termPositions(termEnum.term()); if (termPositions.skipTo(documentNumber)) { - + frequencies.add(Integer.valueOf(termPositions.freq())); tokens.add(termEnum.term().text()); - - + + if (!mapper.isIgnoringPositions()) { int[] positions = new int[termPositions.freq()]; for (int i = 0; i < positions.length; i++) { @@ -125,13 +124,11 @@ public class TermVectorAccessor { break; } } - mapper.setDocumentNumber(documentNumber); mapper.setExpectations(field, tokens.size(), false, !mapper.isIgnoringPositions()); for (int i = 0; i < tokens.size(); i++) { mapper.map((String) tokens.get(i), ((Integer) frequencies.get(i)).intValue(), (TermVectorOffsetInfo[]) null, (int[]) positions.get(i)); } - } termEnum.close(); diff --git a/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java b/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java index 148743677f2..22247f68ec1 100644 --- a/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java +++ b/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java @@ -45,7 +45,7 @@ public class IndexMergeTool { } System.out.println("Merging..."); - writer.addIndexes(indexes); + writer.addIndexesNoOptimize(indexes); System.out.println("Optimizing..."); writer.optimize(); diff --git a/src/java/org/apache/lucene/index/CheckIndex.java b/src/java/org/apache/lucene/index/CheckIndex.java index 9efca4cce2f..84ea04404a1 100644 --- a/src/java/org/apache/lucene/index/CheckIndex.java +++ b/src/java/org/apache/lucene/index/CheckIndex.java @@ -48,11 +48,6 @@ import java.util.Map; */ public class CheckIndex { - /** Default PrintStream for all CheckIndex instances. - * @deprecated Use {@link #setInfoStream} per instance, - * instead. */ - public static PrintStream out = null; - private PrintStream infoStream; private Directory dir; @@ -257,7 +252,7 @@ public class CheckIndex { /** Create a new CheckIndex on the directory. */ public CheckIndex(Directory dir) { this.dir = dir; - infoStream = out; + infoStream = null; } /** Set infoStream where messages should go. 
If null, no diff --git a/src/java/org/apache/lucene/index/DocInverterPerField.java b/src/java/org/apache/lucene/index/DocInverterPerField.java index 09fa054c2ad..543767fafae 100644 --- a/src/java/org/apache/lucene/index/DocInverterPerField.java +++ b/src/java/org/apache/lucene/index/DocInverterPerField.java @@ -129,9 +129,6 @@ final class DocInverterPerField extends DocFieldConsumerPerField { final int startLength = fieldState.length; - // deprecated - final boolean allowMinus1Position = docState.allowMinus1Position; - try { int offsetEnd = fieldState.offset-1; @@ -157,7 +154,7 @@ final class DocInverterPerField extends DocFieldConsumerPerField { final int posIncr = posIncrAttribute.getPositionIncrement(); fieldState.position += posIncr; - if (allowMinus1Position || fieldState.position > 0) { + if (fieldState.position > 0) { fieldState.position--; } diff --git a/src/java/org/apache/lucene/index/DocumentsWriter.java b/src/java/org/apache/lucene/index/DocumentsWriter.java index 5eaa37221fe..2fa62e538de 100644 --- a/src/java/org/apache/lucene/index/DocumentsWriter.java +++ b/src/java/org/apache/lucene/index/DocumentsWriter.java @@ -150,9 +150,6 @@ final class DocumentsWriter { Document doc; String maxTermPrefix; - // deprecated - boolean allowMinus1Position; - // Only called by asserts public boolean testPoint(String name) { return docWriter.writer.testPoint(name); @@ -299,11 +296,6 @@ final class DocumentsWriter { threadStates[i].docState.similarity = similarity; } - synchronized void setAllowMinus1Position() { - for(int i=0;iFor IndexReader implementations that use - * TermInfosReader to read terms, this sets the - * indexDivisor to subsample the number of indexed terms - * loaded into memory. This has the same effect as {@link - * IndexWriter#setTermIndexInterval} except that setting - * must be done at indexing time while this setting can be - * set per reader. When set to N, then one in every - * N*termIndexInterval terms in the index is loaded into - * memory. By setting this to a value > 1 you can reduce - * memory usage, at the expense of higher latency when - * loading a TermInfo. The default value is 1.
- * - * NOTE: you must call this before the term - * index is loaded. If the index is already loaded, - * an IllegalStateException is thrown. - * @throws IllegalStateException if the term index has already been loaded into memory - * @deprecated Please use {@link IndexReader#open(Directory, IndexDeletionPolicy, boolean, int)} to specify the required TermInfos index divisor instead. - */ - public void setTermInfosIndexDivisor(int indexDivisor) throws IllegalStateException { - throw new UnsupportedOperationException("Please pass termInfosIndexDivisor up-front when opening IndexReader"); - } - - /**
For IndexReader implementations that use - * TermInfosReader to read terms, this returns the - * current indexDivisor as specified when the reader was - * opened. - */ - public int getTermInfosIndexDivisor() { - throw new UnsupportedOperationException("This reader does not support this method."); - } - /** * Check whether this IndexReader is still using the * current (i.e., most recently committed) version of the diff --git a/src/java/org/apache/lucene/index/IndexWriter.java b/src/java/org/apache/lucene/index/IndexWriter.java index d883d1314c1..edf05b05f76 100644 --- a/src/java/org/apache/lucene/index/IndexWriter.java +++ b/src/java/org/apache/lucene/index/IndexWriter.java @@ -180,12 +180,6 @@ public class IndexWriter { */ public static final String WRITE_LOCK_NAME = "write.lock"; - /** - * @deprecated - * @see LogMergePolicy#DEFAULT_MERGE_FACTOR - */ - public final static int DEFAULT_MERGE_FACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR; - /** * Value to denote a flush trigger is disabled */ @@ -209,12 +203,6 @@ public class IndexWriter { */ public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH; - /** - * @deprecated - * @see LogDocMergePolicy#DEFAULT_MAX_MERGE_DOCS - */ - public final static int DEFAULT_MAX_MERGE_DOCS = LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS; - /** * Default value is 10,000. Change using {@link #setMaxFieldLength(int)}. */ @@ -1790,16 +1778,6 @@ public class IndexWriter { return analyzer; } - /** Returns the number of documents currently in this - * index, not counting deletions. - * @deprecated Please use {@link #maxDoc()} (same as this - * method) or {@link #numDocs()} (also takes deletions - * into account), instead. */ - public synchronized int docCount() { - ensureOpen(); - return maxDoc(); - } - /** Returns total number of docs in this index, including * docs not yet flushed (still in the RAM buffer), * not counting deletions. @@ -1994,14 +1972,14 @@ public class IndexWriter { * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException { + public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException { ensureOpen(); try { boolean doFlush = docWriter.bufferDeleteTerms(terms); if (doFlush) flush(true, false, false); } catch (OutOfMemoryError oom) { - handleOOM(oom, "deleteDocuments(Term[])"); + handleOOM(oom, "deleteDocuments(Term..)"); } } @@ -2036,7 +2014,7 @@ public class IndexWriter { * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public void deleteDocuments(Query[] queries) throws CorruptIndexException, IOException { + public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException { ensureOpen(); boolean doFlush = docWriter.bufferDeleteQueries(queries); if (doFlush) @@ -2692,13 +2670,6 @@ public class IndexWriter { finishAddIndexes(); } - /** - * @deprecated Please use {@link #rollback} instead. - */ - public void abort() throws IOException { - rollback(); - } - /** * Close the IndexWriter without committing * any changes that have occurred since the last commit @@ -2946,84 +2917,12 @@ public class IndexWriter { releaseRead(); } - /** Merges all segments from an array of indexes into this index. - * - *
<p><b>NOTE</b>: if this method hits an OutOfMemoryError - * you should immediately close the writer. See above for details.</p>
- * - * @deprecated Use {@link #addIndexesNoOptimize} instead, - * then separately call {@link #optimize} afterwards if - * you need to. - * - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error - */ - public void addIndexes(Directory[] dirs) - throws CorruptIndexException, IOException { - - ensureOpen(); - - noDupDirs(dirs); - - // Do not allow add docs or deletes while we are running: - docWriter.pauseAllThreads(); - - try { - - if (infoStream != null) - message("flush at addIndexes"); - flush(true, false, true); - - boolean success = false; - - startTransaction(false); - - try { - - int docCount = 0; - synchronized(this) { - ensureOpen(); - for (int i = 0; i < dirs.length; i++) { - SegmentInfos sis = new SegmentInfos(); // read infos from dir - sis.read(dirs[i]); - for (int j = 0; j < sis.size(); j++) { - final SegmentInfo info = sis.info(j); - docCount += info.docCount; - assert !segmentInfos.contains(info); - segmentInfos.add(info); // add each info - } - } - } - - // Notify DocumentsWriter that the flushed count just increased - docWriter.updateFlushedDocCount(docCount); - - optimize(); - - success = true; - } finally { - if (success) { - commitTransaction(); - } else { - rollbackTransaction(); - } - } - } catch (OutOfMemoryError oom) { - handleOOM(oom, "addIndexes(Directory[])"); - } finally { - if (docWriter != null) { - docWriter.resumeAllThreads(); - } - } - } - private synchronized void resetMergeExceptions() { mergeExceptions = new ArrayList(); mergeGen++; } - private void noDupDirs(Directory[] dirs) { + private void noDupDirs(Directory... dirs) { HashSet dups = new HashSet(); for(int i=0;iSee {@link #addIndexesNoOptimize(Directory[])} for + *
* <p>See {@link #addIndexesNoOptimize} for * details on transactional semantics, temporary free * space required in the Directory, and non-CFS segments * on an Exception.</p>
@@ -3259,7 +3158,7 @@ public class IndexWriter { * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public void addIndexes(IndexReader[] readers) + public void addIndexes(IndexReader... readers) throws CorruptIndexException, IOException { ensureOpen(); @@ -3326,7 +3225,7 @@ public class IndexWriter { segmentInfos.clear(); // pop old infos & add new info = new SegmentInfo(mergedName, docCount, directory, false, true, -1, null, false, merger.hasProx()); - setDiagnostics(info, "addIndexes(IndexReader[])"); + setDiagnostics(info, "addIndexes(IndexReader...)"); segmentInfos.add(info); } @@ -3395,7 +3294,7 @@ public class IndexWriter { } } } catch (OutOfMemoryError oom) { - handleOOM(oom, "addIndexes(IndexReader[])"); + handleOOM(oom, "addIndexes(IndexReader...)"); } finally { if (docWriter != null) { docWriter.resumeAllThreads(); @@ -4930,22 +4829,6 @@ public class IndexWriter { throw oom; } - // deprecated - private boolean allowMinus1Position; - - /** Deprecated: emulates IndexWriter's buggy behavior when - * first token(s) have positionIncrement==0 (ie, prior to - * fixing LUCENE-1542) */ - public void setAllowMinus1Position() { - allowMinus1Position = true; - docWriter.setAllowMinus1Position(); - } - - // deprecated - boolean getAllowMinus1Position() { - return allowMinus1Position; - } - // Used only by assert for testing. Current points: // startDoFlush // startCommitMerge diff --git a/src/java/org/apache/lucene/index/MergePolicy.java b/src/java/org/apache/lucene/index/MergePolicy.java index fc8a4be3134..b44255fc5eb 100644 --- a/src/java/org/apache/lucene/index/MergePolicy.java +++ b/src/java/org/apache/lucene/index/MergePolicy.java @@ -172,20 +172,12 @@ public abstract class MergePolicy { * executing a merge. */ public static class MergeException extends RuntimeException { private Directory dir; - /** @deprecated - * Use {@link #MergePolicy.MergeException(String,Directory)} instead */ - public MergeException(String message) { - super(message); - } + public MergeException(String message, Directory dir) { super(message); this.dir = dir; } - /** @deprecated - * Use {@link #MergePolicy.MergeException(Throwable,Directory)} instead */ - public MergeException(Throwable exc) { - super(exc); - } + public MergeException(Throwable exc, Directory dir) { super(exc); this.dir = dir; diff --git a/src/java/org/apache/lucene/index/MultiReader.java b/src/java/org/apache/lucene/index/MultiReader.java index 82f9ff6aacc..16ecd4a3286 100644 --- a/src/java/org/apache/lucene/index/MultiReader.java +++ b/src/java/org/apache/lucene/index/MultiReader.java @@ -49,7 +49,7 @@ public class MultiReader extends IndexReader implements Cloneable { * @param subReaders set of (sub)readers * @throws IOException */ - public MultiReader(IndexReader[] subReaders) { + public MultiReader(IndexReader... 
subReaders) { initialize(subReaders, true); } @@ -352,11 +352,6 @@ public class MultiReader extends IndexReader implements Cloneable { return new MultiTermPositions(this, subReaders, starts); } - /** @deprecated */ - protected void doCommit() throws IOException { - doCommit(null); - } - protected void doCommit(Map commitUserData) throws IOException { for (int i = 0; i < subReaders.length; i++) subReaders[i].commit(commitUserData); diff --git a/src/java/org/apache/lucene/index/ParallelReader.java b/src/java/org/apache/lucene/index/ParallelReader.java index a974c88f3ab..d558daf93fc 100644 --- a/src/java/org/apache/lucene/index/ParallelReader.java +++ b/src/java/org/apache/lucene/index/ParallelReader.java @@ -435,11 +435,6 @@ public class ParallelReader extends IndexReader { return (IndexReader[]) readers.toArray(new IndexReader[readers.size()]); } - /** @deprecated */ - protected void doCommit() throws IOException { - doCommit(null); - } - protected void doCommit(Map commitUserData) throws IOException { for (int i = 0; i < readers.size(); i++) ((IndexReader)readers.get(i)).commit(commitUserData); diff --git a/src/java/org/apache/lucene/index/SegmentReader.java b/src/java/org/apache/lucene/index/SegmentReader.java index 54bd076221c..e5fe12bb0de 100644 --- a/src/java/org/apache/lucene/index/SegmentReader.java +++ b/src/java/org/apache/lucene/index/SegmentReader.java @@ -792,11 +792,6 @@ public class SegmentReader extends IndexReader implements Cloneable { return clone; } - /** @deprecated */ - protected void doCommit() throws IOException { - doCommit(null); - } - protected void doCommit(Map commitUserData) throws IOException { if (hasChanges) { if (deletedDocsDirty) { // re-write deleted diff --git a/src/java/org/apache/lucene/index/TermEnum.java b/src/java/org/apache/lucene/index/TermEnum.java index 735ee9cde69..39859906c55 100644 --- a/src/java/org/apache/lucene/index/TermEnum.java +++ b/src/java/org/apache/lucene/index/TermEnum.java @@ -36,29 +36,4 @@ public abstract class TermEnum { /** Closes the enumeration to further activity, freeing resources. */ public abstract void close() throws IOException; - - /** Skips terms to the first beyond the current whose value is - * greater or equal to target.
<p>Returns true iff there is such
- * an entry. <p>Behaves as if written: <pre>
-   *   public boolean skipTo(Term target) {
-   *     do {
-   *       if (!next())
-   *         return false;
-   *     } while (target > term());
-   *     return true;
-   *   }
-   * </pre>
- * Some implementations *could* be considerably more efficient than a linear scan. - * Check the implementation to be sure. - * @deprecated This method is not performant and will be removed in Lucene 3.0. - * Use {@link IndexReader#terms(Term)} to create a new TermEnum positioned at a - * given term. - */ - public boolean skipTo(Term target) throws IOException { - do { - if (!next()) - return false; - } while (target.compareTo(term()) > 0); - return true; - } } diff --git a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java index 6e77800d06f..8b486fc03ec 100755 --- a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java +++ b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java @@ -43,27 +43,27 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer = newWriter(dir, true); // add 100 documents addDocs(writer, 100); - assertEquals(100, writer.docCount()); + assertEquals(100, writer.maxDoc()); writer.close(); writer = newWriter(aux, true); writer.setUseCompoundFile(false); // use one without a compound file // add 40 documents in separate files addDocs(writer, 40); - assertEquals(40, writer.docCount()); + assertEquals(40, writer.maxDoc()); writer.close(); writer = newWriter(aux2, true); // add 40 documents in compound files addDocs2(writer, 50); - assertEquals(50, writer.docCount()); + assertEquals(50, writer.maxDoc()); writer.close(); // test doc count before segments are merged writer = newWriter(dir, false); - assertEquals(100, writer.docCount()); + assertEquals(100, writer.maxDoc()); writer.addIndexesNoOptimize(new Directory[] { aux, aux2 }); - assertEquals(190, writer.docCount()); + assertEquals(190, writer.maxDoc()); writer.close(); // make sure the old index is correct @@ -77,14 +77,14 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer = newWriter(aux3, true); // add 40 documents addDocs(writer, 40); - assertEquals(40, writer.docCount()); + assertEquals(40, writer.maxDoc()); writer.close(); // test doc count before segments are merged/index is optimized writer = newWriter(dir, false); - assertEquals(190, writer.docCount()); + assertEquals(190, writer.maxDoc()); writer.addIndexesNoOptimize(new Directory[] { aux3 }); - assertEquals(230, writer.docCount()); + assertEquals(230, writer.maxDoc()); writer.close(); // make sure the new index is correct @@ -113,9 +113,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.close(); writer = newWriter(dir, false); - assertEquals(230, writer.docCount()); + assertEquals(230, writer.maxDoc()); writer.addIndexesNoOptimize(new Directory[] { aux4 }); - assertEquals(231, writer.docCount()); + assertEquals(231, writer.maxDoc()); writer.close(); verifyNumDocs(dir, 231); @@ -250,7 +250,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer = newWriter(dir, true); // add 100 documents addDocs(writer, 100); - assertEquals(100, writer.docCount()); + assertEquals(100, writer.maxDoc()); writer.close(); writer = newWriter(aux, true); @@ -272,7 +272,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { assertTrue(false); } catch (IllegalArgumentException e) { - assertEquals(100, writer.docCount()); + assertEquals(100, writer.maxDoc()); } writer.close(); @@ -297,7 +297,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { addDocs(writer, 10); writer.addIndexesNoOptimize(new Directory[] { aux }); - assertEquals(1040, writer.docCount()); + assertEquals(1040, 
writer.maxDoc()); assertEquals(2, writer.getSegmentCount()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -321,7 +321,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { addDocs(writer, 2); writer.addIndexesNoOptimize(new Directory[] { aux }); - assertEquals(1032, writer.docCount()); + assertEquals(1032, writer.maxDoc()); assertEquals(2, writer.getSegmentCount()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -344,7 +344,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) }); - assertEquals(1060, writer.docCount()); + assertEquals(1060, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -373,7 +373,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) }); - assertEquals(1020, writer.docCount()); + assertEquals(1020, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -395,7 +395,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMaxBufferedDocs(100); writer.setMergeFactor(10); writer.addIndexesNoOptimize(new Directory[] { aux }); - assertEquals(30, writer.docCount()); + assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); writer.close(); @@ -418,7 +418,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, aux2 }); - assertEquals(1025, writer.docCount()); + assertEquals(1025, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -476,7 +476,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMaxBufferedDocs(1000); // add 1000 documents in 1 segment addDocs(writer, 1000); - assertEquals(1000, writer.docCount()); + assertEquals(1000, writer.maxDoc()); assertEquals(1, writer.getSegmentCount()); writer.close(); @@ -493,7 +493,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMaxBufferedDocs(100); writer.setMergeFactor(10); } - assertEquals(30, writer.docCount()); + assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); writer.close(); } diff --git a/src/test/org/apache/lucene/index/TestCrash.java b/src/test/org/apache/lucene/index/TestCrash.java index f41556394f9..09275e263c8 100644 --- a/src/test/org/apache/lucene/index/TestCrash.java +++ b/src/test/org/apache/lucene/index/TestCrash.java @@ -82,7 +82,7 @@ public class TestCrash extends LuceneTestCase { MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory(); writer.close(); writer = initIndex(dir); - assertEquals(314, writer.docCount()); + assertEquals(314, writer.maxDoc()); crash(writer); /* diff --git a/src/test/org/apache/lucene/index/TestIndexReader.java b/src/test/org/apache/lucene/index/TestIndexReader.java index f338f9320f2..1d685a42f5e 100644 --- a/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/src/test/org/apache/lucene/index/TestIndexReader.java @@ -1762,7 +1762,7 @@ public class TestIndexReader extends LuceneTestCase } assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded()); - assertEquals(-1, r.getTermInfosIndexDivisor()); + assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor()); writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); 
 writer.addDocument(doc);
 writer.close();
diff --git a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
index c4bdf00e223..9ace84ef2d0 100644
--- a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
+++ b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
@@ -119,7 +119,8 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
         IndexWriter.MaxFieldLength.LIMITED);
     iw.setMaxBufferedDocs(5);
     iw.setMergeFactor(3);
-    iw.addIndexes(new Directory[] { dir1, dir2 });
+    iw.addIndexesNoOptimize(new Directory[] { dir1, dir2 });
+    iw.optimize();
     iw.close();
     norms1.addAll(norms);
diff --git a/src/test/org/apache/lucene/index/TestIndexWriter.java b/src/test/org/apache/lucene/index/TestIndexWriter.java
index 450d8c95f9c..7bf21e94724 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -96,7 +96,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       for (i = 0; i < 100; i++) {
         addDoc(writer);
       }
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
       writer.close();
 
       // delete 40 documents
@@ -108,7 +108,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
       // test doc count before segments are merged/index is optimized
       writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
       writer.close();
 
       reader = IndexReader.open(dir, true);
@@ -156,7 +156,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     /*
     Test: make sure when we run out of disk space or hit
-    random IOExceptions in any of the addIndexes(*) calls
+    random IOExceptions in any of the addIndexesNoOptimize(*) calls
     that 1) index is not corrupt (searcher can open/search
     it) and 2) transactional semantics are followed:
     either all or none of the incoming documents were in
@@ -171,7 +171,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     boolean debug = false;
 
     // Build up a bunch of dirs that have indexes which we
-    // will then merge together by calling addIndexes(*):
+    // will then merge together by calling addIndexesNoOptimize(*):
     Directory[] dirs = new Directory[NUM_DIR];
     long inputDiskUsage = 0;
     for(int i=0;i<NUM_DIR;i++) {
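
The API removals above break callers in a few mechanical ways; the sketches below illustrate each migration path. First, the removed TermEnum.skipTo(Term): IndexReader.terms(Term) returns an enumeration already positioned at the first term greater than or equal to the target, which is the substitution this patch makes in TermVectorAccessor. A minimal sketch assuming a Lucene 2.9-era classpath; the class, reader, and field names are illustrative, not part of the patch:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermEnum;

    class SkipToMigration {
      // Walk all terms of one field, starting from the empty string.
      static void walkField(IndexReader reader, String field) throws Exception {
        // Old code: TermEnum te = reader.terms(); te.skipTo(new Term(field, ""));
        TermEnum te = reader.terms(new Term(field, ""));
        try {
          // Unlike skipTo(), terms(Term) positions the enum ON the first
          // matching term, so inspect term() before the first next().
          // Field names are interned in this Lucene generation, so == matches
          // the test TermVectorAccessor itself uses; equals() is also fine.
          while (te.term() != null && te.term().field() == field) {
            System.out.println(te.term().text());
            if (!te.next()) break;
          }
        } finally {
          te.close();
        }
      }
    }

One behavioral difference: skipTo() advanced an existing enumeration, while terms(Term) opens a new one, so code that re-seeks repeatedly now closes and reopens the enum.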
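Second, addIndexes(Directory[]), which ran a full optimize() internally, is gone; IndexMergeTool and TestIndexReaderCloneNorms above now call addIndexesNoOptimize followed by an explicit optimize(). A sketch under the same 2.9 assumptions; the paths and class name are invented for illustration:

    import java.io.File;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    class MergeMigration {
      public static void main(String[] args) throws Exception {
        Directory dest = FSDirectory.open(new File("/tmp/merged"));
        Directory[] sources = {
            FSDirectory.open(new File("/tmp/part1")),
            FSDirectory.open(new File("/tmp/part2")) };
        IndexWriter writer = new IndexWriter(dest, new WhitespaceAnalyzer(),
            true, IndexWriter.MaxFieldLength.UNLIMITED);
        // addIndexesNoOptimize does not merge everything down by itself;
        // call optimize() afterwards only if a single segment is required.
        writer.addIndexesNoOptimize(sources);
        writer.optimize();
        writer.close();
      }
    }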
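Third, the pattern repeated throughout the test changes: writer.docCount() is replaced by writer.maxDoc() (identical semantics), with numDocs() as the variant that subtracts deletions, and deleteDocuments()/addIndexes(IndexReader...) now take varargs so the explicit array wrapping disappears. A small sketch; the field and ids are made up:

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    class CountAndDeleteMigration {
      static void example(IndexWriter writer) throws Exception {
        int includingDeletes = writer.maxDoc();   // was: writer.docCount()
        int excludingDeletes = writer.numDocs();  // takes deletions into account
        // Varargs replaces the explicit array:
        // was: writer.deleteDocuments(new Term[] { t1, t2 });
        writer.deleteDocuments(new Term("id", "42"), new Term("id", "43"));
        System.out.println(includingDeletes + " slots, "
            + excludingDeletes + " live docs");
      }
    }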
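Finally, the static CheckIndex.out hook is removed; a CheckIndex instance now defaults to a null infoStream and is configured per instance via setInfoStream, as the CheckIndex hunk above shows. A minimal sketch; the directory path is invented:

    import java.io.File;
    import org.apache.lucene.index.CheckIndex;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    class CheckIndexMigration {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(new File("/tmp/merged"));
        CheckIndex checker = new CheckIndex(dir);
        checker.setInfoStream(System.out); // was: CheckIndex.out = System.out;
        CheckIndex.Status status = checker.checkIndex();
        System.out.println(status.clean ? "index OK" : "index has problems");
      }
    }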