From 663adad4d7ea0e079ff0c83e9483c83883af7180 Mon Sep 17 00:00:00 2001
From: Michael Busch
diff --git a/src/java/org/apache/lucene/index/IndexReader.java b/src/java/org/apache/lucene/index/IndexReader.java
--- a/src/java/org/apache/lucene/index/IndexReader.java
+++ b/src/java/org/apache/lucene/index/IndexReader.java
-  /** For IndexReader implementations that use
- * TermInfosReader to read terms, this returns the
- * current indexDivisor as specified when the reader was
- * opened.
- */
- public int getTermInfosIndexDivisor() {
- throw new UnsupportedOperationException("This reader does not support this method.");
- }
-
/**
* Check whether this IndexReader is still using the
* current (i.e., most recently committed) version of the
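[Migration note: IndexReader.getTermInfosIndexDivisor() is gone, but the divisor is still readable from the concrete SegmentReader -- the TestIndexReader hunk near the end of this patch casts the first sequential sub-reader accordingly (the test lives in org.apache.lucene.index, so visibility is not an issue there). A minimal sketch of a migrated call site, with `dir` assumed to be an open Directory:

    IndexReader r = IndexReader.open(dir, true);
    SegmentReader sub = (SegmentReader) r.getSequentialSubReaders()[0];
    int divisor = sub.getTermInfosIndexDivisor(); // was: r.getTermInfosIndexDivisor()
    r.close();
]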
diff --git a/src/java/org/apache/lucene/index/IndexWriter.java b/src/java/org/apache/lucene/index/IndexWriter.java
index d883d1314c1..edf05b05f76 100644
--- a/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/src/java/org/apache/lucene/index/IndexWriter.java
@@ -180,12 +180,6 @@ public class IndexWriter {
*/
public static final String WRITE_LOCK_NAME = "write.lock";
- /**
- * @deprecated
- * @see LogMergePolicy#DEFAULT_MERGE_FACTOR
- */
- public final static int DEFAULT_MERGE_FACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR;
-
/**
* Value to denote a flush trigger is disabled
*/
@@ -209,12 +203,6 @@ public class IndexWriter {
*/
public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
- /**
- * @deprecated
- * @see LogDocMergePolicy#DEFAULT_MAX_MERGE_DOCS
- */
- public final static int DEFAULT_MAX_MERGE_DOCS = LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS;
-
/**
* Default value is 10,000. Change using {@link #setMaxFieldLength(int)}.
*/
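[Migration note: both deleted constants were plain forwards (see their @see tags), so call sites switch to the merge-policy classes mechanically:

    int mergeFactor  = LogMergePolicy.DEFAULT_MERGE_FACTOR;      // was IndexWriter.DEFAULT_MERGE_FACTOR
    int maxMergeDocs = LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS; // was IndexWriter.DEFAULT_MAX_MERGE_DOCS
]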
@@ -1790,16 +1778,6 @@ public class IndexWriter {
return analyzer;
}
- /** Returns the number of documents currently in this
- * index, not counting deletions.
- * @deprecated Please use {@link #maxDoc()} (same as this
- * method) or {@link #numDocs()} (also takes deletions
- * into account), instead. */
- public synchronized int docCount() {
- ensureOpen();
- return maxDoc();
- }
-
/** Returns total number of docs in this index, including
* docs not yet flushed (still in the RAM buffer),
* not counting deletions.
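[Migration note: docCount() was an exact alias for maxDoc(), so that is the mechanical replacement; use numDocs() instead where deletions should be excluded. Assuming `writer` is an open IndexWriter:

    int allDocs  = writer.maxDoc();  // what docCount() used to return; ignores deletions
    int liveDocs = writer.numDocs(); // takes deletions into account
]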
@@ -1994,14 +1972,14 @@ public class IndexWriter {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException {
+ public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
ensureOpen();
try {
boolean doFlush = docWriter.bufferDeleteTerms(terms);
if (doFlush)
flush(true, false, false);
} catch (OutOfMemoryError oom) {
- handleOOM(oom, "deleteDocuments(Term[])");
+      handleOOM(oom, "deleteDocuments(Term...)");
}
}
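[Migration note: widening Term[] to Term... is source- and binary-compatible -- a vararg parameter compiles to an array -- so existing array-passing callers are untouched, and new callers may drop the wrapper. The Query... overload below behaves the same way.

    Term t1 = new Term("id", "1"), t2 = new Term("id", "2");
    writer.deleteDocuments(new Term[] { t1, t2 }); // old call sites still compile
    writer.deleteDocuments(t1, t2);                // vararg form now also works
]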
@@ -2036,7 +2014,7 @@ public class IndexWriter {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void deleteDocuments(Query[] queries) throws CorruptIndexException, IOException {
+ public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
ensureOpen();
boolean doFlush = docWriter.bufferDeleteQueries(queries);
if (doFlush)
@@ -2692,13 +2670,6 @@ public class IndexWriter {
finishAddIndexes();
}
- /**
- * @deprecated Please use {@link #rollback} instead.
- */
- public void abort() throws IOException {
- rollback();
- }
-
/**
   * Close the IndexWriter without committing
* any changes that have occurred since the last commit
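[Migration note: abort() simply delegated to rollback(), so the replacement is one-for-one; like abort(), rollback() discards everything since the last commit and closes the writer.

    writer.rollback(); // was: writer.abort()
]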
@@ -2946,84 +2917,12 @@ public class IndexWriter {
releaseRead();
}
 
-  /** Merges all segments from an array of indexes into this index.
-   *
-   * <p>NOTE: if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See above
-   * for details.</p>
-   *
-   * @deprecated Use {@link #addIndexesNoOptimize} instead,
-   * then separately call {@link #optimize} afterwards if
-   * you need to.
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public void addIndexes(Directory[] dirs)
-    throws CorruptIndexException, IOException {
-
-    ensureOpen();
-
-    noDupDirs(dirs);
-
-    // Do not allow add docs or deletes while we are running:
-    docWriter.pauseAllThreads();
-
-    try {
-
-      if (infoStream != null)
-        message("flush at addIndexes");
-      flush(true, false, true);
-
-      boolean success = false;
-
-      startTransaction(false);
-
-      try {
-
-        int docCount = 0;
-        synchronized(this) {
-          ensureOpen();
-          for (int i = 0; i < dirs.length; i++) {
-            SegmentInfos sis = new SegmentInfos(); // read infos from dir
-            sis.read(dirs[i]);
-            for (int j = 0; j < sis.size(); j++) {
-              final SegmentInfo info = sis.info(j);
-              docCount += info.docCount;
-              assert !segmentInfos.contains(info);
-              segmentInfos.add(info); // add each info
-            }
-          }
-        }
-
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-
-        optimize();
-
-        success = true;
-      } finally {
-        if (success) {
-          commitTransaction();
-        } else {
-          rollbackTransaction();
-        }
-      }
-    } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "addIndexes(Directory[])");
-    } finally {
-      if (docWriter != null) {
-        docWriter.resumeAllThreads();
-      }
-    }
-  }
-
   private synchronized void resetMergeExceptions() {
     mergeExceptions = new ArrayList();
     mergeGen++;
   }
 
-  private void noDupDirs(Directory[] dirs) {
+  private void noDupDirs(Directory... dirs) {
     HashSet dups = new HashSet();
     for(int i=0;i<dirs.length;i++) {
@@ -3259,7 +3158,7 @@ public class IndexWriter {
    * <p>See {@link #addIndexesNoOptimize} for
    * details on transactional semantics, temporary free
    * space required in the Directory, and non-CFS segments
    * on an Exception.</p>
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void addIndexes(IndexReader[] readers)
+  public void addIndexes(IndexReader... readers)
     throws CorruptIndexException, IOException {
 
     ensureOpen();
@@ -3326,7 +3225,7 @@ public class IndexWriter {
         segmentInfos.clear();               // pop old infos & add new
         info = new SegmentInfo(mergedName, docCount, directory, false, true,
                                -1, null, false, merger.hasProx());
-        setDiagnostics(info, "addIndexes(IndexReader[])");
+        setDiagnostics(info, "addIndexes(IndexReader...)");
         segmentInfos.add(info);
       }
@@ -3395,7 +3294,7 @@ public class IndexWriter {
         }
       }
     } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "addIndexes(IndexReader[])");
+      handleOOM(oom, "addIndexes(IndexReader...)");
     } finally {
       if (docWriter != null) {
         docWriter.resumeAllThreads();
@@ -4930,22 +4829,6 @@ public class IndexWriter {
     throw oom;
   }
 
-  // deprecated
-  private boolean allowMinus1Position;
-
-  /** Deprecated: emulates IndexWriter's buggy behavior when
-   * first token(s) have positionIncrement==0 (ie, prior to
-   * fixing LUCENE-1542) */
-  public void setAllowMinus1Position() {
-    allowMinus1Position = true;
-    docWriter.setAllowMinus1Position();
-  }
-
-  // deprecated
-  boolean getAllowMinus1Position() {
-    return allowMinus1Position;
-  }
-
   // Used only by assert for testing.  Current points:
   //   startDoFlush
   //   startCommitMerge
diff --git a/src/java/org/apache/lucene/index/MergePolicy.java b/src/java/org/apache/lucene/index/MergePolicy.java
index fc8a4be3134..b44255fc5eb 100644
--- a/src/java/org/apache/lucene/index/MergePolicy.java
+++ b/src/java/org/apache/lucene/index/MergePolicy.java
@@ -172,20 +172,12 @@ public abstract class MergePolicy {
    *  executing a merge. */
   public static class MergeException extends RuntimeException {
     private Directory dir;
-    /** @deprecated
-     *  Use {@link #MergePolicy.MergeException(String,Directory)} instead */
-    public MergeException(String message) {
-      super(message);
-    }
+    public MergeException(String message, Directory dir) {
       super(message);
       this.dir = dir;
     }
-    /** @deprecated
-     *  Use {@link #MergePolicy.MergeException(Throwable,Directory)} instead */
-    public MergeException(Throwable exc) {
-      super(exc);
-    }
+    public MergeException(Throwable exc, Directory dir) {
       super(exc);
       this.dir = dir;
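[Migration note: as the removed method's @deprecated text says, the old optimize-on-add behavior is now split into two explicit calls -- exactly what the TestIndexReaderCloneNorms hunk below does:

    writer.addIndexesNoOptimize(new Directory[] { dir1, dir2 });
    writer.optimize(); // only if the old always-optimize behavior is really needed
]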
diff --git a/src/java/org/apache/lucene/index/MultiReader.java b/src/java/org/apache/lucene/index/MultiReader.java
index 82f9ff6aacc..16ecd4a3286 100644
--- a/src/java/org/apache/lucene/index/MultiReader.java
+++ b/src/java/org/apache/lucene/index/MultiReader.java
@@ -49,7 +49,7 @@ public class MultiReader extends IndexReader implements Cloneable {
    * @param subReaders set of (sub)readers
    * @throws IOException
    */
-  public MultiReader(IndexReader[] subReaders) {
+  public MultiReader(IndexReader... subReaders) {
     initialize(subReaders, true);
   }
@@ -352,11 +352,6 @@ public class MultiReader extends IndexReader implements Cloneable {
     return new MultiTermPositions(this, subReaders, starts);
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     for (int i = 0; i < subReaders.length; i++)
       subReaders[i].commit(commitUserData);
diff --git a/src/java/org/apache/lucene/index/ParallelReader.java b/src/java/org/apache/lucene/index/ParallelReader.java
index a974c88f3ab..d558daf93fc 100644
--- a/src/java/org/apache/lucene/index/ParallelReader.java
+++ b/src/java/org/apache/lucene/index/ParallelReader.java
@@ -435,11 +435,6 @@ public class ParallelReader extends IndexReader {
     return (IndexReader[]) readers.toArray(new IndexReader[readers.size()]);
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     for (int i = 0; i < readers.size(); i++)
       ((IndexReader)readers.get(i)).commit(commitUserData);
diff --git a/src/java/org/apache/lucene/index/SegmentReader.java b/src/java/org/apache/lucene/index/SegmentReader.java
index 54bd076221c..e5fe12bb0de 100644
--- a/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/src/java/org/apache/lucene/index/SegmentReader.java
@@ -792,11 +792,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
     return clone;
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     if (hasChanges) {
       if (deletedDocsDirty) {               // re-write deleted
diff --git a/src/java/org/apache/lucene/index/TermEnum.java b/src/java/org/apache/lucene/index/TermEnum.java
index 735ee9cde69..39859906c55 100644
--- a/src/java/org/apache/lucene/index/TermEnum.java
+++ b/src/java/org/apache/lucene/index/TermEnum.java
@@ -36,29 +36,4 @@ public abstract class TermEnum {
 
   /** Closes the enumeration to further activity, freeing resources. */
   public abstract void close() throws IOException;
-
-  /** Skips terms to the first beyond the current whose value is
-   *  greater or equal to <i>target</i>. <p>Returns true iff there is such
-   *  an entry. <p>Behaves as if written:
-   *  <pre>
-   *    public boolean skipTo(Term target) {
-   *      do {
-   *        if (!next())
-   *          return false;
-   *      } while (target > term());
-   *      return true;
-   *    }
-   *  </pre>
-   *  Some implementations *could* be considerably more efficient than a linear scan.
-   *  Check the implementation to be sure.
-   * @deprecated This method is not performant and will be removed in Lucene 3.0.
-   * Use {@link IndexReader#terms(Term)} to create a new TermEnum positioned at a
-   * given term.
-   */
-  public boolean skipTo(Term target) throws IOException {
-    do {
-      if (!next())
-        return false;
-    } while (target.compareTo(term()) > 0);
-    return true;
-  }
 }
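[Migration note: per the deprecation text just removed, a linear skipTo(target) becomes a fresh enumeration from IndexReader.terms(Term), which seeks through the terms index instead of scanning. Assuming `reader` is an open IndexReader:

    TermEnum te = reader.terms(target); // positioned at the first term >= target, if any
    try {
      if (te.term() != null) {
        // consume te.term() / te.next() as before
      }
    } finally {
      te.close();
    }
]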
addDocs(writer, 10); writer.addIndexesNoOptimize(new Directory[] { aux }); - assertEquals(1040, writer.docCount()); + assertEquals(1040, writer.maxDoc()); assertEquals(2, writer.getSegmentCount()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -321,7 +321,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { addDocs(writer, 2); writer.addIndexesNoOptimize(new Directory[] { aux }); - assertEquals(1032, writer.docCount()); + assertEquals(1032, writer.maxDoc()); assertEquals(2, writer.getSegmentCount()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -344,7 +344,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) }); - assertEquals(1060, writer.docCount()); + assertEquals(1060, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -373,7 +373,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) }); - assertEquals(1020, writer.docCount()); + assertEquals(1020, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -395,7 +395,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMaxBufferedDocs(100); writer.setMergeFactor(10); writer.addIndexesNoOptimize(new Directory[] { aux }); - assertEquals(30, writer.docCount()); + assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); writer.close(); @@ -418,7 +418,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, aux2 }); - assertEquals(1025, writer.docCount()); + assertEquals(1025, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); @@ -476,7 +476,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMaxBufferedDocs(1000); // add 1000 documents in 1 segment addDocs(writer, 1000); - assertEquals(1000, writer.docCount()); + assertEquals(1000, writer.maxDoc()); assertEquals(1, writer.getSegmentCount()); writer.close(); @@ -493,7 +493,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.setMaxBufferedDocs(100); writer.setMergeFactor(10); } - assertEquals(30, writer.docCount()); + assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); writer.close(); } diff --git a/src/test/org/apache/lucene/index/TestCrash.java b/src/test/org/apache/lucene/index/TestCrash.java index f41556394f9..09275e263c8 100644 --- a/src/test/org/apache/lucene/index/TestCrash.java +++ b/src/test/org/apache/lucene/index/TestCrash.java @@ -82,7 +82,7 @@ public class TestCrash extends LuceneTestCase { MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory(); writer.close(); writer = initIndex(dir); - assertEquals(314, writer.docCount()); + assertEquals(314, writer.maxDoc()); crash(writer); /* diff --git a/src/test/org/apache/lucene/index/TestIndexReader.java b/src/test/org/apache/lucene/index/TestIndexReader.java index f338f9320f2..1d685a42f5e 100644 --- a/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/src/test/org/apache/lucene/index/TestIndexReader.java @@ -1762,7 +1762,7 @@ public class TestIndexReader extends LuceneTestCase } assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded()); - assertEquals(-1, r.getTermInfosIndexDivisor()); + assertEquals(-1, ((SegmentReader) 
r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor()); writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.addDocument(doc); writer.close(); diff --git a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java index c4bdf00e223..9ace84ef2d0 100644 --- a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java +++ b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java @@ -119,7 +119,8 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { IndexWriter.MaxFieldLength.LIMITED); iw.setMaxBufferedDocs(5); iw.setMergeFactor(3); - iw.addIndexes(new Directory[] { dir1, dir2 }); + iw.addIndexesNoOptimize(new Directory[] { dir1, dir2 }); + iw.optimize(); iw.close(); norms1.addAll(norms); diff --git a/src/test/org/apache/lucene/index/TestIndexWriter.java b/src/test/org/apache/lucene/index/TestIndexWriter.java index 450d8c95f9c..7bf21e94724 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -96,7 +96,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { for (i = 0; i < 100; i++) { addDoc(writer); } - assertEquals(100, writer.docCount()); + assertEquals(100, writer.maxDoc()); writer.close(); // delete 40 documents @@ -108,7 +108,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { // test doc count before segments are merged/index is optimized writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); - assertEquals(100, writer.docCount()); + assertEquals(100, writer.maxDoc()); writer.close(); reader = IndexReader.open(dir, true); @@ -156,7 +156,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { /* Test: make sure when we run out of disk space or hit - random IOExceptions in any of the addIndexes(*) calls + random IOExceptions in any of the addIndexesNoOptimize(*) calls that 1) index is not corrupt (searcher can open/search it) and 2) transactional semantics are followed: either all or none of the incoming documents were in @@ -171,7 +171,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { boolean debug = false; // Build up a bunch of dirs that have indexes which we - // will then merge together by calling addIndexes(*): + // will then merge together by calling addIndexesNoOptimize(*): Directory[] dirs = new Directory[NUM_DIR]; long inputDiskUsage = 0; for(int i=0;i