diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java index 2b7dff05da7..2094acef1a2 100644 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java +++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java @@ -109,12 +109,12 @@ public class IndexFiles { indexDocs(writer, docDir); // NOTE: if you want to maximize search performance, - // you can optionally call optimize here. This can be - // a costly operation, so generally it's only worth - // it when your index is relatively static (ie you're - // done adding documents to it): + // you can optionally call forceMerge here. This can be + // a terribly costly operation, so generally it's only + // worth it when your index is relatively static (ie + // you're done adding documents to it): // - // writer.optimize(); + // writer.forceMerge(1); writer.close(); diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java index 95a230b880a..3d237f6142c 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java @@ -1635,7 +1635,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte writer.addDocument( doc( "t_text1", "more random words for second field del" ) ); writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) ); writer.addDocument( doc( "t_text1", "more random words for second field" ) ); - writer.optimize(); + writer.forceMerge(1); writer.close(); } @@ -1643,7 +1643,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND)); writer.deleteDocuments( new Term( "t_text1", "del" ) ); // To see negative idf, keep comment the following line - //writer.optimize(); + //writer.forceMerge(1); writer.close(); } @@ -1759,7 +1759,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte doc = new Document(); doc.add(nfield); writer.addDocument(doc, analyzer); - writer.optimize(); + writer.forceMerge(1); writer.close(); reader = IndexReader.open(ramDir, true); numHighlights = 0; diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java index 9eb82eb2797..e0240d20870 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java @@ -96,7 +96,7 @@ public class InstantiatedIndex * Creates a new instantiated index that looks just like the index in a specific state as represented by a reader. * * @param sourceIndexReader the source index this new instantiated index will be copied from. - * @throws IOException if the source index is not optimized, or when accessing the source. + * @throws IOException if the source index is not single-segment, or when accessing the source. 
*/ public InstantiatedIndex(IndexReader sourceIndexReader) throws IOException { this(sourceIndexReader, null); } @@ -109,13 +109,13 @@ public class InstantiatedIndex * * @param sourceIndexReader the source index this new instantiated index will be copied from. * @param fields fields to be added, or null for all - * @throws IOException if the source index is not optimized, or when accessing the source. + * @throws IOException if the source index is not single-segment, or when accessing the source. */ public InstantiatedIndex(IndexReader sourceIndexReader, Set fields) throws IOException { - if (!sourceIndexReader.isOptimized()) { - System.out.println(("Source index is not optimized.")); - //throw new IOException("Source index is not optimized."); + if (sourceIndexReader.getSequentialSubReaders().length != 1) { + System.out.println("Source index has more than one segment."); + //throw new IOException("Source index has more than one segment."); } diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java index 514a74cc0d4..82b518d30d2 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java @@ -55,14 +55,6 @@ public class InstantiatedIndexReader extends IndexReader { readerFinishedListeners = Collections.synchronizedSet(new HashSet()); } - /** - * @return always true. - */ - @Override - public boolean isOptimized() { - return true; - } - /** * An InstantiatedIndexReader is not a snapshot in time, it is completely in * sync with the latest commit to the store!
diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java similarity index 87% rename from lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java rename to lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java index 549164b1565..a1391481c6f 100644 --- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java +++ b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java @@ -30,7 +30,7 @@ import org.apache.lucene.document.TextField; /** * @since 2009-mar-30 13:15:49 */ -public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase { +public class TestMultiSegmentReaderOnConstructor extends LuceneTestCase { public void test() throws Exception { Directory dir = newDirectory(); @@ -49,18 +49,18 @@ public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase { addDocument(iw, "All work and no play makes wendy a dull girl"); iw.close(); - IndexReader unoptimizedReader = IndexReader.open(dir, false); - unoptimizedReader.deleteDocument(2); + IndexReader multiSegReader = IndexReader.open(dir, false); + multiSegReader.deleteDocument(2); try { - new InstantiatedIndex(unoptimizedReader); + new InstantiatedIndex(multiSegReader); } catch (Exception e) { e.printStackTrace(System.out); - fail("No exceptions when loading an unoptimized reader!"); + fail("No exceptions when loading a multi-seg reader!"); } // todo some assertations. - unoptimizedReader.close(); + multiSegReader.close(); dir.close(); } diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java index 8bbb16968f4..cba756a7285 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java @@ -105,22 +105,22 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy { } @Override - public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Map segmentsToOptimize) throws IOException { + public MergeSpecification findForcedMerges(SegmentInfos infos, int maxNumSegments, Map segmentsToMerge) throws IOException { assert maxNumSegments > 0; MergeSpecification spec = null; - if (!isOptimized(infos, maxNumSegments, segmentsToOptimize)) { + if (!isMerged(infos, maxNumSegments, segmentsToMerge)) { // Find the newest (rightmost) segment that needs to - // be optimized (other segments may have been flushed - // since optimize started): + // be merged (other segments may have been flushed + // since the merge started): int last = infos.size(); while(last > 0) { final SegmentInfo info = infos.info(--last); - if (segmentsToOptimize.containsKey(info)) { + if (segmentsToMerge.containsKey(info)) { last++; break; } @@ -130,9 +130,9 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy { if (maxNumSegments == 1) { - // Since we must optimize down to 1 segment, the + // Since we must merge down to 1 segment, the // choice is simple: - if (last > 1 || !isOptimized(infos.info(0))) { + if (last > 1 || !isMerged(infos.info(0))) 
{ spec = new MergeSpecification(); spec.add(new OneMerge(infos.asList().subList(0, last))); diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java index 4c10dfc3151..7fffd7a7793 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java @@ -36,8 +36,8 @@ import org.apache.lucene.store.FSDirectory; * *

This tool does file-level copying of segments files. * This means it's unable to split apart a single segment - * into multiple segments. For example if your index is - * optimized, this tool won't help. Also, it does basic + * into multiple segments. For example if your index is a + * single segment, this tool won't help. Also, it does basic * file-level copying (using simple * File{In,Out}putStream) so it will not work with non * FSDirectory Directory impls.
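The changes above replace optimize() with forceMerge(1) and make InstantiatedIndex expect a single-segment reader. A minimal sketch of the resulting caller-side idiom; illustrative only, not part of this patch, and the Version constant and analyzer are placeholders for whatever the application already uses:

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.instantiated.InstantiatedIndex;
import org.apache.lucene.util.Version;

public class SingleSegmentLoader {
  /** Force-merge an existing index down to one segment, then load it into RAM. */
  public static InstantiatedIndex load(Directory dir, Analyzer analyzer) throws Exception {
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, analyzer));
    writer.forceMerge(1); // was writer.optimize() before this patch
    writer.close();

    IndexReader reader = IndexReader.open(dir, true); // read-only reader
    try {
      // InstantiatedIndex now expects exactly one sub-reader (see the check above)
      return new InstantiatedIndex(reader);
    } finally {
      reader.close();
    }
  }
}
```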

diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java b/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java index 775354f33e5..404549a9ee6 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java @@ -50,8 +50,8 @@ public class IndexMergeTool { System.out.println("Merging..."); writer.addIndexes(indexes); - System.out.println("Optimizing..."); - writer.optimize(); + System.out.println("Full merge..."); + writer.forceMerge(1); writer.close(); System.out.println("Done."); } diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java index 171a383ce7d..37aa240b3f4 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java @@ -94,7 +94,7 @@ public class TestIndexSplitter extends LuceneTestCase { fsDir.close(); } - public void testDeleteThenOptimize() throws Exception { + public void testDeleteThenFullMerge() throws Exception { // Create directories where the indexes will reside File indexPath = new File(TEMP_DIR, "testfilesplitter"); _TestUtil.rmDir(indexPath); @@ -134,7 +134,7 @@ public class TestIndexSplitter extends LuceneTestCase { indexReader.close(); fsDirDest.close(); - // Optimize the split index + // Fully merge the split index mergePolicy = new LogByteSizeMergePolicy(); mergePolicy.setNoCFSRatio(1); iwConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) @@ -142,7 +142,7 @@ public class TestIndexSplitter extends LuceneTestCase { .setMergePolicy(mergePolicy); fsDirDest = newFSDirectory(indexSplitPath); indexWriter = new IndexWriter(fsDirDest, iwConfig); - indexWriter.optimize(); + indexWriter.forceMerge(1); indexWriter.close(); fsDirDest.close(); diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java index 9398551ba97..a03bec30142 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java @@ -121,7 +121,7 @@ public class TestAppendingCodec extends LuceneTestCase { writer.addDocument(doc); writer.commit(); writer.addDocument(doc); - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexReader reader = IndexReader.open(dir, null, true, 1); assertEquals(2, reader.numDocs()); diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java index 112aa834d29..d6bc5b3ff72 100644 --- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java +++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java @@ -57,7 +57,7 @@ public class DuplicateFilterTest extends LuceneTestCase { // Until we fix LUCENE-2348, the index must // have only 1 segment: - writer.optimize(); + writer.forceMerge(1); reader = writer.getReader(); writer.close(); diff --git a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java 
b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java index 15ec5c3db08..761b620e0fd 100644 --- a/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java +++ b/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java @@ -68,7 +68,7 @@ public class TestSpanRegexQuery extends LuceneTestCase { doc = new Document(); doc.add(newField("field", "first auto update", TextField.TYPE_UNSTORED)); writer.addDocument(doc); - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexSearcher searcher = new IndexSearcher(directory, true); @@ -98,14 +98,14 @@ public class TestSpanRegexQuery extends LuceneTestCase { IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); writerA.addDocument(lDoc); - writerA.optimize(); + writerA.forceMerge(1); writerA.close(); // creating second index writer IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); writerB.addDocument(lDoc2); - writerB.optimize(); + writerB.forceMerge(1); writerB.close(); } } diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java index c0d5b9c56b2..64a8b464dec 100644 --- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java +++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java @@ -141,7 +141,7 @@ public class TestCartesian extends LuceneTestCase { writer.commit(); // TODO: fix CustomScoreQuery usage in testRange/testGeoHashRange so we don't need this. 
- writer.optimize(); + writer.forceMerge(1); writer.close(); } diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index f15f0778610..ad4f8bf5428 100644 --- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -523,16 +523,6 @@ class DirectoryReader extends IndexReader implements Cloneable { subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); } - /** - * Checks is the index is optimized (if it has a single segment and no deletions) - * @return true if the index is optimized; false otherwise - */ - @Override - public boolean isOptimized() { - ensureOpen(); - return segmentInfos.size() == 1 && !hasDeletions(); - } - @Override public int numDocs() { // Don't call ensureOpen() here (it could affect performance) @@ -953,8 +943,8 @@ class DirectoryReader extends IndexReader implements Cloneable { Directory dir; long generation; long version; - final boolean isOptimized; final Map userData; + private final int segmentCount; ReaderCommit(SegmentInfos infos, Directory dir) throws IOException { segmentsFileName = infos.getCurrentSegmentFileName(); @@ -963,7 +953,7 @@ class DirectoryReader extends IndexReader implements Cloneable { files = Collections.unmodifiableCollection(infos.files(dir, true)); version = infos.getVersion(); generation = infos.getGeneration(); - isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions(); + segmentCount = infos.size(); } @Override @@ -972,8 +962,8 @@ class DirectoryReader extends IndexReader implements Cloneable { } @Override - public boolean isOptimized() { - return isOptimized; + public int getSegmentCount() { + return segmentCount; } @Override diff --git a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java index 22b67885f56..cd5e816e476 100644 --- a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java @@ -430,12 +430,6 @@ public class FilterIndexReader extends IndexReader { return in.isCurrent(); } - @Override - public boolean isOptimized() { - ensureOpen(); - return in.isOptimized(); - } - @Override public IndexReader[] getSequentialSubReaders() { return in.getSequentialSubReaders(); diff --git a/lucene/src/java/org/apache/lucene/index/IndexCommit.java b/lucene/src/java/org/apache/lucene/index/IndexCommit.java index 908c0953b30..79de850390b 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexCommit.java +++ b/lucene/src/java/org/apache/lucene/index/IndexCommit.java @@ -75,8 +75,8 @@ public abstract class IndexCommit implements Comparable { public abstract boolean isDeleted(); - /** Returns true if this commit is an optimized index. */ - public abstract boolean isOptimized(); + /** Returns number of segments referenced by this commit. */ + public abstract int getSegmentCount(); /** Two IndexCommits are equal if both their Directory and versions are equal. 
*/ @Override diff --git a/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java index 5791b44ca84..f99f1b8772a 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java @@ -19,11 +19,9 @@ package org.apache.lucene.index; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.PrintStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -653,8 +651,8 @@ final class IndexFileDeleter { Collection commitsToDelete; long version; long generation; - final boolean isOptimized; final Map userData; + private final int segmentCount; public CommitPoint(Collection commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException { this.directory = directory; @@ -664,7 +662,7 @@ final class IndexFileDeleter { version = segmentInfos.getVersion(); generation = segmentInfos.getGeneration(); files = Collections.unmodifiableCollection(segmentInfos.files(directory, true)); - isOptimized = segmentInfos.size() == 1 && !segmentInfos.info(0).hasDeletions(); + segmentCount = segmentInfos.size(); } @Override @@ -673,8 +671,8 @@ final class IndexFileDeleter { } @Override - public boolean isOptimized() { - return isOptimized; + public int getSegmentCount() { + return segmentCount; } @Override diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index ddd82b5b78f..d3bc8c6d56d 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -28,7 +28,6 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.document.Document; import org.apache.lucene.document.DocumentStoredFieldVisitor; -import org.apache.lucene.index.codecs.PostingsFormat; import org.apache.lucene.index.codecs.PerDocValues; import org.apache.lucene.index.values.IndexDocValues; import org.apache.lucene.search.FieldCache; // javadocs @@ -804,16 +803,6 @@ public abstract class IndexReader implements Cloneable,Closeable { throw new UnsupportedOperationException("This reader does not support this method."); } - /** - * Checks is the index is optimized (if it has a single segment and - * no deletions). Not implemented in the IndexReader base class. - * @return true if the index is optimized; false otherwise - * @throws UnsupportedOperationException unless overridden in subclass - */ - public boolean isOptimized() { - throw new UnsupportedOperationException("This reader does not support this method."); - } - /** * Return an array of term frequency vectors for the specified document. * The array contains a vector for each vectorized field in the document. diff --git a/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java b/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java index 47fd350210b..f59656f87c2 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java @@ -35,7 +35,7 @@ import java.util.Collection; * java -cp lucene-core.jar org.apache.lucene.index.IndexUpgrader [-delete-prior-commits] [-verbose] indexDir * * Alternatively this class can be instantiated and {@link #upgrade} invoked. 
It uses {@link UpgradeIndexMergePolicy} - * and triggers the upgrade via an optimize request to {@link IndexWriter}. + * and triggers the upgrade via a forceMerge request to {@link IndexWriter}. *
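For reference, a sketch of the programmatic path mentioned above. This assumes the single-argument IndexUpgrader constructor from this release, and the index path comes from the command line:

```java
import java.io.File;

import org.apache.lucene.index.IndexUpgrader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class UpgradeInPlace {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File(args[0]));
    try {
      // Rewrites all older segments; per the hunk below, this now ends
      // with w.forceMerge(1) instead of w.optimize().
      new IndexUpgrader(dir).upgrade();
    } finally {
      dir.close();
    }
  }
}
```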

This tool keeps only the last commit in an index; for this * reason, if the incoming index has more than one commit, the tool * refuses to run by default. Specify {@code -delete-prior-commits} @@ -45,7 +45,7 @@ import java.util.Collection; *

Warning: This tool may reorder documents if the index was partially * upgraded before execution (e.g., documents were added). If your application relies * on "monotonicity" of doc IDs (which means that the order in which the documents - * were added to the index is preserved), do a full optimize instead. + * were added to the index is preserved), do a full forceMerge instead. * The {@link MergePolicy} set by {@link IndexWriterConfig} may also reorder * documents. */ @@ -134,7 +134,7 @@ public final class IndexUpgrader { if (infoStream != null) { infoStream.message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "..."); } - w.optimize(); + w.forceMerge(1); if (infoStream != null) { infoStream.message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION); } diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index eb0700d3d8d..2207fbf9045 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -101,11 +101,6 @@ import org.apache.lucene.util.TwoPhaseCommit; addDocument calls (see below for changing the {@link MergeScheduler}).

-

If an index will not have more documents added for a while and optimal search - performance is desired, then either the full {@link #optimize() optimize} - method or partial {@link #optimize(int)} method should be - called before the index is closed.

-

Opening an IndexWriter creates a lock file for the directory in use. Trying to open another IndexWriter on the same directory will lead to a {@link LockObtainFailedException}. The {@link LockObtainFailedException} @@ -134,9 +129,8 @@ import org.apache.lucene.util.TwoPhaseCommit; The {@link MergePolicy} is invoked whenever there are changes to the segments in the index. Its role is to select which merges to do, if any, and return a {@link - MergePolicy.MergeSpecification} describing the merges. It - also selects merges to do for optimize(). (The default is - {@link LogByteSizeMergePolicy}. Then, the {@link + MergePolicy.MergeSpecification} describing the merges. + The default is {@link LogByteSizeMergePolicy}. Then, the {@link MergeScheduler} is invoked with the requested merges and it decides when and how to run the merges. The default is {@link ConcurrentMergeScheduler}.
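A sketch of the policy/scheduler wiring this paragraph describes, using the forced-merge size cap renamed later in this patch (LogByteSizeMergePolicy hunks); the Version constant, analyzer, and 1 GB cap are placeholder choices:

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

public class WriterSetup {
  public static IndexWriter open(Directory dir, Analyzer analyzer) throws Exception {
    LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
    mp.setMaxMergeMBForForcedMerge(1024.0); // segments over 1 GB are left alone by forceMerge

    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    conf.setMergePolicy(mp);                                // picks *which* merges to run
    conf.setMergeScheduler(new ConcurrentMergeScheduler()); // decides *when/how* they run
    return new IndexWriter(dir, conf);
  }
}
```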

@@ -223,8 +217,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { private DocumentsWriter docWriter; final IndexFileDeleter deleter; - private Map segmentsToOptimize = new HashMap(); // used by optimize to note those needing optimization - private int optimizeMaxNumSegments; + // used by forceMerge to note those needing merging + private Map segmentsToMerge = new HashMap(); + private int mergeMaxNumSegments; private Lock writeLock; @@ -1215,7 +1210,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { * readers/searchers are open against the index, and up to * 2X the size of all segments being merged when * readers/searchers are open against the index (see - * {@link #optimize()} for details). The sequence of + * {@link #forceMerge(int)} for details). The sequence of * primitive merge operations performed is governed by the * merge policy. * @@ -1565,55 +1560,52 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { final InfoStream infoStream; /** - * Requests an "optimize" operation on an index, priming the index - * for the fastest available search. Traditionally this has meant - * merging all segments into a single segment as is done in the - * default merge policy, but individual merge policies may implement - * optimize in different ways. + * Forces merge policy to merge segments until there's <= + * maxNumSegments. The actual merges to be + * executed are determined by the {@link MergePolicy}. * - *

Optimize is a very costly operation, so you - * should only do it if your search performance really - * requires it. Many search applications do fine never - * calling optimize.

+ *

This is a horribly costly operation, especially when + * you pass a small {@code maxNumSegments}; usually you + * should only call this if the index is static (will no + * longer be changed).
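Concretely, the trade-off reads like this in caller code (a hypothetical helper assuming org.apache.lucene.index.IndexWriter is imported; the segment counts are arbitrary):

```java
static void shrink(IndexWriter writer) throws Exception {
  writer.forceMerge(5); // merge until at most 5 segments remain; far cheaper than 1
  writer.commit();      // lets IndexWriter free the merged-away files

  // Only once the index is truly static is the full merge (the old optimize()) worth it:
  writer.forceMerge(1);
  writer.close();
}
```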

* - *

Note that optimize requires 2X the index size free + *

Note that this requires up to 2X the index size free * space in your Directory (3X if you're using compound * file format). For example, if your index size is 10 MB - * then you need 20 MB free for optimize to complete (30 + * then you need up to 20 MB free for this to complete (30 * MB if you're using compound file format). Also, - * it's best to call {@link #commit()} after the optimize - * completes to allow IndexWriter to free up disk space.

+ * it's best to call {@link #commit()} afterwards, + * to allow IndexWriter to free up disk space.

* - *

If some but not all readers re-open while an - * optimize is underway, this will cause > 2X temporary + *

If some but not all readers re-open while merging + * is underway, this will cause > 2X temporary * space to be consumed as those new readers will then - * hold open the partially optimized segments at that - * time. It is best not to re-open readers while optimize - * is running.

+ * hold open the temporary segments at that time. It is + * best not to re-open readers while merging is running.

* *

The actual temporary usage could be much less than * these figures (it depends on many factors).

* - *

In general, once the optimize completes, the total size of the + *

In general, once this completes, the total size of the * index will be less than the size of the starting index. * It could be quite a bit smaller (if there were many * pending deletes) or just slightly smaller.

* - *

If an Exception is hit during optimize(), for example + *

If an Exception is hit, for example * due to disk full, the index will not be corrupt and no * documents will have been lost. However, it may have - * been partially optimized (some segments were merged but + * been partially merged (some segments were merged but * not all), and it's possible that one of the segments in * the index will be in non-compound format even when * using compound file format. This will occur when the * Exception is hit during conversion of the segment into * compound format.

* - *

This call will optimize those segments present in + *

This call will merge those segments present in * the index when the call started. If other threads are * still adding documents and flushing segments, those - * newly created segments will not be optimized unless you - * call optimize again.

+ * newly created segments will not be merged unless you + * call forceMerge again.

* *

NOTE: if this method hits an OutOfMemoryError * you should immediately close the writer. See NOTE: if this method hits an OutOfMemoryError - * you should immediately close the writer. See above for details.

+ * @see MergePolicy#findMerges * * @param maxNumSegments maximum number of segments left - * in the index after optimization finishes - */ - public void optimize(int maxNumSegments) throws CorruptIndexException, IOException { - optimize(maxNumSegments, true); + * in the index after merging finishes + */ + public void forceMerge(int maxNumSegments) throws CorruptIndexException, IOException { + forceMerge(maxNumSegments, true); } - /** Just like {@link #optimize()}, except you can specify - * whether the call should block until the optimize - * completes. This is only meaningful with a + /** Just like {@link #forceMerge(int)}, except you can + * specify whether the call should block until + * all merging completes. This is only meaningful with a * {@link MergeScheduler} that is able to run merges in * background threads. * - *

NOTE: if this method hits an OutOfMemoryError - * you should immediately close the writer. See above for details.

+ *

NOTE: if this method hits an OutOfMemoryError + * you should immediately close the writer. See above for details.

*/ - public void optimize(boolean doWait) throws CorruptIndexException, IOException { - optimize(1, doWait); - } - - /** Just like {@link #optimize(int)}, except you can - * specify whether the call should block until the - * optimize completes. This is only meaningful with a - * {@link MergeScheduler} that is able to run merges in - * background threads. - * - *

NOTE: if this method hits an OutOfMemoryError - * you should immediately close the writer. See above for details.

- */ - public void optimize(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException { + public void forceMerge(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException { ensureOpen(); if (maxNumSegments < 1) throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments); if (infoStream != null) { - infoStream.message("IW", "optimize: index now " + segString()); - infoStream.message("IW", "now flush at optimize"); + infoStream.message("IW", "forceMerge: index now " + segString()); + infoStream.message("IW", "now flush at forceMerge"); } flush(true, true); synchronized(this) { resetMergeExceptions(); - segmentsToOptimize.clear(); + segmentsToMerge.clear(); for(SegmentInfo info : segmentInfos) { - segmentsToOptimize.put(info, Boolean.TRUE); + segmentsToMerge.put(info, Boolean.TRUE); } - optimizeMaxNumSegments = maxNumSegments; + mergeMaxNumSegments = maxNumSegments; - // Now mark all pending & running merges as optimize - // merge: + // Now mark all pending & running merges as isMaxNumSegments: for(final MergePolicy.OneMerge merge : pendingMerges) { - merge.optimize = true; - merge.maxNumSegmentsOptimize = maxNumSegments; - segmentsToOptimize.put(merge.info, Boolean.TRUE); + merge.maxNumSegments = maxNumSegments; + segmentsToMerge.put(merge.info, Boolean.TRUE); } for ( final MergePolicy.OneMerge merge: runningMerges ) { - merge.optimize = true; - merge.maxNumSegmentsOptimize = maxNumSegments; - segmentsToOptimize.put(merge.info, Boolean.TRUE); + merge.maxNumSegments = maxNumSegments; + segmentsToMerge.put(merge.info, Boolean.TRUE); } } - maybeMerge(maxNumSegments, true); + maybeMerge(maxNumSegments); if (doWait) { synchronized(this) { while(true) { if (hitOOM) { - throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete optimize"); + throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete forceMerge"); } if (mergeExceptions.size() > 0) { @@ -1724,7 +1686,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { final int size = mergeExceptions.size(); for(int i=0;iWhen an index * has many document deletions (or updates to existing - * documents), it's best to either call optimize or + * documents), it's best to either call forceMerge or * expungeDeletes to remove all unused data in the index * associated with the deleted documents. To see how * many deletions you have pending in your index, call * {@link IndexReader#numDeletedDocs} * This saves disk space and memory usage while * searching. expungeDeletes should be somewhat faster - * than optimize since it does not insist on reducing the + * than forceMerge since it does not insist on reducing the * index to a single segment (though, this depends on the * {@link MergePolicy}; see {@link * MergePolicy#findMergesToExpungeDeletes}.). Note that @@ -1896,22 +1858,18 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { * href="#OOME">above for details.

*/ public final void maybeMerge() throws CorruptIndexException, IOException { - maybeMerge(false); + maybeMerge(-1); } - private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException { - maybeMerge(1, optimize); - } - - private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException { + private final void maybeMerge(int maxNumSegments) throws CorruptIndexException, IOException { ensureOpen(false); - updatePendingMerges(maxNumSegmentsOptimize, optimize); + updatePendingMerges(maxNumSegments); mergeScheduler.merge(this); } - private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize) + private synchronized void updatePendingMerges(int maxNumSegments) throws CorruptIndexException, IOException { - assert !optimize || maxNumSegmentsOptimize > 0; + assert maxNumSegments == -1 || maxNumSegments > 0; if (stopMerges) { return; @@ -1923,14 +1881,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { } final MergePolicy.MergeSpecification spec; - if (optimize) { - spec = mergePolicy.findMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, Collections.unmodifiableMap(segmentsToOptimize)); + if (maxNumSegments != -1) { + spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge)); if (spec != null) { final int numMerges = spec.merges.size(); for(int i=0;iNOTE: this method will forcefully abort all merges * in progress. If other threads are running {@link - * #optimize()}, {@link #addIndexes(IndexReader[])} or + * #forceMerge}, {@link #addIndexes(IndexReader[])} or * {@link #expungeDeletes} methods, they may receive * {@link MergePolicy.MergeAbortedException}s. */ @@ -2390,7 +2347,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { * (including the starting index). If readers/searchers * are open against the starting index, then temporary * free space required will be higher by the size of the - * starting index (see {@link #optimize()} for details). + * starting index (see {@link #forceMerge(int)} for details). * *

* NOTE: this method only copies the segments of the incoming indexes @@ -2452,7 +2409,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { && versionComparator.compare(info.getVersion(), "3.1") >= 0; } - IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, false)); + IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, -1)); if (createCFS) { copySegmentIntoCFS(info, newSegName, context); @@ -2476,7 +2433,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { } /** Merges the provided indexes into this index. - *

After this completes, the index is optimized.

*

The provided IndexReaders are not closed.
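With the "After this completes, the index is optimized" guarantee removed (deleted line above), callers that relied on that side effect must now request the merge explicitly. A hedged sketch:

```java
static void mergeInto(IndexWriter writer, IndexReader... readers) throws Exception {
  writer.addIndexes(readers); // no longer implies a single-segment result
  writer.forceMerge(1);       // opt back in to the old post-condition
  writer.commit();
}
```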

* *

NOTE: while this is running, any attempts to @@ -2512,7 +2468,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { for (IndexReader indexReader : readers) { numDocs += indexReader.numDocs(); } - final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, false)); + final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1)); // TODO: somehow we should fix this merge so it's // abortable so that IW.close(false) is able to stop it @@ -2789,7 +2745,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { /** *

Commits all pending changes (added & deleted - * documents, optimizations, segment merges, added + * documents, segment merges, added * indexes, etc.) to the index, and syncs all referenced * index files, such that a reader will see the changes * and the index updates will survive an OS or machine @@ -3199,10 +3155,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { // disk, updating SegmentInfo, etc.: readerPool.clear(merge.segments); - if (merge.optimize) { - // cascade the optimize: - if (!segmentsToOptimize.containsKey(merge.info)) { - segmentsToOptimize.put(merge.info, Boolean.FALSE); + if (merge.maxNumSegments != -1) { + // cascade the forceMerge: + if (!segmentsToMerge.containsKey(merge.info)) { + segmentsToMerge.put(merge.info, Boolean.FALSE); } } @@ -3216,7 +3172,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { } // Set the exception on the merge, so if - // optimize() is waiting on us it sees the root + // forceMerge is waiting on us it sees the root // cause exception: merge.setException(t); addMergeException(merge); @@ -3283,8 +3239,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { // This merge (and, generally, any change to the // segments) may now enable new merges, so we call // merge policy & update pending merges. - if (success && !merge.isAborted() && (merge.optimize || (!closed && !closing))) { - updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize); + if (success && !merge.isAborted() && (merge.maxNumSegments != -1 || (!closed && !closing))) { + updatePendingMerges(merge.maxNumSegments); } } } @@ -3328,9 +3284,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { if (info.dir != directory) { isExternal = true; } - if (segmentsToOptimize.containsKey(info)) { - merge.optimize = true; - merge.maxNumSegmentsOptimize = optimizeMaxNumSegments; + if (segmentsToMerge.containsKey(info)) { + merge.maxNumSegments = mergeMaxNumSegments; } } @@ -3393,7 +3348,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { assert testPoint("startMergeInit"); assert merge.registerDone; - assert !merge.optimize || merge.maxNumSegmentsOptimize > 0; + assert merge.maxNumSegments == -1 || merge.maxNumSegments > 0; if (hitOOM) { throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge"); @@ -3443,7 +3398,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { // Lock order: IW -> BD bufferedDeletesStream.prune(segmentInfos); Map details = new HashMap(); - details.put("optimize", Boolean.toString(merge.optimize)); + details.put("mergeMaxNumSegments", ""+merge.maxNumSegments); details.put("mergeFactor", Integer.toString(merge.segments.size())); setDiagnostics(merge.info, "merge", details); @@ -3495,7 +3450,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { * the synchronized lock on IndexWriter instance. */ final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException { - // Optimize, addIndexes or finishMerges may be waiting + // forceMerge, addIndexes or finishMerges may be waiting // on merges to finish. notifyAll(); @@ -4090,7 +4045,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit { * NOTE: the set {@link PayloadProcessorProvider} will be in effect * immediately, potentially for already running merges too. If you want to be * sure it is used for further operations only, such as {@link #addIndexes} or - * {@link #optimize}, you can call {@link #waitForMerges()} before. 
+ * {@link #forceMerge}, you can call {@link #waitForMerges()} before. */ public void setPayloadProcessorProvider(PayloadProcessorProvider pcp) { ensureOpen(); diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java index 653044105e0..b9a9772615d 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java @@ -518,7 +518,7 @@ public final class IndexWriterConfig implements Cloneable { * Expert: {@link MergePolicy} is invoked whenever there are changes to the * segments in the index. Its role is to select which merges to do, if any, * and return a {@link MergePolicy.MergeSpecification} describing the merges. - * It also selects merges to do for optimize(). (The default is + * It also selects merges to do for forceMerge. (The default is * {@link LogByteSizeMergePolicy}. * *

Only takes effect when IndexWriter is first created. */ } diff --git a/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java index 7ef2902099f..48083878f4a 100644 --- a/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java @@ -31,13 +31,13 @@ public class LogByteSizeMergePolicy extends LogMergePolicy { public static final double DEFAULT_MAX_MERGE_MB = 2048; /** Default maximum segment size. A segment of this size - * or larger will never be merged during optimize. @see setMaxMergeMBForOptimize */ - public static final double DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE = Long.MAX_VALUE; + * or larger will never be merged during forceMerge. @see setMaxMergeMBForForcedMerge */ + public static final double DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED = Long.MAX_VALUE; public LogByteSizeMergePolicy() { minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024); maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024); - maxMergeSizeForOptimize = (long) (DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE*1024*1024); + maxMergeSizeForForcedMerge = (long) (DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED*1024*1024); } @Override @@ -70,19 +70,19 @@ public class LogByteSizeMergePolicy extends LogMergePolicy { /**

Determines the largest segment (measured by total * byte size of the segment's files, in MB) that may be - * merged with other segments during optimize. Setting + * merged with other segments during forceMerge. Setting * it low will leave the index with more than 1 segment, - * even if {@link IndexWriter#optimize()} is called.*/ - public void setMaxMergeMBForOptimize(double mb) { - maxMergeSizeForOptimize = (long) (mb*1024*1024); + * even if {@link IndexWriter#forceMerge} is called.*/ + public void setMaxMergeMBForForcedMerge(double mb) { + maxMergeSizeForForcedMerge = (long) (mb*1024*1024); } /** Returns the largest segment (measured by total byte * size of the segment's files, in MB) that may be merged - * with other segments during optimize. - * @see #setMaxMergeMBForOptimize */ - public double getMaxMergeMBForOptimize() { - return ((double) maxMergeSizeForOptimize)/1024/1024; + * with other segments during forceMerge. + * @see #setMaxMergeMBForForcedMerge */ + public double getMaxMergeMBForForcedMerge() { + return ((double) maxMergeSizeForForcedMerge)/1024/1024; } /** Sets the minimum size for the lowest level segments. diff --git a/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java index 42ec5136145..ff138fff015 100644 --- a/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java @@ -31,10 +31,10 @@ public class LogDocMergePolicy extends LogMergePolicy { public LogDocMergePolicy() { minMergeSize = DEFAULT_MIN_MERGE_DOCS; - // maxMergeSize(ForOptimize) are never used by LogDocMergePolicy; set + // maxMergeSize(ForForcedMerge) are never used by LogDocMergePolicy; set // it to Long.MAX_VALUE to disable it maxMergeSize = Long.MAX_VALUE; - maxMergeSizeForOptimize = Long.MAX_VALUE; + maxMergeSizeForForcedMerge = Long.MAX_VALUE; } @Override diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java index 351e28612b9..ae460ba4ca9 100644 --- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java @@ -70,7 +70,7 @@ public abstract class LogMergePolicy extends MergePolicy { protected long maxMergeSize; // Although the core MPs set it explicitly, we must default in case someone // out there wrote his own LMP ... - protected long maxMergeSizeForOptimize = Long.MAX_VALUE; + protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE; protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS; protected double noCFSRatio = DEFAULT_NO_CFS_RATIO; @@ -123,10 +123,10 @@ public abstract class LogMergePolicy extends MergePolicy { /** Determines how often segment indices are merged by * addDocument(). With smaller values, less RAM is used - * while indexing, and searches on unoptimized indices are + * while indexing, and searches are * faster, but indexing speed is slower. With larger * values, more RAM is used during indexing, and while - * searches on unoptimized indices are slower, indexing is + * searches are slower, indexing is * faster. Thus larger values (> 10) are best for batch * index creation, and smaller values (< 10) for indices * that are interactively maintained.
*/ @@ -207,29 +207,29 @@ public abstract class LogMergePolicy extends MergePolicy { } } - protected boolean isOptimized(SegmentInfos infos, int maxNumSegments, Map segmentsToOptimize) throws IOException { + protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map segmentsToMerge) throws IOException { final int numSegments = infos.size(); - int numToOptimize = 0; - SegmentInfo optimizeInfo = null; + int numToMerge = 0; + SegmentInfo mergeInfo = null; boolean segmentIsOriginal = false; - for(int i=0;i segments = infos.asList(); @@ -256,14 +256,15 @@ public abstract class LogMergePolicy extends MergePolicy { int start = last - 1; while (start >= 0) { SegmentInfo info = infos.info(start); - if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) { + if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) { if (verbose()) { - message("optimize: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForOptimize + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")"); + message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")"); } // need to skip that segment + add a merge for the 'right' segments, - // unless there is only 1 which is optimized. - if (last - start - 1 > 1 || (start != last - 1 && !isOptimized(infos.info(start + 1)))) { - // there is more than 1 segment to the right of this one, or an unoptimized single segment. + // unless there is only 1 which is merged. + if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos.info(start + 1)))) { + // there is more than 1 segment to the right of + // this one, or a mergeable single segment. spec.add(new OneMerge(segments.subList(start + 1, last))); } last = start; @@ -275,8 +276,9 @@ public abstract class LogMergePolicy extends MergePolicy { --start; } - // Add any left-over segments, unless there is just 1 already optimized. - if (last > 0 && (++start + 1 < last || !isOptimized(infos.info(start)))) { + // Add any left-over segments, unless there is just 1 + // already fully merged + if (last > 0 && (++start + 1 < last || !isMerged(infos.info(start)))) { spec.add(new OneMerge(segments.subList(start, last))); } @@ -284,11 +286,11 @@ public abstract class LogMergePolicy extends MergePolicy { } /** - * Returns the merges necessary to optimize the index. This method constraints + * Returns the merges necessary to forceMerge the index. This method constraints * the returned merges only by the {@code maxNumSegments} parameter, and * guaranteed that exactly that number of segments will remain in the index. 
*/ - private MergeSpecification findMergesForOptimizeMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException { + private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException { MergeSpecification spec = new MergeSpecification(); final List segments = infos.asList(); @@ -304,9 +306,9 @@ public abstract class LogMergePolicy extends MergePolicy { if (0 == spec.merges.size()) { if (maxNumSegments == 1) { - // Since we must optimize down to 1 segment, the + // Since we must merge down to 1 segment, the // choice is simple: - if (last > 1 || !isOptimized(infos.info(0))) { + if (last > 1 || !isMerged(infos.info(0))) { spec.add(new OneMerge(segments.subList(0, last))); } } else if (last > maxNumSegments) { @@ -319,7 +321,7 @@ public abstract class LogMergePolicy extends MergePolicy { // We must merge this many segments to leave // maxNumSegments in the index (from when - // optimize was first kicked off): + // forceMerge was first kicked off): final int finalMergeSize = last - maxNumSegments + 1; // Consider all possible starting points: @@ -342,10 +344,9 @@ public abstract class LogMergePolicy extends MergePolicy { return spec.merges.size() == 0 ? null : spec; } - /** Returns the merges necessary to optimize the index. - * This merge policy defines "optimized" to mean only the - * requested number of segments is left in the index, and - * respects the {@link #maxMergeSizeForOptimize} setting. + /** Returns the merges necessary to merge the index down + * to a specified number of segments. + * This respects the {@link #maxMergeSizeForForcedMerge} setting. * By default, and assuming {@code maxNumSegments=1}, only * one segment will be left in the index, where that segment * has no deletions pending nor separate norms, and it is in @@ -354,30 +355,30 @@ public abstract class LogMergePolicy extends MergePolicy { * (mergeFactor at a time) so the {@link MergeScheduler} * in use may make use of concurrency. */ @Override - public MergeSpecification findMergesForOptimize(SegmentInfos infos, - int maxNumSegments, Map segmentsToOptimize) throws IOException { + public MergeSpecification findForcedMerges(SegmentInfos infos, + int maxNumSegments, Map segmentsToMerge) throws IOException { assert maxNumSegments > 0; if (verbose()) { - message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize="+ segmentsToOptimize); + message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge); } - // If the segments are already optimized (e.g. 
there's only 1 segment), or - // there are 0) { final SegmentInfo info = infos.info(--last); - if (segmentsToOptimize.get(info) != null) { + if (segmentsToMerge.get(info) != null) { last++; break; } @@ -390,8 +391,8 @@ public abstract class LogMergePolicy extends MergePolicy { return null; } - // There is only one segment already, and it is optimized - if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) { + // There is only one segment already, and it is merged + if (maxNumSegments == 1 && last == 1 && isMerged(infos.info(0))) { if (verbose()) { message("already 1 seg; skip"); } @@ -402,16 +403,16 @@ public abstract class LogMergePolicy extends MergePolicy { boolean anyTooLarge = false; for (int i = 0; i < last; i++) { SegmentInfo info = infos.info(i); - if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) { + if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) { anyTooLarge = true; break; } } if (anyTooLarge) { - return findMergesForOptimizeSizeLimit(infos, maxNumSegments, last); + return findForcedMergesSizeLimit(infos, maxNumSegments, last); } else { - return findMergesForOptimizeMaxNumSegments(infos, maxNumSegments, last); + return findForcedMergesMaxNumSegments(infos, maxNumSegments, last); } } @@ -661,7 +662,7 @@ public abstract class LogMergePolicy extends MergePolicy { sb.append("minMergeSize=").append(minMergeSize).append(", "); sb.append("mergeFactor=").append(mergeFactor).append(", "); sb.append("maxMergeSize=").append(maxMergeSize).append(", "); - sb.append("maxMergeSizeForOptimize=").append(maxMergeSizeForOptimize).append(", "); + sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", "); sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", "); sb.append("maxMergeDocs=").append(maxMergeDocs).append(", "); sb.append("useCompoundFile=").append(useCompoundFile).append(", "); diff --git a/lucene/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/src/java/org/apache/lucene/index/MergePolicy.java index a5092f31e1e..531a74a2a4a 100644 --- a/lucene/src/java/org/apache/lucene/index/MergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/MergePolicy.java @@ -30,8 +30,7 @@ import org.apache.lucene.util.SetOnce; /** *

Expert: a MergePolicy determines the sequence of - * primitive merge operations to be used for overall merge - * and optimize operations.

+ * primitive merge operations.

* *

Whenever the segments in an index have been altered by * {@link IndexWriter}, either the addition of a newly @@ -42,8 +41,8 @@ import org.apache.lucene.util.SetOnce; * merges that are now required. This method returns a * {@link MergeSpecification} instance describing the set of * merges that should be done, or null if no merges are - * necessary. When IndexWriter.optimize is called, it calls - * {@link #findMergesForOptimize} and the MergePolicy should + * necessary. When IndexWriter.forceMerge is called, it calls + * {@link #findForcedMerges(SegmentInfos,int,Map)} and the MergePolicy should * then return the necessary merges.
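To make the renamed contract concrete, here is a minimal do-nothing policy in the spirit of NoMergePolicy (changed later in this patch). The abstract-method set and the generic parameters on findForcedMerges are assumed from this revision:

```java
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentInfos;

/** Never merges, not even when IndexWriter.forceMerge is called. */
public final class RefuseAllMergesPolicy extends MergePolicy {
  @Override
  public MergeSpecification findMerges(SegmentInfos infos)
      throws CorruptIndexException, IOException {
    return null; // null means "no merges necessary"
  }

  @Override
  public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount,
      Map<SegmentInfo,Boolean> segmentsToMerge) throws CorruptIndexException, IOException {
    return null; // refuse even IndexWriter.forceMerge requests
  }

  @Override
  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos infos)
      throws CorruptIndexException, IOException {
    return null;
  }

  @Override
  public boolean useCompoundFile(SegmentInfos infos, SegmentInfo newSegment) throws IOException {
    return false;
  }

  @Override
  public void close() {}
}
```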

* *

Note that the policy can return more than one merge at @@ -69,11 +68,10 @@ public abstract class MergePolicy implements java.io.Closeable { public static class OneMerge { SegmentInfo info; // used by IndexWriter - boolean optimize; // used by IndexWriter boolean registerDone; // used by IndexWriter long mergeGen; // used by IndexWriter boolean isExternal; // used by IndexWriter - int maxNumSegmentsOptimize; // used by IndexWriter + int maxNumSegments = -1; // used by IndexWriter public long estimatedMergeBytes; // used by IndexWriter List readers; // used by IndexWriter List readerLiveDocs; // used by IndexWriter @@ -160,8 +158,8 @@ public abstract class MergePolicy implements java.io.Closeable { } if (info != null) b.append(" into ").append(info.name); - if (optimize) - b.append(" [optimize]"); + if (maxNumSegments != -1) + b.append(" [maxNumSegments=" + maxNumSegments + "]"); if (aborted) { b.append(" [ABORTED]"); } @@ -193,7 +191,7 @@ public abstract class MergePolicy implements java.io.Closeable { } public MergeInfo getMergeInfo() { - return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, optimize); + return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, maxNumSegments); } } @@ -290,9 +288,9 @@ public abstract class MergePolicy implements java.io.Closeable { throws CorruptIndexException, IOException; /** - * Determine what set of merge operations is necessary in order to optimize - * the index. {@link IndexWriter} calls this when its - * {@link IndexWriter#optimize()} method is called. This call is always + * Determine what set of merge operations is necessary in + * order to merge to <= the specified segment count. {@link IndexWriter} calls this when its + * {@link IndexWriter#forceMerge} method is called. This call is always * synchronized on the {@link IndexWriter} instance so only one thread at a * time will call this method. * @@ -301,17 +299,17 @@ public abstract class MergePolicy implements java.io.Closeable { * @param maxSegmentCount * requested maximum number of segments in the index (currently this * is always 1) - * @param segmentsToOptimize + * @param segmentsToMerge * contains the specific SegmentInfo instances that must be merged * away. This may be a subset of all * SegmentInfos. If the value is True for a * given SegmentInfo, that means this segment was * an original segment present in the - * to-be-optimized index; else, it was a segment + * to-be-merged index; else, it was a segment * produced by a cascaded merge. 
*/ - public abstract MergeSpecification findMergesForOptimize( - SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToOptimize) + public abstract MergeSpecification findForcedMerges( + SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToMerge) throws CorruptIndexException, IOException; /** diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java index 1cef8208051..785889acac8 100644 --- a/lucene/src/java/org/apache/lucene/index/MultiReader.java +++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java @@ -233,12 +233,6 @@ public class MultiReader extends IndexReader implements Cloneable { subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); } - @Override - public boolean isOptimized() { - ensureOpen(); - return false; - } - @Override public int numDocs() { // Don't call ensureOpen() here (it could affect performance) diff --git a/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java b/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java index 74e82409d87..b9678fc9b61 100644 --- a/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java @@ -58,8 +58,8 @@ public final class NoMergePolicy extends MergePolicy { throws CorruptIndexException, IOException { return null; } @Override - public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, - int maxSegmentCount, Map segmentsToOptimize) + public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, + int maxSegmentCount, Map segmentsToMerge) throws CorruptIndexException, IOException { return null; } @Override diff --git a/lucene/src/java/org/apache/lucene/index/ParallelReader.java b/lucene/src/java/org/apache/lucene/index/ParallelReader.java index fecce84950f..01f84d25255 100644 --- a/lucene/src/java/org/apache/lucene/index/ParallelReader.java +++ b/lucene/src/java/org/apache/lucene/index/ParallelReader.java @@ -477,23 +477,6 @@ public class ParallelReader extends IndexReader { return true; } - /** - * Checks recursively if all subindexes are optimized - */ - @Override - public boolean isOptimized() { - ensureOpen(); - for (final IndexReader reader : readers) { - if (!reader.isOptimized()) { - return false; - } - } - - // all subindexes are optimized - return true; - } - - /** Not implemented. * @throws UnsupportedOperationException */ diff --git a/lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java b/lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java index 3b32145ebcb..23137ba3bc3 100644 --- a/lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java +++ b/lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java @@ -136,8 +136,8 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy { } @Override - public boolean isOptimized() { - return cp.isOptimized(); + public int getSegmentCount() { + return cp.getSegmentCount(); } } @@ -340,7 +340,7 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy { * NOTE: while the snapshot is held, the files it references will not * be deleted, which will consume additional disk space in your index. If you * take a snapshot at a particularly bad time (say just before you call - * optimize()) then in the worst case this could consume an extra 1X of your + * forceMerge) then in the worst case this could consume an extra 1X of your * total index size, until you release the snapshot. 
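A hedged sketch of how the id-based snapshot API documented here is typically driven, including the getSegmentCount() accessor this patch introduces in place of isOptimized() (the "backup" id, dir, and analyzer are illustrative, not from the patch):

    SnapshotDeletionPolicy sdp =
        new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
        Version.LUCENE_XX, analyzer).setIndexDeletionPolicy(sdp));
    IndexCommit commit = sdp.snapshot("backup");  // files of this commit are now protected
    try {
      // was: commit.isOptimized(); now: inspect the segment count directly
      boolean singleSegment = commit.getSegmentCount() == 1;
      // ... copy commit.getFileNames() to backup storage ...
    } finally {
      sdp.release("backup");  // allow the primary policy to reclaim disk space
    }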
* * @param id diff --git a/lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java b/lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java index ea4d1be5948..ced2836ed53 100644 --- a/lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java @@ -62,7 +62,7 @@ import org.apache.lucene.util.InfoStream; *

NOTE: This policy always merges by byte size * of the segments, always pro-rates by percent deletes, * and does not apply any maximum segment size during - * optimize (unlike {@link LogByteSizeMergePolicy}). + * forceMerge (unlike {@link LogByteSizeMergePolicy}). * * @lucene.experimental */ @@ -88,7 +88,7 @@ public class TieredMergePolicy extends MergePolicy { /** Maximum number of segments to be merged at a time * during "normal" merging. For explicit merging (eg, - * optimize or expungeDeletes was called), see {@link + * forceMerge or expungeDeletes was called), see {@link * #setMaxMergeAtOnceExplicit}. Default is 10. */ public TieredMergePolicy setMaxMergeAtOnce(int v) { if (v < 2) { @@ -107,7 +107,7 @@ public class TieredMergePolicy extends MergePolicy { // if user calls IW.maybeMerge "explicitly" /** Maximum number of segments to be merged at a time, - * during optimize or expungeDeletes. Default is 30. */ + * during forceMerge or expungeDeletes. Default is 30. */ public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) { if (v < 2) { throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")"); @@ -478,23 +478,23 @@ public class TieredMergePolicy extends MergePolicy { } @Override - public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxSegmentCount, Map segmentsToOptimize) throws IOException { + public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount, Map segmentsToMerge) throws IOException { if (verbose()) { - message("findMergesForOptimize maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToOptimize=" + segmentsToOptimize); + message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToMerge=" + segmentsToMerge); } List eligible = new ArrayList(); - boolean optimizeMergeRunning = false; + boolean forceMergeRunning = false; final Collection merging = writer.get().getMergingSegments(); boolean segmentIsOriginal = false; for(SegmentInfo info : infos) { - final Boolean isOriginal = segmentsToOptimize.get(info); + final Boolean isOriginal = segmentsToMerge.get(info); if (isOriginal != null) { segmentIsOriginal = isOriginal; if (!merging.contains(info)) { eligible.add(info); } else { - optimizeMergeRunning = true; + forceMergeRunning = true; } } } @@ -504,9 +504,9 @@ public class TieredMergePolicy extends MergePolicy { } if ((maxSegmentCount > 1 && eligible.size() <= maxSegmentCount) || - (maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isOptimized(eligible.get(0))))) { + (maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isMerged(eligible.get(0))))) { if (verbose()) { - message("already optimized"); + message("already merged"); } return null; } @@ -515,7 +515,7 @@ public class TieredMergePolicy extends MergePolicy { if (verbose()) { message("eligible=" + eligible); - message("optimizeMergeRunning=" + optimizeMergeRunning); + message("forceMergeRunning=" + forceMergeRunning); } int end = eligible.size(); @@ -535,7 +535,7 @@ public class TieredMergePolicy extends MergePolicy { end -= maxMergeAtOnceExplicit; } - if (spec == null && !optimizeMergeRunning) { + if (spec == null && !forceMergeRunning) { // Do final merge final int numToMerge = end - maxSegmentCount + 1; final OneMerge merge = new OneMerge(eligible.subList(end-numToMerge, end)); @@ -580,7 +580,7 @@ public class TieredMergePolicy extends MergePolicy { while(start < eligible.size()) { // Don't enforce max merged 
size here: app is explicitly // calling expungeDeletes, and knows this may take a - // long time / produce big segments (like optimize): + // long time / produce big segments (like forceMerge): final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size()); if (spec == null) { spec = new MergeSpecification(); } @@ -619,7 +619,7 @@ public class TieredMergePolicy extends MergePolicy { public void close() { } - private boolean isOptimized(SegmentInfo info) + private boolean isMerged(SegmentInfo info) throws IOException { IndexWriter w = writer.get(); assert w != null; diff --git a/lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java b/lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java index a17ea2f61ae..c2af7532e67 100644 --- a/lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java @@ -27,24 +27,24 @@ import java.util.Map; import java.util.HashMap; /** This {@link MergePolicy} is used for upgrading all existing segments of - * an index when calling {@link IndexWriter#optimize()}. + * an index when calling {@link IndexWriter#forceMerge(int)}. * All other methods delegate to the base {@code MergePolicy} given to the constructor. * This allows for an as-cheap-as possible upgrade of an older index by only upgrading segments that - * are created by previous Lucene versions. Optimize does no longer really optimize - * it is just used to "optimize" older segment versions away. + * are created by previous Lucene versions. forceMerge no longer really merges; + * it is just used to "merge away" older segment versions. *

In general one would use {@link IndexUpgrader}, but for a fully customizable upgrade, - you can use this like any other {@code MergePolicy} and call {@link IndexWriter#optimize()}: + you can use this like any other {@code MergePolicy} and call {@link IndexWriter#forceMerge(int)}: *

   *  IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_XX, new KeywordAnalyzer());
   *  iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
   *  IndexWriter w = new IndexWriter(dir, iwc);
-  *  w.optimize();
+  *  w.forceMerge(1);
   *  w.close();
   * 
*

Warning: This merge policy may reorder documents if the index was partially - upgraded before calling optimize (e.g., documents were added). If your application relies + upgraded before calling forceMerge (e.g., documents were added). If your application relies on "monotonicity" of doc IDs (which means that the order in which the documents - were added to the index is preserved), do a full optimize instead. Please note, the + were added to the index is preserved), do a forceMerge(1) instead. Please note that the * delegate {@code MergePolicy} may also reorder documents. * @lucene.experimental * @see IndexUpgrader @@ -53,7 +53,7 @@ public class UpgradeIndexMergePolicy extends MergePolicy { protected final MergePolicy base; - /** Wrap the given {@link MergePolicy} and intercept optimize requests to + /** Wrap the given {@link MergePolicy} and intercept forceMerge requests to * only upgrade segments written with previous Lucene versions. */ public UpgradeIndexMergePolicy(MergePolicy base) { this.base = base; } @@ -80,22 +80,22 @@ } @Override - public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToOptimize) throws CorruptIndexException, IOException { + public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToMerge) throws CorruptIndexException, IOException { // first find all old segments final Map oldSegments = new HashMap(); for (final SegmentInfo si : segmentInfos) { - final Boolean v =segmentsToOptimize.get(si); + final Boolean v = segmentsToMerge.get(si); if (v != null && shouldUpgradeSegment(si)) { oldSegments.put(si, v); } } - if (verbose()) message("findMergesForOptimize: segmentsToUpgrade=" + oldSegments); + if (verbose()) message("findForcedMerges: segmentsToUpgrade=" + oldSegments); if (oldSegments.isEmpty()) return null; - MergeSpecification spec = base.findMergesForOptimize(segmentInfos, maxSegmentCount, oldSegments); + MergeSpecification spec = base.findForcedMerges(segmentInfos, maxSegmentCount, oldSegments); if (spec != null) { // remove all segments that are in merge specification from oldSegments, @@ -108,7 +108,7 @@ if (!oldSegments.isEmpty()) { if (verbose()) - message("findMergesForOptimize: " + base.getClass().getSimpleName() + + message("findForcedMerges: " + base.getClass().getSimpleName() + " does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments); final List newInfos = new ArrayList(); for (final SegmentInfo si : segmentInfos) { diff --git a/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java b/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java index da66af0ad13..899d166a3aa 100644 --- a/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java +++ b/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java @@ -273,14 +273,14 @@ public abstract class MultiTermQuery extends Query { /** * Expert: Return the number of unique terms visited during execution of the query. * If there are many of them, you may consider using another query type - * or optimize your total term count in index. + * or reduce the total term count in your index. *

This method is not thread-safe; be sure to only call it when no query is running! * If you re-use the same query instance for another * search, be sure to first reset the term counter * with {@link #clearTotalNumberOfTerms}. - *

On optimized indexes / no MultiReaders, you get the correct number of + *

On single-segment indexes / no MultiReaders, you get the correct number of * unique terms for the whole index. Use this number to compare different queries. - * For non-optimized indexes this number can also be achieved in + * For multi-segment indexes this number can also be achieved in * non-constant-score mode. In constant-score mode you get the total number of * terms seeked for all segments / sub-readers. * @see #clearTotalNumberOfTerms diff --git a/lucene/src/java/org/apache/lucene/store/FlushInfo.java b/lucene/src/java/org/apache/lucene/store/FlushInfo.java index 3bde34fe3e8..d2be85c37e3 100644 --- a/lucene/src/java/org/apache/lucene/store/FlushInfo.java +++ b/lucene/src/java/org/apache/lucene/store/FlushInfo.java @@ -18,7 +18,7 @@ package org.apache.lucene.store; */ /** - *

A FlushInfo provides information required for a FLUSH context and other optimization operations. + *

A FlushInfo provides information required for a FLUSH context. * It is used as part of an {@link IOContext} in the case of a FLUSH context.
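As a sketch of where this lands in practice (numDocs, estimatedSegmentBytes, and the output file name are illustrative; the IOContext(FlushInfo) constructor is the one used by the test-framework hunk near the end of this patch):

    FlushInfo flushInfo = new FlushInfo(numDocs, estimatedSegmentBytes);
    IOContext flushContext = new IOContext(flushInfo);
    IndexOutput out = dir.createOutput("_1.fdt", flushContext);  // I/O tuned for a flush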

*/ diff --git a/lucene/src/java/org/apache/lucene/store/MergeInfo.java b/lucene/src/java/org/apache/lucene/store/MergeInfo.java index 7dabc4b4f8e..e7bb1bbf04f 100644 --- a/lucene/src/java/org/apache/lucene/store/MergeInfo.java +++ b/lucene/src/java/org/apache/lucene/store/MergeInfo.java @@ -17,7 +17,7 @@ package org.apache.lucene.store; */ /** - *

A MergeInfo provides information required for a MERGE context and other optimization operations. + *

A MergeInfo provides information required for a MERGE context. * It is used as part of an {@link IOContext} in the case of a MERGE context.
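The practical effect of the signature change below, sketched with placeholder values (docCount and bytes are illustrative): the boolean optimize flag becomes a segment budget, with -1 meaning an ordinary, non-forced merge, matching the MergeInfo(randomNumDocs, size, true, -1) call in the LuceneTestCase hunk later in this patch:

    // before this patch: new MergeInfo(docCount, bytes, isExternal, /*optimize=*/ true)
    MergeInfo forced  = new MergeInfo(docCount, bytes, false, 1);   // from forceMerge(1)
    MergeInfo natural = new MergeInfo(docCount, bytes, false, -1);  // -1: not a forced merge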

*/ @@ -29,7 +29,7 @@ public class MergeInfo { public final boolean isExternal; - public final boolean optimize; + public final int mergeMaxNumSegments; /** @@ -40,11 +40,11 @@ public class MergeInfo { * */ - public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, boolean optimize) { + public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) { this.totalDocCount = totalDocCount; this.estimatedMergeBytes = estimatedMergeBytes; this.isExternal = isExternal; - this.optimize = optimize; + this.mergeMaxNumSegments = mergeMaxNumSegments; } @@ -55,7 +55,7 @@ public class MergeInfo { result = prime * result + (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32)); result = prime * result + (isExternal ? 1231 : 1237); - result = prime * result + (optimize ? 1231 : 1237); + result = prime * result + mergeMaxNumSegments; result = prime * result + totalDocCount; return result; } @@ -73,7 +73,7 @@ public class MergeInfo { return false; if (isExternal != other.isExternal) return false; - if (optimize != other.optimize) + if (mergeMaxNumSegments != other.mergeMaxNumSegments) return false; if (totalDocCount != other.totalDocCount) return false; @@ -84,6 +84,6 @@ public class MergeInfo { public String toString() { return "MergeInfo [totalDocCount=" + totalDocCount + ", estimatedMergeBytes=" + estimatedMergeBytes + ", isExternal=" - + isExternal + ", optimize=" + optimize + "]"; + + isExternal + ", mergeMaxNumSegments=" + mergeMaxNumSegments + "]"; } -} \ No newline at end of file +} diff --git a/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java index 0a88ccfdbe2..a16e1b37b8f 100644 --- a/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java +++ b/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java @@ -216,7 +216,7 @@ public abstract class CollationTestBase extends LuceneTestCase { doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", new StringReader(sortData[i][5])))); writer.addDocument(doc); } - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexSearcher searcher = new IndexSearcher(indexStore, true); diff --git a/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java index 1ff354344eb..7e8d3684f06 100644 --- a/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java +++ b/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java @@ -55,18 +55,18 @@ public class MockRandomMergePolicy extends MergePolicy { } @Override - public MergeSpecification findMergesForOptimize( - SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToOptimize) + public MergeSpecification findForcedMerges( + SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToMerge) throws CorruptIndexException, IOException { final List eligibleSegments = new ArrayList(); for(SegmentInfo info : segmentInfos) { - if (segmentsToOptimize.containsKey(info)) { + if (segmentsToMerge.containsKey(info)) { eligibleSegments.add(info); } } - //System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos + " eligible=" + eligibleSegments); + //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments); MergeSpecification mergeSpec = null; if (eligibleSegments.size() > 1 
|| (eligibleSegments.size() == 1 && eligibleSegments.get(0).hasDeletions())) { mergeSpec = new MergeSpecification(); @@ -85,7 +85,7 @@ public class MockRandomMergePolicy extends MergePolicy { if (mergeSpec != null) { for(OneMerge merge : mergeSpec.merges) { for(SegmentInfo info : merge.segments) { - assert segmentsToOptimize.containsKey(info); + assert segmentsToMerge.containsKey(info); } } } diff --git a/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java index 10e1ec22f76..10a611d408d 100644 --- a/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java +++ b/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java @@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil; /** Silly class that randomizes the indexing experience. EG * it may swap in a different merge policy/scheduler; may - * commit periodically; may or may not optimize in the end, + * commit periodically; may or may not forceMerge in the end, * may flush by doc count instead of RAM, etc. */ @@ -323,8 +323,8 @@ public class RandomIndexWriter implements Closeable { return getReader(true); } - private boolean doRandomOptimize = true; - private boolean doRandomOptimizeAssert = true; + private boolean doRandomForceMerge = true; + private boolean doRandomForceMergeAssert = true; public void expungeDeletes(boolean doWait) throws IOException { w.expungeDeletes(doWait); @@ -334,25 +334,25 @@ w.expungeDeletes(); } - public void setDoRandomOptimize(boolean v) { - doRandomOptimize = v; + public void setDoRandomForceMerge(boolean v) { + doRandomForceMerge = v; } - public void setDoRandomOptimizeAssert(boolean v) { - doRandomOptimizeAssert = v; + public void setDoRandomForceMergeAssert(boolean v) { + doRandomForceMergeAssert = v; } - private void doRandomOptimize() throws IOException { - if (doRandomOptimize) { + private void doRandomForceMerge() throws IOException { + if (doRandomForceMerge) { final int segCount = w.getSegmentCount(); if (r.nextBoolean() || segCount == 0) { - // full optimize - w.optimize(); + // full forceMerge + w.forceMerge(1); } else { - // partial optimize + // partial forceMerge final int limit = _TestUtil.nextInt(r, 1, segCount); - w.optimize(limit); - assert !doRandomOptimizeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount(); + w.forceMerge(limit); + assert !doRandomForceMergeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount(); } } switchDoDocValues(); @@ -361,7 +361,7 @@ public IndexReader getReader(boolean applyDeletions) throws IOException { getReaderCalled = true; if (r.nextInt(4) == 2) { - doRandomOptimize(); + doRandomForceMerge(); } // If we are writing with PreFlexRW, force a full // IndexReader.open so terms are sorted in codepoint @@ -394,21 +394,21 @@ */ public void close() throws IOException { // if someone isn't using getReader() API, we want to be sure to - // maybeOptimize since presumably they might open a reader on the dir. + // forceMerge since presumably they might open a reader on the dir. if (getReaderCalled == false && r.nextInt(8) == 2) { - doRandomOptimize(); + doRandomForceMerge(); } w.close(); } /** - * Forces an optimize. + * Forces a forceMerge, merging down to at most maxSegmentCount segments. *

* NOTE: this should be avoided in tests unless absolutely necessary, * as it will result in less test coverage. - * @see IndexWriter#optimize() + * @see IndexWriter#forceMerge(int) */ - public void optimize() throws IOException { - w.optimize(); + public void forceMerge(int maxSegmentCount) throws IOException { + w.forceMerge(maxSegmentCount); } } diff --git a/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java b/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java index 4b800b225f5..68583481749 100644 --- a/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java +++ b/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java @@ -54,7 +54,7 @@ import org.apache.lucene.util.NamedThreadFactory; import org.apache.lucene.util._TestUtil; // TODO -// - mix in optimize, addIndexes +// - mix in forceMerge, addIndexes // - randomly mix in non-congruent docs /** Utility class that spawns multiple indexing and diff --git a/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java index be50dfea024..178f899e658 100644 --- a/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java @@ -32,7 +32,6 @@ import java.util.Map.Entry; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.lucene.analysis.Analyzer; @@ -41,17 +40,8 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.*; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.PostingsFormat; -import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec; -import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat; import org.apache.lucene.index.codecs.lucene40.Lucene40Codec; -import org.apache.lucene.index.codecs.mockintblock.MockFixedIntBlockPostingsFormat; -import org.apache.lucene.index.codecs.mockintblock.MockVariableIntBlockPostingsFormat; -import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat; -import org.apache.lucene.index.codecs.mockrandom.MockRandomPostingsFormat; -import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat; import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec; -import org.apache.lucene.index.codecs.preflexrw.PreFlexRWPostingsFormat; -import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat; import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FieldCache; @@ -1329,7 +1319,7 @@ public abstract class LuceneTestCase extends Assert { context = IOContext.READONCE; break; case 3: - context = new IOContext(new MergeInfo(randomNumDocs, size, true, false)); + context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1)); break; case 4: context = new IOContext(new FlushInfo(randomNumDocs, size)); diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java index 3e3ec5b36b5..98f7e54723e 100644 --- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java +++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java @@ -25,11 +25,9 @@ import 
org.apache.lucene.document.*; import org.apache.lucene.index.*; import org.apache.lucene.index.codecs.*; import org.apache.lucene.index.codecs.lucene40.Lucene40Codec; -import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat; import org.apache.lucene.search.*; import org.apache.lucene.store.*; import org.apache.lucene.util.*; -import org.apache.lucene.util.Bits; /* Intentionally outside of oal.index to verify fully external codecs work fine */ @@ -104,7 +102,7 @@ public class TestExternalCodecs extends LuceneTestCase { System.out.println("\nTEST: now delete 2nd doc"); } w.deleteDocuments(new Term("id", "44")); - w.optimize(); + w.forceMerge(1); r = IndexReader.open(w, true); assertEquals(NUM_DOCS-2, r.maxDoc()); assertEquals(NUM_DOCS-2, r.numDocs()); diff --git a/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java index 08b1e6dc5ea..8f0418f968b 100644 --- a/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java +++ b/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java @@ -142,7 +142,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase { writer.commit(); // trigger flush writer.addDocument(new Document()); writer.commit(); // trigger flush - writer.optimize(); + writer.forceMerge(1); writer.close(); dir.close(); } diff --git a/lucene/src/test/org/apache/lucene/index/Test2BPostings.java b/lucene/src/test/org/apache/lucene/index/Test2BPostings.java index e8944176724..78787859e0b 100644 --- a/lucene/src/test/org/apache/lucene/index/Test2BPostings.java +++ b/lucene/src/test/org/apache/lucene/index/Test2BPostings.java @@ -73,7 +73,7 @@ public class Test2BPostings extends LuceneTestCase { System.out.println(i + " of " + numDocs + "..."); } } - w.optimize(); + w.forceMerge(1); w.close(); CheckIndex ci = new CheckIndex(dir); if (VERBOSE) { diff --git a/lucene/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/src/test/org/apache/lucene/index/Test2BTerms.java index fc6821a27e5..3ec43ac4f7b 100644 --- a/lucene/src/test/org/apache/lucene/index/Test2BTerms.java +++ b/lucene/src/test/org/apache/lucene/index/Test2BTerms.java @@ -195,8 +195,8 @@ public class Test2BTerms extends LuceneTestCase { } savedTerms = ts.savedTerms; - System.out.println("TEST: optimize"); - w.optimize(); + System.out.println("TEST: full merge"); + w.forceMerge(1); System.out.println("TEST: close writer"); w.close(); } diff --git a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java index c863dc26d41..a2637e178ea 100755 --- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -39,13 +39,7 @@ import org.apache.lucene.index.codecs.StoredFieldsFormat; import org.apache.lucene.index.codecs.PostingsFormat; import org.apache.lucene.index.codecs.SegmentInfosFormat; import org.apache.lucene.index.codecs.lucene40.Lucene40Codec; -import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsBaseFormat; -import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsFormat; -import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat; -import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat; import org.apache.lucene.index.codecs.pulsing.Pulsing40PostingsFormat; -import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat; -import org.apache.lucene.index.codecs.simpletext.SimpleTextPostingsFormat; import 
org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.store.AlreadyClosedException; @@ -114,7 +108,7 @@ public class TestAddIndexes extends LuceneTestCase { assertEquals(40, writer.maxDoc()); writer.close(); - // test doc count before segments are merged/index is optimized + // test doc count before segments are merged writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); assertEquals(190, writer.maxDoc()); writer.addIndexes(aux3); @@ -128,9 +122,9 @@ public class TestAddIndexes extends LuceneTestCase { verifyTermDocs(dir, new Term("content", "bbb"), 50); - // now optimize it. + // now fully merge it. writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); writer.close(); // make sure the new index is correct @@ -186,7 +180,7 @@ public class TestAddIndexes extends LuceneTestCase { q.add(new Term("content", "14")); writer.deleteDocuments(q); - writer.optimize(); + writer.forceMerge(1); writer.commit(); verifyNumDocs(dir, 1039); @@ -224,7 +218,7 @@ public class TestAddIndexes extends LuceneTestCase { q.add(new Term("content", "14")); writer.deleteDocuments(q); - writer.optimize(); + writer.forceMerge(1); writer.commit(); verifyNumDocs(dir, 1039); @@ -262,7 +256,7 @@ public class TestAddIndexes extends LuceneTestCase { writer.addIndexes(aux); - writer.optimize(); + writer.forceMerge(1); writer.commit(); verifyNumDocs(dir, 1039); @@ -729,10 +723,10 @@ public class TestAddIndexes extends LuceneTestCase { switch(j%5) { case 0: if (VERBOSE) { - System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then optimize"); + System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then full merge"); } writer2.addIndexes(dirs); - writer2.optimize(); + writer2.forceMerge(1); break; case 1: if (VERBOSE) { @@ -834,10 +828,10 @@ public class TestAddIndexes extends LuceneTestCase { switch(j%5) { case 0: if (VERBOSE) { - System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + optimize"); + System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + full merge"); } writer2.addIndexes(dirs); - writer2.optimize(); + writer2.forceMerge(1); break; case 1: if (VERBOSE) { @@ -853,9 +847,9 @@ public class TestAddIndexes extends LuceneTestCase { break; case 3: if (VERBOSE) { - System.out.println("TEST: " + Thread.currentThread().getName() + ": optimize"); + System.out.println("TEST: " + Thread.currentThread().getName() + ": full merge"); } - writer2.optimize(); + writer2.forceMerge(1); break; case 4: if (VERBOSE) { @@ -1214,7 +1208,7 @@ public class TestAddIndexes extends LuceneTestCase { } try { - IndexReader indexReader = IndexReader.open(toAdd); + IndexReader.open(toAdd); fail("no such codec"); } catch (IllegalArgumentException ex) { // expected diff --git a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index e0c65ccc17f..e4b5ebe8e13 100644 --- a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -79,16 +79,16 @@ public class TestBackwardsCompatibility extends LuceneTestCase { /* // These are only needed for the special upgrade test to verify - // that also optimized indexes are 
correctly upgraded by IndexUpgrader. + // that single-segment indexes are also correctly upgraded by IndexUpgrader. // You don't need them to be built for non-3.1 (the test is happy with just one // "old" segment format, version is unimportant: - public void testCreateOptimizedCFS() throws IOException { - createIndex("index.optimized.cfs", true, true); + public void testCreateSingleSegmentCFS() throws IOException { + createIndex("index.singlesegment.cfs", true, true); } - public void testCreateOptimizedNoCFS() throws IOException { - createIndex("index.optimized.nocfs", false, true); + public void testCreateSingleSegmentNoCFS() throws IOException { + createIndex("index.singlesegment.nocfs", false, true); } */ @@ -118,8 +118,8 @@ "29.nocfs", }; - final String[] oldOptimizedNames = {"31.optimized.cfs", - "31.optimized.nocfs", + final String[] oldSingleSegmentNames = {"31.optimized.cfs", + "31.optimized.nocfs", }; /** This test checks that *only* IndexFormatTooOldExceptions are thrown when you open and operate on too old indexes! */ @@ -180,7 +180,7 @@ } } - public void testOptimizeOldIndex() throws Exception { + public void testFullyMergeOldIndex() throws Exception { for(int i=0;i names = new ArrayList(oldNames.length + oldOptimizedNames.length); names.addAll(Arrays.asList(oldNames)); - names.addAll(Arrays.asList(oldOptimizedNames)); + names.addAll(Arrays.asList(oldSingleSegmentNames)); for(String name : names) { if (VERBOSE) { System.out.println("testUpgradeOldIndex: index=" +name); @@ -764,16 +764,16 @@ } } - public void testUpgradeOldOptimizedIndexWithAdditions() throws Exception { - for (String name : oldOptimizedNames) { + public void testUpgradeOldSingleSegmentIndexWithAdditions() throws Exception { + for (String name : oldSingleSegmentNames) { if (VERBOSE) { - System.out.println("testUpgradeOldOptimizedIndexWithAdditions: index=" +name); + System.out.println("testUpgradeOldSingleSegmentIndexWithAdditions: index=" +name); } File oldIndxeDir = _TestUtil.getTempDir(name); _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir); Directory dir = newFSDirectory(oldIndxeDir); - assertEquals("Original index must be optimized", 1, getNumberOfSegments(dir)); + assertEquals("Original index must be single segment", 1, getNumberOfSegments(dir)); // create a bunch of dummy segments int id = 40; @@ -791,7 +791,8 @@ w.close(false); } - // add dummy segments (which are all in current version) to optimized index + // add dummy segments (which are all in current + // version) to single segment index MergePolicy mp = random.nextBoolean() ?
newLogMergePolicy() : newTieredMergePolicy(); IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null) .setMergePolicy(mp); diff --git a/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java b/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java index 996d889b24c..7f010e5b251 100644 --- a/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java +++ b/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java @@ -45,7 +45,7 @@ public class TestCheckIndex extends LuceneTestCase { for(int i=0;i<19;i++) { writer.addDocument(doc); } - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexReader reader = IndexReader.open(dir, false); reader.deleteDocument(5); diff --git a/lucene/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/src/test/org/apache/lucene/index/TestCodecs.java index aa378316e50..51ff6ebe2b1 100644 --- a/lucene/src/test/org/apache/lucene/index/TestCodecs.java +++ b/lucene/src/test/org/apache/lucene/index/TestCodecs.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.codecs.PostingsConsumer; import org.apache.lucene.index.codecs.TermStats; import org.apache.lucene.index.codecs.TermsConsumer; import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec; -import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat; import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; @@ -371,7 +370,7 @@ public class TestCodecs extends LuceneTestCase { assertEquals(2, results.length); assertEquals(0, results[0].doc); - writer.optimize(); + writer.forceMerge(1); // merge the segments into one. results = this.search(writer, pq, 5); diff --git a/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java b/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java index 0f4a5c051a6..0308cee2fec 100644 --- a/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java +++ b/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java @@ -77,7 +77,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase { assertEquals("f4", fis2.fieldInfo(3).name); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); - writer.optimize(); + writer.forceMerge(1); writer.close(); sis = new SegmentInfos(); @@ -141,7 +141,7 @@ assertEquals("f4", fis2.fieldInfo(3).name); writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); - writer.optimize(); + writer.forceMerge(1); writer.close(); sis = new SegmentInfos(); @@ -252,7 +252,7 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy( new LogByteSizeMergePolicy()).setInfoStream(new FailOnNonBulkMergesInfoStream())); - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -293,7 +293,7 @@ writer.addDocument(d); } - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); diff --git a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java index a9005a02649..61bcfd6af2c 100644 ---
a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -74,7 +74,7 @@ public class TestDeletionPolicy extends LuceneTestCase { public void onCommit(List commits) throws IOException { IndexCommit lastCommit = commits.get(commits.size()-1); IndexReader r = IndexReader.open(dir, true); - assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized()); + assertEquals("lastCommit.segmentCount()=" + lastCommit.getSegmentCount() + " vs IndexReader.segmentCount=" + r.getSequentialSubReaders().length, r.getSequentialSubReaders().length, lastCommit.getSegmentCount()); r.close(); verifyCommitOrder(commits); numOnCommit++; @@ -317,13 +317,13 @@ public class TestDeletionPolicy extends LuceneTestCase { } writer.close(); - final boolean isOptimized; + final boolean needsMerging; { IndexReader r = IndexReader.open(dir); - isOptimized = r.isOptimized(); + needsMerging = r.getSequentialSubReaders().length != 1; r.close(); } - if (!isOptimized) { + if (needsMerging) { conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode( OpenMode.APPEND).setIndexDeletionPolicy(policy); @@ -332,22 +332,22 @@ public class TestDeletionPolicy extends LuceneTestCase { ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); } if (VERBOSE) { - System.out.println("TEST: open writer for optimize"); + System.out.println("TEST: open writer for forceMerge"); } writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); } - assertEquals(isOptimized ? 0:1, policy.numOnInit); + assertEquals(needsMerging ? 1:0, policy.numOnInit); // If we are not auto committing then there should // be exactly 2 commits (one per close above): - assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit); + assertEquals(1 + (needsMerging ? 1:0), policy.numOnCommit); // Test listCommits Collection commits = IndexReader.listCommits(dir); // 2 from closing writer - assertEquals(1 + (isOptimized ? 0:1), commits.size()); + assertEquals(1 + (needsMerging ? 
1:0), commits.size()); // Make sure we can open a reader on each commit: for (final IndexCommit commit : commits) { @@ -418,16 +418,16 @@ public class TestDeletionPolicy extends LuceneTestCase { } assertTrue(lastCommit != null); - // Now add 1 doc and optimize + // Now add 1 doc and merge writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy)); addDoc(writer); assertEquals(11, writer.numDocs()); - writer.optimize(); + writer.forceMerge(1); writer.close(); assertEquals(6, IndexReader.listCommits(dir).size()); - // Now open writer on the commit just before optimize: + // Now open writer on the commit just before merge: writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit)); assertEquals(10, writer.numDocs()); @@ -436,8 +436,8 @@ public class TestDeletionPolicy extends LuceneTestCase { writer.rollback(); IndexReader r = IndexReader.open(dir, true); - // Still optimized, still 11 docs - assertTrue(r.isOptimized()); + // Still merged, still 11 docs + assertEquals(1, r.getSequentialSubReaders().length); assertEquals(11, r.numDocs()); r.close(); @@ -451,39 +451,39 @@ public class TestDeletionPolicy extends LuceneTestCase { assertEquals(7, IndexReader.listCommits(dir).size()); r = IndexReader.open(dir, true); - // Not optimized because we rolled it back, and now only + // Not fully merged because we rolled it back, and now only // 10 docs - assertTrue(!r.isOptimized()); + assertTrue(r.getSequentialSubReaders().length > 1); assertEquals(10, r.numDocs()); r.close(); - // Reoptimize + // Re-merge writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy)); - writer.optimize(); + writer.forceMerge(1); writer.close(); r = IndexReader.open(dir, true); - assertTrue(r.isOptimized()); + assertEquals(1, r.getSequentialSubReaders().length); assertEquals(10, r.numDocs()); r.close(); - // Now open writer on the commit just before optimize, + // Now open writer on the commit just before merging, // but this time keeping only the last commit: writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit)); assertEquals(10, writer.numDocs()); - // Reader still sees optimized index, because writer + // Reader still sees fully merged index, because writer // opened on the prior commit has not yet committed: r = IndexReader.open(dir, true); - assertTrue(r.isOptimized()); + assertEquals(1, r.getSequentialSubReaders().length); assertEquals(10, r.numDocs()); r.close(); writer.close(); - // Now reader sees unoptimized index: + // Now reader sees not-fully-merged index: r = IndexReader.open(dir, true); - assertTrue(!r.isOptimized()); + assertTrue(r.getSequentialSubReaders().length > 1); assertEquals(10, r.numDocs()); r.close(); @@ -525,7 +525,7 @@ public class TestDeletionPolicy extends LuceneTestCase { ((LogMergePolicy) mp).setUseCompoundFile(true); } writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); assertEquals(1, policy.numOnInit); @@ -569,7 +569,7 @@ public class TestDeletionPolicy extends LuceneTestCase { for(int i=0;i<17;i++) { addDoc(writer); } - writer.optimize(); + writer.forceMerge(1); writer.close(); } @@ -673,15 +673,15 @@ public class TestDeletionPolicy extends LuceneTestCase { ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); } IndexReader r = 
IndexReader.open(dir); - final boolean wasOptimized = r.isOptimized(); + final boolean wasFullyMerged = r.getSequentialSubReaders().length == 1 && !r.hasDeletions(); r.close(); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); // this is a commit writer.close(); assertEquals(2*(N+1)+1, policy.numOnInit); - assertEquals(2*(N+2) - (wasOptimized ? 1:0), policy.numOnCommit); + assertEquals(2*(N+2) - (wasFullyMerged ? 1:0), policy.numOnCommit); IndexSearcher searcher = new IndexSearcher(dir, false); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; diff --git a/lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java index 4686baaff13..6963dc5879b 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java @@ -184,7 +184,7 @@ public class TestDirectoryReader extends LuceneTestCase { while (td.nextDoc() != td.NO_MORE_DOCS) ret += td.docID(); // really a dummy assert to ensure that we got some docs and to ensure that - // nothing is optimized out. + // nothing is eliminated by hotspot assertTrue(ret > 0); readers1[0].close(); readers1[1].close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestDocCount.java b/lucene/src/test/org/apache/lucene/index/TestDocCount.java index bba8e302a6b..ec7e5b516d0 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDocCount.java +++ b/lucene/src/test/org/apache/lucene/index/TestDocCount.java @@ -42,7 +42,7 @@ public class TestDocCount extends LuceneTestCase { IndexReader ir = iw.getReader(); verifyCount(ir); ir.close(); - iw.optimize(); + iw.forceMerge(1); ir = iw.getReader(); verifyCount(ir); ir.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java index a4f90814952..cdaea162b95 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java @@ -321,7 +321,7 @@ public class TestDocumentWriter extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer.addDocument(doc); - writer.optimize(); // be sure to have a single segment + writer.forceMerge(1); // be sure to have a single segment writer.close(); _TestUtil.checkIndex(dir); diff --git a/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java index 09fd2642d30..80dd7e814dc 100644 --- a/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java @@ -203,7 +203,7 @@ public class TestFieldsReader extends LuceneTestCase { TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); for(int i=0;i<2;i++) writer.addDocument(testDoc); - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexReader reader = IndexReader.open(dir, true); diff --git a/lucene/src/test/org/apache/lucene/index/TestFlex.java b/lucene/src/test/org/apache/lucene/index/TestFlex.java index 90e46282b18..caf17bba75b 100644 --- a/lucene/src/test/org/apache/lucene/index/TestFlex.java +++ b/lucene/src/test/org/apache/lucene/index/TestFlex.java @@ -48,7 +48,7 @@ public class TestFlex extends LuceneTestCase { w.addDocument(doc); } } else { - w.optimize(); + w.forceMerge(1); } IndexReader r = w.getReader(); 
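The renamed test that follows keys off OneMerge.maxNumSegments instead of the removed optimize flag. A self-contained sketch of that pattern (the class and counter names are illustrative; since maxNumSegments is package-private, such a helper must live in org.apache.lucene.index, as the test itself does):

    package org.apache.lucene.index;

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.lucene.store.Directory;

    // Counts merges belonging to a forceMerge request: such merges carry
    // maxNumSegments != -1 (previously signalled by the boolean OneMerge.optimize).
    class ForcedMergeCountingWriter extends IndexWriter {
      final AtomicInteger forcedMergeCount = new AtomicInteger();

      ForcedMergeCountingWriter(Directory dir, IndexWriterConfig conf) throws IOException {
        super(dir, conf);
      }

      @Override
      public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException {
        if (merge.maxNumSegments != -1) {
          forcedMergeCount.incrementAndGet();
        }
        super.merge(merge);
      }
    }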
diff --git a/lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java b/lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java similarity index 85% rename from lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java rename to lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java index 04770d306ba..3b6ca96f31a 100644 --- a/lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java +++ b/lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java @@ -27,13 +27,12 @@ import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; +public class TestForceMergeForever extends LuceneTestCase { -public class TestOptimizeForever extends LuceneTestCase { - - // Just counts how many merges are done for optimize + // Just counts how many merges are done private static class MyIndexWriter extends IndexWriter { - AtomicInteger optimizeMergeCount = new AtomicInteger(); + AtomicInteger mergeCount = new AtomicInteger(); private boolean first; public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception { @@ -42,12 +41,12 @@ public class TestOptimizeForever extends LuceneTestCase { @Override public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException { - if (merge.optimize && (first || merge.segments.size() == 1)) { + if (merge.maxNumSegments != -1 && (first || merge.segments.size() == 1)) { first = false; if (VERBOSE) { - System.out.println("TEST: optimized merge"); + System.out.println("TEST: maxNumSegments merge"); } - optimizeMergeCount.incrementAndGet(); + mergeCount.incrementAndGet(); } super.merge(merge); } @@ -57,7 +56,7 @@ public class TestOptimizeForever extends LuceneTestCase { final Directory d = newDirectory(); final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); - // Try to make an index that requires optimizing: + // Try to make an index that requires merging: w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 11)); final int numStartDocs = atLeast(20); final LineFileDocs docs = new LineFileDocs(random); @@ -95,10 +94,10 @@ public class TestOptimizeForever extends LuceneTestCase { } }; t.start(); - w.optimize(); + w.forceMerge(1); doStop.set(true); t.join(); - assertTrue("optimize count is " + w.optimizeMergeCount.get(), w.optimizeMergeCount.get() <= 1); + assertTrue("merge count is " + w.mergeCount.get(), w.mergeCount.get() <= 1); w.close(); d.close(); } diff --git a/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java b/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java index 6f9647ddc67..ecef101d388 100644 --- a/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java +++ b/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java @@ -100,7 +100,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream())); - writer.optimize(); + writer.forceMerge(1); writer.close(); assertFNXFiles(dir, "_2.fnx"); @@ -140,7 +140,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream())); - writer.optimize(); + writer.forceMerge(1); writer.close(); assertFNXFiles(dir, "_2.fnx"); @@ 
-187,7 +187,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase { } IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream())); - writer.optimize(); + writer.forceMerge(1); writer.close(); assertFNXFiles(dir, "_2.fnx"); dir.close(); @@ -270,7 +270,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase { return dir; } - public void testOptimize() throws IOException { + public void testForceMerge() throws IOException { for (int i = 0; i < 2*RANDOM_MULTIPLIER; i++) { Set fieldNames = new HashSet(); final int numFields = 2 + (TEST_NIGHTLY ? random.nextInt(200) : random.nextInt(20)); @@ -285,7 +285,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase { FieldNumberBiMap globalFieldMap = writer.segmentInfos .getOrLoadGlobalFieldNumberMap(base); Set> entries = globalFieldMap.entries(); - writer.optimize(); + writer.forceMerge(1); writer.commit(); writer.close(); Set> afterOptmize = globalFieldMap.entries(); @@ -352,7 +352,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase { IndexWriter w = new IndexWriter(base, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy( new LogByteSizeMergePolicy())); - w.optimize(); + w.forceMerge(1); w.close(); SegmentInfos sis = new SegmentInfos(); sis.read(base); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexCommit.java b/lucene/src/test/org/apache/lucene/index/TestIndexCommit.java index d6482b1d35e..d56d62d1bd2 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexCommit.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexCommit.java @@ -42,7 +42,7 @@ public class TestIndexCommit extends LuceneTestCase { @Override public long getTimestamp() throws IOException { return 1;} @Override public Map getUserData() throws IOException { return null; } @Override public boolean isDeleted() { return false; } - @Override public boolean isOptimized() { return false; } + @Override public int getSegmentCount() { return 2; } }; IndexCommit ic2 = new IndexCommit() { @@ -55,7 +55,7 @@ public class TestIndexCommit extends LuceneTestCase { @Override public long getTimestamp() throws IOException { return 1;} @Override public Map getUserData() throws IOException { return null; } @Override public boolean isDeleted() { return false; } - @Override public boolean isOptimized() { return false; } + @Override public int getSegmentCount() { return 2; } }; assertEquals(ic1, ic2); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java index 55fc1b45dd4..e805c5e1563 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java @@ -95,18 +95,18 @@ public class TestIndexReader extends LuceneTestCase IndexReader r3 = IndexReader.openIfChanged(r2); assertNotNull(r3); assertFalse(c.equals(r3.getIndexCommit())); - assertFalse(r2.getIndexCommit().isOptimized()); + assertFalse(r2.getIndexCommit().getSegmentCount() == 1); r3.close(); writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); writer.close(); r3 = IndexReader.openIfChanged(r2); assertNotNull(r3); - assertTrue(r3.getIndexCommit().isOptimized()); + assertEquals(1, r3.getIndexCommit().getSegmentCount()); r2.close(); r3.close(); d.close(); @@ -381,11 +381,11 @@ 
public class TestIndexReader extends LuceneTestCase assertEquals(bin[i], bytesRef.bytes[i + bytesRef.offset]); } reader.close(); - // force optimize + // force merge writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy())); - writer.optimize(); + writer.forceMerge(1); writer.close(); reader = IndexReader.open(dir, false); doc2 = reader.document(reader.maxDoc() - 1); @@ -721,7 +721,7 @@ public class TestIndexReader extends LuceneTestCase // [incorrectly] hit a "docs out of order" // IllegalStateException because above out-of-bounds // deleteDocument corrupted the index: - writer.optimize(); + writer.forceMerge(1); writer.close(); if (!gotException) { fail("delete of out-of-bounds doc number failed to hit exception"); @@ -846,7 +846,9 @@ public class TestIndexReader extends LuceneTestCase assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs()); assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc()); assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions()); - assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized()); + if (!(index1 instanceof ParallelReader)) { + assertEquals("Single segment test differs.", index1.getSequentialSubReaders().length == 1, index2.getSequentialSubReaders().length == 1); + } // check field names Collection fields1 = index1.getFieldNames(FieldOption.ALL); @@ -970,19 +972,19 @@ public class TestIndexReader extends LuceneTestCase IndexReader r2 = IndexReader.openIfChanged(r); assertNotNull(r2); assertFalse(c.equals(r2.getIndexCommit())); - assertFalse(r2.getIndexCommit().isOptimized()); + assertFalse(r2.getIndexCommit().getSegmentCount() == 1); r2.close(); writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); writer.close(); r2 = IndexReader.openIfChanged(r); assertNotNull(r2); assertNull(IndexReader.openIfChanged(r2)); - assertTrue(r2.getIndexCommit().isOptimized()); + assertEquals(1, r2.getIndexCommit().getSegmentCount()); r.close(); r2.close(); @@ -1032,7 +1034,7 @@ public class TestIndexReader extends LuceneTestCase writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Make sure reopen to a single segment is still readonly: diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java index 4bcfd26500a..452fc06ac7c 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java @@ -192,15 +192,15 @@ public class TestIndexReaderClone extends LuceneTestCase { } // open non-readOnly reader1 on multi-segment index, then - // optimize the index, then clone to readOnly reader2 - public void testReadOnlyCloneAfterOptimize() throws Exception { + // fully merge the index, then clone to readOnly reader2 + public void testReadOnlyCloneAfterFullMerge() throws Exception { final Directory dir1 = newDirectory(); TestIndexReaderReopen.createIndex(random, dir1, true); IndexReader reader1 = IndexReader.open(dir1, false); IndexWriter w = new IndexWriter(dir1, newIndexWriterConfig( 
TEST_VERSION_CURRENT, new MockAnalyzer(random))); - w.optimize(); + w.forceMerge(1); w.close(); IndexReader reader2 = reader1.clone(true); assertTrue(isReadOnly(reader2)); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java index 238e747101b..7f5f3b89b19 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java @@ -80,7 +80,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { /** * Test that norms values are preserved as the index is maintained. Including * separate norms. Including merging indexes with seprate norms. Including - * optimize. + * full merge. */ public void testNorms() throws IOException { // test with a single index: index1 @@ -112,7 +112,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { createIndex(random, dir3); if (VERBOSE) { - System.out.println("TEST: now addIndexes/optimize"); + System.out.println("TEST: now addIndexes/full merge"); } IndexWriter iw = new IndexWriter( dir3, @@ -122,7 +122,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { setMergePolicy(newLogMergePolicy(3)) ); iw.addIndexes(dir1, dir2); - iw.optimize(); + iw.forceMerge(1); iw.close(); norms1.addAll(norms); @@ -135,7 +135,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { verifyIndex(dir3); doTestNorms(random, dir3); - // now with optimize + // now with full merge iw = new IndexWriter( dir3, newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr). @@ -143,7 +143,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { setMaxBufferedDocs(5). setMergePolicy(newLogMergePolicy(3)) ); - iw.optimize(); + iw.forceMerge(1); iw.close(); verifyIndex(dir3); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java index bfe40a810d7..858e856f803 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java @@ -33,7 +33,7 @@ import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount; import static org.apache.lucene.index.TestIndexReader.createDocument; public class TestIndexReaderDelete extends LuceneTestCase { - private void deleteReaderReaderConflict(boolean optimize) throws IOException { + private void deleteReaderReaderConflict(boolean doFullMerge) throws IOException { Directory dir = newDirectory(); Term searchTerm1 = new Term("content", "aaa"); @@ -49,8 +49,9 @@ public class TestIndexReaderDelete extends LuceneTestCase { addDoc(writer, searchTerm2.text()); addDoc(writer, searchTerm3.text()); } - if(optimize) - writer.optimize(); + if (doFullMerge) { + writer.forceMerge(1); + } writer.close(); // OPEN TWO READERS @@ -131,7 +132,7 @@ public class TestIndexReaderDelete extends LuceneTestCase { dir.close(); } - private void deleteReaderWriterConflict(boolean optimize) throws IOException { + private void deleteReaderWriterConflict(boolean doFullMerge) throws IOException { //Directory dir = new RAMDirectory(); Directory dir = newDirectory(); @@ -159,13 +160,14 @@ public class TestIndexReaderDelete extends LuceneTestCase { addDoc(writer, searchTerm2.text()); } - // REQUEST OPTIMIZATION + // REQUEST full merge // This causes a new segment to become current for all subsequent // searchers. 
Because of this, deletions made via a previously open // reader, which would be applied to that reader's segment, are lost // for subsequent searchers/readers - if(optimize) - writer.optimize(); + if (doFullMerge) { + writer.forceMerge(1); + } writer.close(); // The reader should not see the new data @@ -255,19 +257,19 @@ public class TestIndexReaderDelete extends LuceneTestCase { dir.close(); } - public void testDeleteReaderReaderConflictUnoptimized() throws IOException { + public void testDeleteReaderReaderConflictNoFullMerge() throws IOException { deleteReaderReaderConflict(false); } - public void testDeleteReaderReaderConflictOptimized() throws IOException { + public void testDeleteReaderReaderConflictFullMerge() throws IOException { deleteReaderReaderConflict(true); } - public void testDeleteReaderWriterConflictUnoptimized() throws IOException { + public void testDeleteReaderWriterConflictNoFullMerge() throws IOException { deleteReaderWriterConflict(false); } - public void testDeleteReaderWriterConflictOptimized() throws IOException { + public void testDeleteReaderWriterConflictFullMerge() throws IOException { deleteReaderWriterConflict(true); } diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java index 7215d6b2757..8046d928a6a 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java @@ -711,7 +711,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { for (int i = 0; i < n; i++) { writer.addDocument(createDocument(i, 3)); } - writer.optimize(); + writer.forceMerge(1); writer.close(); final TestReopen test = new TestReopen() { @@ -961,7 +961,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } if (!multiSegment) { - w.optimize(); + w.forceMerge(1); } w.close(); @@ -1019,14 +1019,14 @@ public class TestIndexReaderReopen extends LuceneTestCase { } case 2: { IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); - w.optimize(); + w.forceMerge(1); w.close(); break; } case 3: { IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); w.addDocument(createDocument(101, 4)); - w.optimize(); + w.forceMerge(1); w.addDocument(createDocument(102, 4)); w.addDocument(createDocument(103, 4)); w.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java index 49c74e070a6..49e03fc37d4 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -19,7 +19,6 @@ package org.apache.lucene.index; import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.PrintStream; import java.io.Reader; import java.io.StringReader; import java.util.ArrayList; @@ -54,16 +53,13 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.NativeFSLockFactory; import 
org.apache.lucene.store.NoLockFactory; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.store.SimpleFSLockFactory; import org.apache.lucene.store.SingleInstanceLockFactory; import org.apache.lucene.util.BytesRef; @@ -108,10 +104,10 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(60, reader.numDocs()); reader.close(); - // optimize the index and check that the new doc count is correct + // merge the index down and check that the new doc count is correct writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); assertEquals(60, writer.numDocs()); - writer.optimize(); + writer.forceMerge(1); assertEquals(60, writer.maxDoc()); assertEquals(60, writer.numDocs()); writer.close(); @@ -733,7 +729,7 @@ public class TestIndexWriter extends LuceneTestCase { writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); //LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy(); //lmp2.setUseCompoundFile(false); - writer.optimize(); + writer.forceMerge(1); writer.close(); } } @@ -1302,7 +1298,7 @@ public class TestIndexWriter extends LuceneTestCase { w.addDocument(doc); w.commit(); - w.optimize(); // force segment merge. + w.forceMerge(1); // force segment merge. w.close(); IndexReader ir = IndexReader.open(dir, true); @@ -1439,7 +1435,7 @@ public class TestIndexWriter extends LuceneTestCase { List<String> files = Arrays.asList(dir.listAll()); assertTrue(files.contains("_0.cfs")); w.addDocument(doc); - w.optimize(); + w.forceMerge(1); if (iter == 1) { w.commit(); } @@ -1450,10 +1446,10 @@ public class TestIndexWriter extends LuceneTestCase { // NOTE: here we rely on "Windows" behavior, ie, even // though IW wanted to delete _0.cfs since it was - // optimized away, because we have a reader open + // merged away, because we have a reader open // against this file, it should still be here: assertTrue(files.contains("_0.cfs")); - // optimize created this + // forceMerge created this //assertTrue(files.contains("_2.cfs")); w.deleteUnusedFiles(); @@ -1697,7 +1693,7 @@ public class TestIndexWriter extends LuceneTestCase { } s.close(); r.close(); - w.optimize(); + w.forceMerge(1); } } w.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java index 36c46821f5f..fbe809637b7 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java @@ -224,7 +224,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { } long midDiskUsage = dir.getMaxUsedSizeInBytes(); dir.resetMaxUsedSizeInBytes(); - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexReader.open(dir, true).close(); @@ -246,11 +246,11 @@ public class TestIndexWriterCommit extends LuceneTestCase { /* - * Verify that calling optimize when writer is open for + * Verify that calling forceMerge when writer is open for * "commit on close" works correctly both for rollback() * and close().
*/ - public void testCommitOnCloseOptimize() throws IOException { + public void testCommitOnCloseForceMerge() throws IOException { MockDirectoryWrapper dir = newDirectory(); // Must disable throwing exc on double-write: this // test uses IW.rollback which easily results in @@ -268,44 +268,44 @@ public class TestIndexWriterCommit extends LuceneTestCase { writer.close(); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); // Open a reader before closing (commiting) the writer: IndexReader reader = IndexReader.open(dir, true); - // Reader should see index as unoptimized at this + // Reader should see index as multi-seg at this // point: - assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized()); + assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1); reader.close(); // Abort the writer: writer.rollback(); - TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize"); + TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = IndexReader.open(dir, true); - // Reader should still see index as unoptimized: - assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized()); + // Reader should still see index as multi-segment + assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1); reader.close(); if (VERBOSE) { - System.out.println("TEST: do real optimize"); + System.out.println("TEST: do real full merge"); } writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); writer.close(); if (VERBOSE) { System.out.println("TEST: writer closed"); } - TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize"); + TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = IndexReader.open(dir, true); - // Reader should still see index as unoptimized: - assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized()); + // Reader should see index as one segment + assertEquals("Reader incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().length); reader.close(); dir.close(); } @@ -657,7 +657,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { r.close(); w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); - w.optimize(); + w.forceMerge(1); w.close(); assertEquals("test1", IndexReader.getCommitUserData(dir).get("label")); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 6f4e42b8fc3..6a789feb514 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -64,7 +64,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { doc.add(newField("city", text[i], TextField.TYPE_STORED)); modifier.addDocument(doc); } - modifier.optimize(); + modifier.forceMerge(1); modifier.commit(); Term term = new Term("city", "Amsterdam"); @@ -711,10 +711,10 @@ public class TestIndexWriterDelete extends LuceneTestCase { // flush (and commit if ac) if (VERBOSE) { - 
System.out.println("TEST: now optimize"); + System.out.println("TEST: now full merge"); } - modifier.optimize(); + modifier.forceMerge(1); if (VERBOSE) { System.out.println("TEST: now commit"); } diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java index 6e07c45d194..169056dbc38 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java @@ -17,13 +17,10 @@ package org.apache.lucene.index; * limitations under the License. */ -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.PrintStream; import java.io.Reader; import java.io.StringReader; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Random; @@ -657,7 +654,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); for(int j=0;j<17;j++) writer.addDocument(doc); - writer.optimize(); + writer.forceMerge(1); writer.close(); reader = IndexReader.open(dir, true); @@ -771,7 +768,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); for(int j=0;j<17;j++) writer.addDocument(doc); - writer.optimize(); + writer.forceMerge(1); writer.close(); reader = IndexReader.open(dir, true); @@ -927,7 +924,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { } } - public void testOptimizeExceptions() throws IOException { + public void testForceMergeExceptions() throws IOException { Directory startDir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100); @@ -947,10 +944,10 @@ public class TestIndexWriterExceptions extends LuceneTestCase { w = new IndexWriter(dir, conf); dir.setRandomIOExceptionRate(0.5); try { - w.optimize(); + w.forceMerge(1); } catch (IOException ioe) { if (ioe.getCause() == null) - fail("optimize threw IOException without root cause"); + fail("forceMerge threw IOException without root cause"); } dir.setRandomIOExceptionRate(0); w.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java similarity index 87% rename from lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java rename to lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java index a4b32427a5f..90f173657e8 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java @@ -28,8 +28,8 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; -public class TestIndexWriterOptimize extends LuceneTestCase { - public void testOptimizeMaxNumSegments() throws IOException { +public class TestIndexWriterForceMerge extends LuceneTestCase { + public void testPartialMerge() throws IOException { MockDirectoryWrapper dir = newDirectory(); @@ -56,7 +56,7 @@ public class TestIndexWriterOptimize extends LuceneTestCase { ldmp.setMergeFactor(5); writer = new IndexWriter(dir, newIndexWriterConfig( 
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(ldmp)); - writer.optimize(3); + writer.forceMerge(3); writer.close(); sis = new SegmentInfos(); @@ -71,7 +71,7 @@ public class TestIndexWriterOptimize extends LuceneTestCase { dir.close(); } - public void testOptimizeMaxNumSegments2() throws IOException { + public void testMaxNumSegments2() throws IOException { MockDirectoryWrapper dir = newDirectory(); final Document doc = new Document(); @@ -97,7 +97,7 @@ public class TestIndexWriterOptimize extends LuceneTestCase { final int segCount = sis.size(); - writer.optimize(7); + writer.forceMerge(7); writer.commit(); writer.waitForMerges(); @@ -115,11 +115,11 @@ public class TestIndexWriterOptimize extends LuceneTestCase { } /** - * Make sure optimize doesn't use any more than 1X + * Make sure forceMerge doesn't use any more than 1X * starting index size as its temporary free space * required. */ - public void testOptimizeTempSpaceUsage() throws IOException { + public void testForceMergeTempSpaceUsage() throws IOException { MockDirectoryWrapper dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy())); @@ -156,18 +156,18 @@ public class TestIndexWriterOptimize extends LuceneTestCase { // smaller one here could increase the disk usage and // cause a false failure: writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy())); - writer.optimize(); + writer.forceMerge(1); writer.close(); long maxDiskUsage = dir.getMaxUsedSizeInBytes(); - assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)", + assertTrue("forceMerge used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)", maxDiskUsage <= 4*startDiskUsage); dir.close(); } - // Test calling optimize(false) whereby optimize is kicked + // Test calling forceMerge(1, false) whereby forceMerge is kicked // off but we don't wait for it to finish (but // writer.close()) does wait - public void testBackgroundOptimize() throws IOException { + public void testBackgroundForceMerge() throws IOException { Directory dir = newDirectory(); for(int pass=0;pass<2;pass++) { @@ -182,22 +182,22 @@ public class TestIndexWriterOptimize extends LuceneTestCase { doc.add(newField("field", "aaa", StringField.TYPE_UNSTORED)); for(int i=0;i<100;i++) writer.addDocument(doc); - writer.optimize(false); + writer.forceMerge(1, false); if (0 == pass) { writer.close(); IndexReader reader = IndexReader.open(dir, true); - assertTrue(reader.isOptimized()); + assertEquals(1, reader.getSequentialSubReaders().length); reader.close(); } else { // Get another segment to flush so we can verify it is - // NOT included in the optimization + // NOT included in the merging writer.addDocument(doc); writer.addDocument(doc); writer.close(); IndexReader reader = IndexReader.open(dir, true); - assertTrue(!reader.isOptimized()); + assertTrue(reader.getSequentialSubReaders().length > 1); reader.close(); SegmentInfos infos = new SegmentInfos(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java 
b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java index 09c8071edf6..166135d23d1 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java @@ -64,7 +64,7 @@ public class TestIndexWriterMerging extends LuceneTestCase setMergePolicy(newLogMergePolicy(2)) ); writer.addIndexes(indexA, indexB); - writer.optimize(); + writer.forceMerge(1); writer.close(); fail = verifyIndex(merged, 0); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java index e849abf5328..97e62131c1e 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java @@ -24,7 +24,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; -import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; @@ -180,7 +179,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase { } // Now, build a starting index that has START_COUNT docs. We - // will then try to addIndexesNoOptimize into a copy of this: + // will then try to addIndexes into a copy of this: MockDirectoryWrapper startDir = newDirectory(); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); for(int j=0;j count); writer.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java index cf0a6b6270a..c3c33437ea0 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java @@ -326,9 +326,9 @@ public class TestIndexWriterUnicode extends LuceneTestCase { // Test multi segment r.close(); - writer.optimize(); + writer.forceMerge(1); - // Test optimized single segment + // Test single segment r = writer.getReader(); checkTermsOrder(r, allTerms, true); r.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java index 1b307f78ce6..86c0de9ad86 100755 --- a/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java +++ b/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java @@ -102,7 +102,7 @@ public class TestLazyProxSkipping extends LuceneTestCase { } // make sure the index has only a single segment - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory, false)); diff --git a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 8b630f41214..81c9cb4f525 100644 --- a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -77,7 +77,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { writer.addDocument(d1); } writer.commit(); - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexReader reader = getOnlySegmentReader(IndexReader.open(dir)); diff --git 
a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java index b792eca7f35..be387e9300f 100644 --- a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java +++ b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java @@ -25,7 +25,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec; // TODO -// - mix in optimize, addIndexes +// - mix in forceMerge, addIndexes // - randomoly mix in non-congruent docs @UseNoMemoryExpensiveCodec diff --git a/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java b/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java index e73b0b3c0e0..d3fe1e5c88a 100644 --- a/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java @@ -31,7 +31,7 @@ public class TestNoMergePolicy extends LuceneTestCase { public void testNoMergePolicy() throws Exception { MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES; assertNull(mp.findMerges(null)); - assertNull(mp.findMergesForOptimize(null, 0, null)); + assertNull(mp.findForcedMerges(null, 0, null)); assertNull(mp.findMergesToExpungeDeletes(null)); assertFalse(mp.useCompoundFile(null, null)); mp.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestNorms.java b/lucene/src/test/org/apache/lucene/index/TestNorms.java index 98cd28c301a..3f70b0d8c63 100755 --- a/lucene/src/test/org/apache/lucene/index/TestNorms.java +++ b/lucene/src/test/org/apache/lucene/index/TestNorms.java @@ -36,7 +36,7 @@ import org.apache.lucene.util.LuceneTestCase; /** * Test that norms info is preserved during index life - including - * separate norms, addDocument, addIndexes, optimize. + * separate norms, addDocument, addIndexes, forceMerge. */ public class TestNorms extends LuceneTestCase { @@ -74,7 +74,7 @@ public class TestNorms extends LuceneTestCase { * Test that norms values are preserved as the index is maintained. * Including separate norms. * Including merging indexes with seprate norms. - * Including optimize. + * Including forceMerge. */ public void testNorms() throws IOException { Directory dir1 = newDirectory(); @@ -111,7 +111,7 @@ public class TestNorms extends LuceneTestCase { setMergePolicy(newLogMergePolicy(3)) ); iw.addIndexes(dir1,dir2); - iw.optimize(); + iw.forceMerge(1); iw.close(); norms1.addAll(norms); @@ -124,7 +124,7 @@ public class TestNorms extends LuceneTestCase { verifyIndex(dir3); doTestNorms(random, dir3); - // now with optimize + // now with single segment iw = new IndexWriter( dir3, newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr). @@ -132,7 +132,7 @@ public class TestNorms extends LuceneTestCase { setMaxBufferedDocs(5). 
setMergePolicy(newLogMergePolicy(3)) ); - iw.optimize(); + iw.forceMerge(1); iw.close(); verifyIndex(dir3); diff --git a/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java b/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java index edcb8b383a2..867b2c43a7a 100644 --- a/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java +++ b/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java @@ -49,7 +49,7 @@ public class TestOmitNorms extends LuceneTestCase { d.add(f2); writer.addDocument(d); - writer.optimize(); + writer.forceMerge(1); // now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger // keep things constant d = new Document(); @@ -62,7 +62,7 @@ public class TestOmitNorms extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -116,7 +116,7 @@ public class TestOmitNorms extends LuceneTestCase { } // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -163,7 +163,7 @@ public class TestOmitNorms extends LuceneTestCase { } // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -209,7 +209,7 @@ public class TestOmitNorms extends LuceneTestCase { assertNoNrm(ram); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -221,7 +221,7 @@ public class TestOmitNorms extends LuceneTestCase { * Tests various combinations of omitNorms=true/false, the field not existing at all, * ensuring that only omitNorms is 'viral'. * Internally checks that MultiNorms.norms() is consistent (returns the same bytes) - * as the optimized equivalent. + * as the fully merged equivalent. */ public void testOmitNormsCombos() throws IOException { // indexed with norms @@ -290,8 +290,8 @@ public class TestOmitNorms extends LuceneTestCase { IndexReader ir1 = riw.getReader(); byte[] norms1 = MultiNorms.norms(ir1, field); - // optimize and validate MultiNorms against single segment. - riw.optimize(); + // fully merge and validate MultiNorms against single segment. 
+ riw.forceMerge(1); IndexReader ir2 = riw.getReader(); byte[] norms2 = ir2.getSequentialSubReaders()[0].norms(field); diff --git a/lucene/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/src/test/org/apache/lucene/index/TestOmitPositions.java index 3c501776720..e2c6badf8b5 100644 --- a/lucene/src/test/org/apache/lucene/index/TestOmitPositions.java +++ b/lucene/src/test/org/apache/lucene/index/TestOmitPositions.java @@ -109,7 +109,7 @@ public class TestOmitPositions extends LuceneTestCase { d.add(f9); writer.addDocument(d); - writer.optimize(); + writer.forceMerge(1); // now we add another document which has docs-only for f1, f4, f7, docs/freqs for f2, f5, f8, // and docs/freqs/positions for f3, f6, f9 @@ -148,7 +148,7 @@ public class TestOmitPositions extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -217,7 +217,7 @@ public class TestOmitPositions extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java index 97c6ca8e4f4..7b198979fe3 100644 --- a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java +++ b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java @@ -81,7 +81,7 @@ public class TestOmitTf extends LuceneTestCase { d.add(f2); writer.addDocument(d); - writer.optimize(); + writer.forceMerge(1); // now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger // keep things constant d = new Document(); @@ -96,7 +96,7 @@ public class TestOmitTf extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -148,7 +148,7 @@ public class TestOmitTf extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -190,7 +190,7 @@ public class TestOmitTf extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -233,7 +233,8 @@ public class TestOmitTf extends LuceneTestCase { assertNoPrx(ram); - // now add some documents with positions, and check there is no prox after optimization + // now add some documents with positions, and check + // there is no prox after full merge d = new Document(); f1 = newField("f1", "This field has positions", TextField.TYPE_UNSTORED); d.add(f1); @@ -242,7 +243,7 @@ public class TestOmitTf extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -278,7 +279,7 @@ public class TestOmitTf extends LuceneTestCase { //System.out.println(d); } - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -413,7 +414,7 @@ public class TestOmitTf extends LuceneTestCase { @Override public void collect(int doc) throws IOException { count++; - sum += doc + docBase; // use it to avoid any possibility of being optimized away + sum += doc + docBase; // use it to avoid any possibility of being eliminated by hotspot } public static int getCount() { return count; } diff --git a/lucene/src/test/org/apache/lucene/index/TestParallelReader.java b/lucene/src/test/org/apache/lucene/index/TestParallelReader.java index 76ec8bb9423..b27f64fa539 100644 --- a/lucene/src/test/org/apache/lucene/index/TestParallelReader.java +++
b/lucene/src/test/org/apache/lucene/index/TestParallelReader.java @@ -144,64 +144,6 @@ public class TestParallelReader extends LuceneTestCase { dir2.close(); } - public void testIsOptimized() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); - - // add another document to ensure that the indexes are not optimized - IndexWriter modifier = new IndexWriter( - dir1, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). - setMergePolicy(newLogMergePolicy(10)) - ); - Document d = new Document(); - d.add(newField("f1", "v1", TextField.TYPE_STORED)); - modifier.addDocument(d); - modifier.close(); - - modifier = new IndexWriter( - dir2, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). - setMergePolicy(newLogMergePolicy(10)) - ); - d = new Document(); - d.add(newField("f2", "v2", TextField.TYPE_STORED)); - modifier.addDocument(d); - modifier.close(); - - - ParallelReader pr = new ParallelReader(); - pr.add(IndexReader.open(dir1, false)); - pr.add(IndexReader.open(dir2, false)); - assertFalse(pr.isOptimized()); - pr.close(); - - modifier = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); - modifier.optimize(); - modifier.close(); - - pr = new ParallelReader(); - pr.add(IndexReader.open(dir1, false)); - pr.add(IndexReader.open(dir2, false)); - // just one of the two indexes are optimized - assertFalse(pr.isOptimized()); - pr.close(); - - - modifier = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); - modifier.optimize(); - modifier.close(); - - pr = new ParallelReader(); - pr.add(IndexReader.open(dir1, false)); - pr.add(IndexReader.open(dir2, false)); - // now both indexes are optimized - assertTrue(pr.isOptimized()); - pr.close(); - dir1.close(); - dir2.close(); - } - private void queryTest(Query query) throws IOException { ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs; ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs; diff --git a/lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java index f34a2b2ea14..4bf7bc9d724 100644 --- a/lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java +++ b/lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java @@ -58,7 +58,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum) iwOut.addIndexes(pr); - iwOut.optimize(); + iwOut.forceMerge(1); iwOut.close(); rdOut.close(); rd1.close(); @@ -88,7 +88,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { ir.close(); iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); - iw.optimize(); + iw.forceMerge(1); iw.close(); } @@ -116,7 +116,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { rd1.close(); rd2.close(); - iwOut.optimize(); + iwOut.forceMerge(1); iwOut.close(); rdOut.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java b/lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java index 768de2592dc..14b0ce9af14 100644 --- a/lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java +++ b/lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java @@ -245,7 +245,7 @@ 
public class TestPayloadProcessorProvider extends LuceneTestCase { processors.put(dir, new PerTermPayloadProcessor()); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors)); - writer.optimize(); + writer.forceMerge(1); writer.close(); verifyPayloadExists(dir, "p", new BytesRef("p1"), 0); diff --git a/lucene/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/src/test/org/apache/lucene/index/TestPayloads.java index 96ee84987e6..735f172c4d6 100644 --- a/lucene/src/test/org/apache/lucene/index/TestPayloads.java +++ b/lucene/src/test/org/apache/lucene/index/TestPayloads.java @@ -134,7 +134,7 @@ public class TestPayloads extends LuceneTestCase { writer.addDocument(d); // force merge - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -204,7 +204,7 @@ public class TestPayloads extends LuceneTestCase { writer.addDocument(d, analyzer); } - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -322,7 +322,7 @@ public class TestPayloads extends LuceneTestCase { writer.addDocument(d); - writer.optimize(); + writer.forceMerge(1); // flush writer.close(); @@ -621,7 +621,7 @@ public class TestPayloads extends LuceneTestCase { doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED)); writer.addDocument(doc); writer.addDocument(doc); - writer.optimize(); + writer.forceMerge(1); writer.close(); dir.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java index 71c6eda8327..06ce9307bd0 100644 --- a/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java +++ b/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java @@ -267,8 +267,8 @@ public class TestPerSegmentDeletes extends LuceneTestCase { } @Override - public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, - int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) + public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, + int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge) throws CorruptIndexException, IOException { return null; } diff --git a/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java index f630ad66a28..5d2384c6adc 100644 --- a/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java +++ b/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java @@ -120,7 +120,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { addDoc(writer, "ccc ccc ccc ccc"); // assure that we deal with a single segment - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexReader reader = IndexReader.open(dir, null, true, indexDivisor); diff --git a/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java index c1a393e9065..77d6483fc17 100644 --- a/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java +++ b/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java @@ -61,15 +61,15 @@ public class TestSegmentTermEnum extends LuceneTestCase { writer.close(); - // verify document frequency of terms in an unoptimized index + // verify document frequency of terms in a multi-segment index verifyDocFreq(); - // merge segments by optimizing the index + // merge segments writer = new IndexWriter(dir,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); writer.close(); - // verify document frequency of terms in an optimized index + // verify document frequency of terms in a single segment index verifyDocFreq(); } diff --git a/lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java b/lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java similarity index 93% rename from lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java rename to lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java index c1d768276e2..6b157cf2383 100644 --- a/lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java +++ b/lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java @@ -24,7 +24,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -public class TestSizeBoundedOptimize extends LuceneTestCase { +public class TestSizeBoundedForceMerge extends LuceneTestCase { private void addDocs(IndexWriter writer, int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { @@ -44,7 +44,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { } public void testByteSizeLimit() throws Exception { - // tests that the max merge size constraint is applied during optimize. + // tests that the max merge size constraint is applied during forceMerge. Directory dir = new RAMDirectory(); // Prepare an index w/ several small segments and a large one. @@ -63,11 +63,11 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf = newWriterConfig(); LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(); - lmp.setMaxMergeMBForOptimize((min + 1) / (1 << 20)); + lmp.setMaxMergeMBForForcedMerge((min + 1) / (1 << 20)); conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Should only be 3 segments in the index, because one of them exceeds the size limit @@ -77,7 +77,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { } public void testNumDocsLimit() throws Exception { - // tests that the max merge docs constraint is applied during optimize. + // tests that the max merge docs constraint is applied during forceMerge. Directory dir = new RAMDirectory(); // Prepare an index w/ several small segments and a large one. 
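A note on the size-bounded behavior these TestSizeBoundedForceMerge hunks exercise: segments larger than the configured limit are simply skipped when the forced merge runs. A minimal sketch of the renamed API, assuming a Directory dir and an Analyzer analyzer are in scope, and using Version.LUCENE_40 as a stand-in for the tests' TEST_VERSION_CURRENT:

    // Sketch only (not part of this patch): wire a size-bounded merge
    // policy into a writer, then request a full merge; any segment larger
    // than maxMergeMBForForcedMerge is left untouched.
    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
    lmp.setMaxMergeMBForForcedMerge(1.0); // renamed from setMaxMergeMBForOptimize
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    conf.setMergePolicy(lmp);
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.forceMerge(1);                 // renamed from writer.optimize()
    writer.close();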
@@ -100,7 +100,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Should only be 3 segments in the index, because one of them exceeds the size limit @@ -128,7 +128,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -155,7 +155,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -182,7 +182,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -208,7 +208,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -235,7 +235,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -266,7 +266,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Should only be 4 segments in the index, because of the merge factor and @@ -276,7 +276,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { assertEquals(4, sis.size()); } - public void testSingleNonOptimizedSegment() throws Exception { + public void testSingleMergeableSegment() throws Exception { Directory dir = new RAMDirectory(); IndexWriterConfig conf = newWriterConfig(); @@ -288,7 +288,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { writer.close(); - // delete the last document, so that the last segment is optimized. + // delete the last document, so that the last segment is merged. IndexReader r = IndexReader.open(dir, false); r.deleteDocument(r.numDocs() - 1); r.close(); @@ -299,7 +299,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Verify that the last segment does not have deletions. @@ -309,7 +309,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { assertFalse(sis.info(2).hasDeletions()); } - public void testSingleOptimizedSegment() throws Exception { + public void testSingleNonMergeableSegment() throws Exception { Directory dir = new RAMDirectory(); IndexWriterConfig conf = newWriterConfig(); @@ -325,7 +325,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Verify that the last segment does not have deletions. 
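The assertions in the hunks above and below all reduce to counting the segments that survive the merge. A hedged sketch of that check, continuing from the previous sketch's writer and dir and using the same SegmentInfos calls that appear in these tests:

    // Sketch only: read the newest segments_N file and count live segments.
    // forceMerge(1) collapses the index to a single segment unless a size
    // or doc-count bound excluded some segment from merging.
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    int segmentCount = sis.size();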
@@ -334,7 +334,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { assertEquals(1, sis.size()); } - public void testSingleNonOptimizedTooLargeSegment() throws Exception { + public void testSingleMergeableTooLargeSegment() throws Exception { Directory dir = new RAMDirectory(); IndexWriterConfig conf = newWriterConfig(); @@ -355,7 +355,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase { conf.setMergePolicy(lmp); writer = new IndexWriter(dir, conf); - writer.optimize(); + writer.forceMerge(1); writer.close(); // Verify that the last segment does not have deletions. diff --git a/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java index f2e4c32ee05..8e6d2b14549 100644 --- a/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java +++ b/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java @@ -52,7 +52,7 @@ public class TestStressAdvance extends LuceneTestCase { w.addDocument(doc); } - w.optimize(); + w.forceMerge(1); final List<Integer> aDocIDs = new ArrayList<Integer>(); final List<Integer> bDocIDs = new ArrayList<Integer>(); diff --git a/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java index 4883683dec8..26ece4621fd 100644 --- a/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -179,7 +179,7 @@ public class TestStressIndexing2 extends LuceneTestCase { threads[i].join(); } - // w.optimize(); + // w.forceMerge(1); //w.close(); for (int i=0; iThis method is not thread safe, be sure to only call it when no filter is running! * If you re-use the same filter instance for another * search, be sure to first reset the term counter diff --git a/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java index 4edc7173af1..9f21dac07ea 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java @@ -213,7 +213,7 @@ public class TestBooleanQuery extends LuceneTestCase { doc.add(new TextField("field", contents)); w.addDocument(doc); } - w.optimize(); + w.forceMerge(1); final IndexReader r = w.getReader(); final IndexSearcher s = newSearcher(r); w.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index 35d879d30dc..45c07c2c6cb 100644 --- a/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -32,7 +32,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util._TestUtil; public class TestCachingWrapperFilter extends LuceneTestCase { @@ -239,8 +238,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits); assertEquals(missCount, filter.missCount); - // NOTE: silliness to make sure JRE does not optimize - // away our holding onto oldReader to prevent + // NOTE: silliness to make sure JRE does not eliminate + // our holding onto oldReader to prevent // CachingWrapperFilter's WeakHashMap from dropping the // entry: assertTrue(oldReader != null); diff --git
a/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java index c2e71ea43de..6e18c6bd4ed 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java @@ -540,7 +540,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { writer.addDocument(doc); } - writer.optimize(); + writer.forceMerge(1); writer.deleteDocuments(new Term("id","0")); writer.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java index 3df21e12223..21a9257b381 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java @@ -77,7 +77,7 @@ public class TestFilteredQuery extends LuceneTestCase { // tests here require single segment (eg try seed // 8239472272678419952L), because SingleDocTestFilter(x) // blindly accepts that docID in any sub-segment - writer.optimize(); + writer.forceMerge(1); reader = writer.getReader(); writer.close (); diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java b/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java index d4e1f14d932..af5b239cff0 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java +++ b/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java @@ -61,15 +61,16 @@ public class TestFilteredSearch extends LuceneTestCase { directory.close(); } - public void searchFiltered(IndexWriter writer, Directory directory, Filter filter, boolean optimize) { + public void searchFiltered(IndexWriter writer, Directory directory, Filter filter, boolean fullMerge) { try { for (int i = 0; i < 60; i++) {//Simple docs Document doc = new Document(); doc.add(newField(FIELD, Integer.toString(i), StringField.TYPE_STORED)); writer.addDocument(doc); } - if(optimize) - writer.optimize(); + if (fullMerge) { + writer.forceMerge(1); + } writer.close(); BooleanQuery booleanQuery = new BooleanQuery(); diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java index 3ddc84b3b22..53524e11514 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java +++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java @@ -172,7 +172,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { writer.addDocument(doc); } - writer.optimize(); + writer.forceMerge(1); writer.close(); IndexSearcher searcher = new IndexSearcher(dir); diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java b/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java index fe3df7a1e57..50ae51820f9 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java @@ -56,7 +56,7 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase { writer.addDocument(doc); ((i % 2 == 0) ? 
swriter1 : swriter2).addDocument(doc); } - writer.optimize(); swriter1.optimize(); swriter2.optimize(); + writer.forceMerge(1); swriter1.forceMerge(1); swriter2.forceMerge(1); writer.close(); swriter1.close(); swriter2.close(); reader = IndexReader.open(dir, true); diff --git a/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java b/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java index b46711ba878..5826c729591 100755 --- a/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java +++ b/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java @@ -71,7 +71,7 @@ public class TestScorerPerf extends LuceneTestCase { } iw.addDocument(d); } - iw.optimize(); + iw.forceMerge(1); iw.close(); } @@ -103,7 +103,7 @@ public class TestScorerPerf extends LuceneTestCase { @Override public void collect(int doc) { count++; - sum += docBase + doc; // use it to avoid any possibility of being optimized away + sum += docBase + doc; // use it to avoid any possibility of being eliminated by hotspot } public int getCount() { return count; } diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java index 8a35d91625a..b3112516329 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSort.java +++ b/lucene/src/test/org/apache/lucene/search/TestSort.java @@ -201,9 +201,9 @@ public class TestSort extends LuceneTestCase { } writer.addDocument (doc); } - //writer.optimize (); + //writer.forceMerge(1); //System.out.println(writer.getSegmentCount()); - writer.close (); + writer.close(); return new IndexSearcher (indexStore, true); } @@ -1132,7 +1132,7 @@ public class TestSort extends LuceneTestCase { doc.add (new StringField ("string", "b"+i)); writer.addDocument (doc); } - writer.optimize(); // enforce one segment to have a higher unique term count in all cases + writer.forceMerge(1); // enforce one segment to have a higher unique term count in all cases writer.close(); sort.setSort( new SortField("string", SortField.Type.STRING), diff --git a/lucene/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/src/test/org/apache/lucene/search/TestTermVectors.java index 337872aad1b..003086dfec2 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTermVectors.java +++ b/lucene/src/test/org/apache/lucene/search/TestTermVectors.java @@ -514,22 +514,22 @@ public class TestTermVectors extends LuceneTestCase { r.close(); } - public void testOptimizeAddDocs() throws Exception { + public void testFullMergeAddDocs() throws Exception { Directory target = newDirectory(); IndexWriter writer = createWriter(target); - // with maxBufferedDocs=2, this results in two segments, so that optimize + // with maxBufferedDocs=2, this results in two segments, so that forceMerge // actually does something. 
for (int i = 0; i < 4; i++) { writer.addDocument(createDoc()); } - writer.optimize(); + writer.forceMerge(1); writer.close(); verifyIndex(target); target.close(); } - public void testOptimizeAddIndexesDir() throws Exception { + public void testFullMergeAddIndexesDir() throws Exception { Directory[] input = new Directory[] { newDirectory(), newDirectory() }; Directory target = newDirectory(); @@ -539,7 +539,7 @@ public class TestTermVectors extends LuceneTestCase { IndexWriter writer = createWriter(target); writer.addIndexes(input); - writer.optimize(); + writer.forceMerge(1); writer.close(); verifyIndex(target); @@ -547,7 +547,7 @@ public class TestTermVectors extends LuceneTestCase { IOUtils.close(target, input[0], input[1]); } - public void testOptimizeAddIndexesReader() throws Exception { + public void testFullMergeAddIndexesReader() throws Exception { Directory[] input = new Directory[] { newDirectory(), newDirectory() }; Directory target = newDirectory(); @@ -561,7 +561,7 @@ public class TestTermVectors extends LuceneTestCase { writer.addIndexes(r); r.close(); } - writer.optimize(); + writer.forceMerge(1); writer.close(); verifyIndex(target); diff --git a/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java index ad3c420c2b7..1b32e76061e 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java +++ b/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java @@ -74,7 +74,7 @@ public class TestTopDocsMerge extends LuceneTestCase { { dir = newDirectory(); final RandomIndexWriter w = new RandomIndexWriter(random, dir); - // w.setDoRandomOptimize(false); + // w.setDoRandomForceMerge(false); // w.w.getConfig().setMaxBufferedDocs(atLeast(100)); diff --git a/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java b/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java index 9b40d527061..890c66ce8e6 100644 --- a/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java +++ b/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java @@ -104,7 +104,7 @@ public class TestRAMDirectory extends LuceneTestCase { final IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); - writer.optimize(); + writer.forceMerge(1); assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes()); @@ -131,7 +131,7 @@ public class TestRAMDirectory extends LuceneTestCase { for (int i=0; i : 2000 - Optimize + ForceMerge(1) CloseIndex } diff --git a/modules/benchmark/conf/collector-small.alg b/modules/benchmark/conf/collector-small.alg index c67cab91bb1..490039f7d5e 100644 --- a/modules/benchmark/conf/collector-small.alg +++ b/modules/benchmark/conf/collector-small.alg @@ -52,7 +52,7 @@ log.queries=true { "Populate" CreateIndex { "MAddDocs" AddDoc } : 200000 - Optimize + ForceMerge(1) CloseIndex } diff --git a/modules/benchmark/conf/collector.alg b/modules/benchmark/conf/collector.alg index a80d5642348..2baf000657f 100644 --- a/modules/benchmark/conf/collector.alg +++ b/modules/benchmark/conf/collector.alg @@ -52,7 +52,7 @@ log.queries=true { "Populate" CreateIndex { "MAddDocs" AddDoc } : 2000000 - Optimize + ForceMerge(1) CloseIndex } diff --git a/modules/benchmark/conf/deletes.alg b/modules/benchmark/conf/deletes.alg index a54d4f81fbe..2f2e638385f 100644 --- a/modules/benchmark/conf/deletes.alg +++ b/modules/benchmark/conf/deletes.alg @@ -58,7 +58,7 @@ CloseIndex { "Populate" OpenIndex { AddDoc(10) > : 200000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 >
diff --git a/modules/benchmark/conf/facets.alg b/modules/benchmark/conf/facets.alg
index d53337eb77b..c757d5710ce 100644
--- a/modules/benchmark/conf/facets.alg
+++ b/modules/benchmark/conf/facets.alg
@@ -52,7 +52,7 @@ task.max.depth.log=2
   -CreateIndex
   -CreateTaxonomyIndex
   { "MAddDocs" AddFacetedDoc > : *
-  -Optimize
+  -ForceMerge(1)
   -CloseIndex
   -CloseTaxonomyIndex
 }
diff --git a/modules/benchmark/conf/highlight-profile.alg b/modules/benchmark/conf/highlight-profile.alg
index 234ebb1e841..3d56cc78d65 100644
--- a/modules/benchmark/conf/highlight-profile.alg
+++ b/modules/benchmark/conf/highlight-profile.alg
@@ -44,7 +44,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
 { "Rounds"
diff --git a/modules/benchmark/conf/highlight-vs-vector-highlight.alg b/modules/benchmark/conf/highlight-vs-vector-highlight.alg
index a98e3217760..61835381695 100644
--- a/modules/benchmark/conf/highlight-vs-vector-highlight.alg
+++ b/modules/benchmark/conf/highlight-vs-vector-highlight.alg
@@ -48,7 +48,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
 {
diff --git a/modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg b/modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg
index 58028f9a183..feb1f5ae85b 100644
--- a/modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg
+++ b/modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg
@@ -54,7 +54,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   [{ "MAddDocs" AddDoc } : 5000] : 4
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/indexing-flush-by-RAM.alg b/modules/benchmark/conf/indexing-flush-by-RAM.alg
index be88a1f4bc0..d4346685050 100644
--- a/modules/benchmark/conf/indexing-flush-by-RAM.alg
+++ b/modules/benchmark/conf/indexing-flush-by-RAM.alg
@@ -54,7 +54,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/indexing-multithreaded.alg b/modules/benchmark/conf/indexing-multithreaded.alg
index 261cdb3e6e0..86e820be765 100644
--- a/modules/benchmark/conf/indexing-multithreaded.alg
+++ b/modules/benchmark/conf/indexing-multithreaded.alg
@@ -54,7 +54,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   [{ "MAddDocs" AddDoc } : 5000] : 4
-  Optimize
+  ForceMerge(1)
   CommitIndex(commit1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/indexing.alg b/modules/benchmark/conf/indexing.alg
index 7c8673b7981..b97195bb1f5 100644
--- a/modules/benchmark/conf/indexing.alg
+++ b/modules/benchmark/conf/indexing.alg
@@ -54,7 +54,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/micro-standard-flush-by-ram.alg b/modules/benchmark/conf/micro-standard-flush-by-ram.alg
index 0d2c6853d3e..63e25b5e0c8 100644
--- a/modules/benchmark/conf/micro-standard-flush-by-ram.alg
+++ b/modules/benchmark/conf/micro-standard-flush-by-ram.alg
@@ -53,7 +53,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc > : 2000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/micro-standard.alg b/modules/benchmark/conf/micro-standard.alg
index e0a554a24f8..b3bbb45d617 100644
--- a/modules/benchmark/conf/micro-standard.alg
+++ b/modules/benchmark/conf/micro-standard.alg
@@ -52,7 +52,7 @@ log.queries=true
 { "Populate"
   -CreateIndex
   { "MAddDocs" AddDoc > : 2000
-  -Optimize
+  -ForceMerge(1)
   -CloseIndex
 }
diff --git a/modules/benchmark/conf/sample.alg b/modules/benchmark/conf/sample.alg
index c7b9f25fb00..2ce800e9d3f 100644
--- a/modules/benchmark/conf/sample.alg
+++ b/modules/benchmark/conf/sample.alg
@@ -62,7 +62,7 @@ log.queries=false
 { "PopulateShort"
   CreateIndex
   { AddDoc(4000) > : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 >
@@ -71,7 +71,7 @@ log.queries=false
 { "PopulateLong"
   CreateIndex
   { AddDoc(8000) > : 10000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 >
diff --git a/modules/benchmark/conf/sloppy-phrase.alg b/modules/benchmark/conf/sloppy-phrase.alg
index f0caad72599..0fc2a91cea6 100644
--- a/modules/benchmark/conf/sloppy-phrase.alg
+++ b/modules/benchmark/conf/sloppy-phrase.alg
@@ -52,7 +52,7 @@ ResetSystemErase
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc(2000) > : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/sort-standard.alg b/modules/benchmark/conf/sort-standard.alg
index c7413fc594b..0097fa8c17c 100644
--- a/modules/benchmark/conf/sort-standard.alg
+++ b/modules/benchmark/conf/sort-standard.alg
@@ -50,7 +50,7 @@ log.queries=true
 { "Populate"
   -CreateIndex
   { "MAddDocs" AddDoc(100) > : 500000
-  -Optimize
+  -ForceMerge(1)
   -CloseIndex
 }
diff --git a/modules/benchmark/conf/standard-flush-by-RAM.alg b/modules/benchmark/conf/standard-flush-by-RAM.alg
index ba60ac8247c..73a1023b1b7 100644
--- a/modules/benchmark/conf/standard-flush-by-RAM.alg
+++ b/modules/benchmark/conf/standard-flush-by-RAM.alg
@@ -53,7 +53,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/standard-highlights-notv.alg b/modules/benchmark/conf/standard-highlights-notv.alg
index 889f5d744f1..25c4afe8a8e 100644
--- a/modules/benchmark/conf/standard-highlights-notv.alg
+++ b/modules/benchmark/conf/standard-highlights-notv.alg
@@ -44,7 +44,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
 { "Rounds"
diff --git a/modules/benchmark/conf/standard-highlights-tv.alg b/modules/benchmark/conf/standard-highlights-tv.alg
index 8c7f5339a06..8aab1f88f9d 100644
--- a/modules/benchmark/conf/standard-highlights-tv.alg
+++ b/modules/benchmark/conf/standard-highlights-tv.alg
@@ -44,7 +44,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
 { "Rounds"
diff --git a/modules/benchmark/conf/standard.alg b/modules/benchmark/conf/standard.alg
index 66e66ef2c3f..6a9038f7b02 100644
--- a/modules/benchmark/conf/standard.alg
+++ b/modules/benchmark/conf/standard.alg
@@ -53,7 +53,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
diff --git a/modules/benchmark/conf/vector-highlight-profile.alg b/modules/benchmark/conf/vector-highlight-profile.alg
index 6b456dffe37..e7acc47287c 100644
--- a/modules/benchmark/conf/vector-highlight-profile.alg
+++ b/modules/benchmark/conf/vector-highlight-profile.alg
@@ -44,7 +44,7 @@ log.queries=true
 { "Populate"
   CreateIndex
   { "MAddDocs" AddDoc } : 20000
-  Optimize
+  ForceMerge(1)
   CloseIndex
 }
 { "Rounds"
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java
similarity index 78%
rename from modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java
rename to modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java
index 19947b60ca7..07a355e4888 100644
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java
@@ -21,22 +21,25 @@
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.index.IndexWriter;
 /**
- * Optimize the index.
+ * Runs forceMerge on the index.
  * Other side effects: none.
  */
-public class OptimizeTask extends PerfTask {
+public class ForceMergeTask extends PerfTask {
-  public OptimizeTask(PerfRunData runData) {
+  public ForceMergeTask(PerfRunData runData) {
     super(runData);
   }
-  int maxNumSegments = 1;
+  int maxNumSegments = -1;
   @Override
   public int doLogic() throws Exception {
+    if (maxNumSegments == -1) {
+      throw new IllegalStateException("required argument (maxNumSegments) was not specified");
+    }
     IndexWriter iw = getRunData().getIndexWriter();
-    iw.optimize(maxNumSegments);
-    //System.out.println("optimize called");
+    iw.forceMerge(maxNumSegments);
+    //System.out.println("forceMerge called");
     return 1;
   }
diff --git a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index 848ac1fe400..b2c2f582616 100755
--- a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -77,7 +77,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "ResetSystemErase",
         "CreateIndex",
         "{ AddDoc } : 1000",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader",
         "{ CountingSearchTest } : 200",
@@ -114,7 +114,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "ResetSystemErase",
         "CreateIndex",
         "{ AddDoc } : 100",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader",
         "{ CountingSearchTest } : .5s",
@@ -137,7 +137,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "ResetSystemErase",
         "CreateIndex",
         "{ AddDoc } : 1000",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader",
         "{",
@@ -163,7 +163,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "ResetSystemErase",
         "CreateIndex",
         "{ AddDoc } : 100",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader(true)",
         "{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
@@ -202,7 +202,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "ResetSystemErase",
         "CreateIndex",
         "{ AddDoc } : 1000",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader(false)",
         "{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
@@ -240,7 +240,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "ResetSystemErase",
         "CreateIndex",
         "{ AddDoc } : 1000",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader",
         "{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
@@ -277,7 +277,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "# ----- alg ",
         "CreateIndex",
         "{ AddDoc } : * ",
-        "Optimize",
+        "ForceMerge(1)",
         "CloseIndex",
         "OpenReader",
         "{ CountingSearchTest } : 100",
@@ -818,9 +818,9 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
   }
   /**
-   * Test that we can call optimize(maxNumSegments).
+   * Test that we can call forceMerge(maxNumSegments).
    */
-  public void testOptimizeMaxNumSegments() throws Exception {
+  public void testForceMerge() throws Exception {
     // 1. alg definition (required in every "logic" test)
     String algLines[] = {
         "# ----- properties ",
@@ -841,7 +841,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "  ResetSystemErase",
         "  CreateIndex",
         "  { \"AddDocs\" AddDoc > : * ",
-        "  Optimize(3)",
+        "  ForceMerge(3)",
         "  CloseIndex()",
         "} : 2",
     };
diff --git a/modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java b/modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java
index 4fab2a8e302..b69c3e6c21f 100644
--- a/modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java
+++ b/modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java
@@ -58,7 +58,7 @@ import org.apache.lucene.util.encoding.IntEncoder;
  * conf.setMergePolicy(new ForceOptimizeMergePolicy());
  * IndexWriter writer = new IndexWriter(oldDir, conf);
  * writer.setPayloadProcessorProvider(fppp);
- * writer.optimize();
+ * writer.forceMerge(1);
  * writer.close();
  *
  * // merge that directory with the new index.
diff --git a/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
index 8446321edef..ebfad3de00a 100644
--- a/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
+++ b/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
@@ -113,9 +113,9 @@ public abstract class FunctionTestSetup extends LuceneTestCase {
     }
     if (!doMultiSegment) {
       if (VERBOSE) {
-        System.out.println("TEST: setUp optimize");
+        System.out.println("TEST: setUp full merge");
       }
-      iw.optimize();
+      iw.forceMerge(1);
     }
     iw.close();
     if (VERBOSE) {
diff --git a/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java b/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
index 2c021348dc9..53a19c7d260 100644
--- a/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
+++ b/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
@@ -145,7 +145,7 @@ public class TestQueryTemplateManager extends LuceneTestCase {
     for (String docFieldValue : docFieldValues) {
       w.addDocument(getDocumentFromString(docFieldValue));
     }
-    w.optimize();
+    w.forceMerge(1);
     w.close();
     searcher = new IndexSearcher(dir, true);
diff --git a/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
index 68ac65e8cd3..15c925ed140 100755
--- a/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
+++ b/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
@@ -481,11 +481,11 @@ public class SpellChecker implements java.io.Closeable {
    * Indexes the data from the given {@link Dictionary}.
    * @param dict Dictionary to index
    * @param config {@link IndexWriterConfig} to use
-   * @param optimize whether or not the spellcheck index should be optimized
+   * @param fullMerge whether or not the spellcheck index should be fully merged
    * @throws AlreadyClosedException if the Spellchecker is already closed
    * @throws IOException
    */
-  public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean optimize) throws IOException {
+  public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean fullMerge) throws IOException {
     synchronized (modifyCurrentIndexLock) {
       ensureOpen();
       final Directory dir = this.spellIndex;
@@ -536,9 +536,10 @@ public class SpellChecker implements java.io.Closeable {
       } finally {
         releaseSearcher(indexSearcher);
       }
+      if (fullMerge) {
+        writer.forceMerge(1);
+      }
       // close writer
-      if (optimize)
-        writer.optimize();
       writer.close();
       // TODO: this isn't that great, maybe in the future SpellChecker should take
       // IWC in its ctor / keep its writer open?
diff --git a/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java b/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
index 714e19a5dfd..f8c6c831889 100644
--- a/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
+++ b/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
@@ -70,7 +70,7 @@ public class TestLuceneDictionary extends LuceneTestCase {
     doc.add(newField("zzz", "bar", TextField.TYPE_STORED));
     writer.addDocument(doc);
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
   }
diff --git a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
index f6f26c31982..d11129277d4 100644
--- a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
+++ b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@@ -173,8 +173,8 @@ public class IndexDeletionPolicyWrapper implements IndexDeletionPolicy {
     }
     @Override
-    public boolean isOptimized() {
-      return delegate.isOptimized();
+    public int getSegmentCount() {
+      return delegate.getSegmentCount();
     }
     @Override
diff --git a/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java b/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
index c5e5278d860..9f119b0f5c6 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
@@ -135,7 +135,7 @@ public class SolrDeletionPolicy implements IndexDeletionPolicy, NamedListInitial
     IndexCommit newest = commits.get(commits.size() - 1);
     log.info("newest commit = " + newest.getVersion());
-    int optimizedKept = newest.isOptimized() ? 1 : 0;
+    int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
     int totalKept = 1;
     // work our way from newest to oldest, skipping the first since we always want to keep it.
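The SolrDeletionPolicy hunks just above and below replace the boolean IndexCommit.isOptimized() test with an explicit segment count. A minimal sketch of the new idiom, relying only on the IndexCommit.getSegmentCount() method this patch introduces; the helper class and method names are illustrative, not part of the patch:

    import java.util.List;

    import org.apache.lucene.index.IndexCommit;

    // Hypothetical helper (not in the patch): counts the commits that consist
    // of exactly one segment, i.e. the commits the removed code called
    // "optimized".
    final class SingleSegmentCommits {
      static int count(List<IndexCommit> commits) {
        int n = 0;
        for (IndexCommit c : commits) {
          if (c.getSegmentCount() == 1) { // was: c.isOptimized()
            n++;
          }
        }
        return n;
      }
    }

The same substitution recurs verbatim in the ReplicationHandler and SpellCheckComponent hunks further down.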
@@ -158,9 +158,9 @@ public class SolrDeletionPolicy implements IndexDeletionPolicy, NamedListInitial
           log.warn("Exception while checking commit point's age for deletion", e);
         }
-        if (optimizedKept < maxOptimizedCommitsToKeep && commit.isOptimized()) {
+        if (singleSegKept < maxOptimizedCommitsToKeep && commit.getSegmentCount() == 1) {
           totalKept++;
-          optimizedKept++;
+          singleSegKept++;
           continue;
         }
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index fe94860baed..90a00be51e3 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -758,7 +758,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
   void refreshCommitpoint() {
     IndexCommit commitPoint = core.getDeletionPolicy().getLatestCommit();
-    if(replicateOnCommit || (replicateOnOptimize && commitPoint.isOptimized())) {
+    if(replicateOnCommit || (replicateOnOptimize && commitPoint.getSegmentCount() == 1)) {
       indexCommitPoint = commitPoint;
     }
   }
@@ -827,7 +827,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
       if(replicateOnOptimize){
         Collection commits = IndexReader.listCommits(reader.directory());
         for (IndexCommit ic : commits) {
-          if(ic.isOptimized()){
+          if(ic.getSegmentCount() == 1){
             if(indexCommitPoint == null || indexCommitPoint.getVersion() < ic.getVersion()) indexCommitPoint = ic;
           }
         }
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
index 8e4b7f48211..3f9ca09f414 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
@@ -492,7 +492,7 @@ public class LukeRequestHandler extends RequestHandlerBase
     }
     indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
- indexInfo.add("optimized", reader.isOptimized() ); + indexInfo.add("segmentCount", reader.getSequentialSubReaders().length); indexInfo.add("current", reader.isCurrent() ); indexInfo.add("hasDeletions", reader.hasDeletions() ); indexInfo.add("directory", dir ); diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java index 6c94dca15f7..2983fb0128d 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java @@ -22,12 +22,7 @@ import java.io.StringReader; import java.util.*; import java.util.concurrent.ConcurrentHashMap; -import org.apache.lucene.search.spell.DirectSpellChecker; -import org.apache.lucene.search.spell.JaroWinklerDistance; -import org.apache.lucene.search.spell.LevensteinDistance; -import org.apache.lucene.search.spell.StringDistance; import org.apache.lucene.search.spell.SuggestWord; -import org.apache.lucene.search.spell.SuggestWordQueue; import org.apache.solr.client.solrj.response.SpellCheckResponse; import org.apache.solr.common.params.ModifiableSolrParams; import org.slf4j.Logger; @@ -618,7 +613,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar if (buildOnCommit) { buildSpellIndex(newSearcher); } else if (buildOnOptimize) { - if (newSearcher.getIndexReader().isOptimized()) { + if (newSearcher.getIndexReader().getSequentialSubReaders().length == 1) { buildSpellIndex(newSearcher); } else { LOG.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName()); diff --git a/solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java b/solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java index c14fe4e9399..508971e0907 100644 --- a/solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java +++ b/solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java @@ -108,7 +108,7 @@ public class FileBasedSpellChecker extends AbstractLuceneSpellChecker { d.add(new TextField(WORD_FIELD_NAME, s)); writer.addDocument(d); } - writer.optimize(); + writer.forceMerge(1); writer.close(); dictionary = new HighFrequencyDictionary(IndexReader.open(ramDir, true), diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java index 1bba1eaf715..3fa4f4da3fb 100644 --- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java +++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.net.URL; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -312,7 +311,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { log.info("start "+cmd); if (cmd.optimize) { - writer.optimize(cmd.maxOptimizeSegments); + writer.forceMerge(cmd.maxOptimizeSegments); } else if (cmd.expungeDeletes) { writer.expungeDeletes(); } diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java b/solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java index cd02ca298df..5da62965942 100644 --- 
--- a/solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java
@@ -95,7 +95,7 @@ public class TestSolrDeletionPolicy1 extends SolrTestCaseJ4 {
     for (Long version : commits.keySet()) {
       if (commits.get(version) == latest)
         continue;
-      assertTrue(commits.get(version).isOptimized());
+      assertEquals(1, commits.get(version).getSegmentCount());
     }
   }
diff --git a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
index c74c28a711a..5b33c699cab 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
@@ -31,7 +31,6 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.request.SolrQueryRequest;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.Ignore;
 import java.io.IOException;
 import java.util.*;
@@ -448,7 +447,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
       Directory dir = newDirectory();
       final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-      writer.setDoRandomOptimizeAssert(false);
+      writer.setDoRandomForceMergeAssert(false);
       // writer.commit();
       // reader = IndexReader.open(dir);
diff --git a/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java b/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
index 0983ae8fe0b..99da37629b5 100644
--- a/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
@@ -293,7 +293,7 @@ public class IndexBasedSpellCheckerTest extends SolrTestCaseJ4 {
       doc.add(new Field("title", ALT_DOCS[i], TextField.TYPE_STORED));
       iw.addDocument(doc);
     }
-    iw.optimize();
+    iw.forceMerge(1);
     iw.close();
     dir.close();
     indexDir.mkdirs();
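Across all of the files above, the writer-side change is mechanical: writer.optimize() becomes writer.forceMerge(1), which merges the index down to a single segment. A small self-contained sketch of the renamed call, assuming the post-rename API from this patch; the directory, analyzer, and Version constant are placeholder choices rather than anything taken from the patch:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class ForceMergeExample {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40)));
        Document doc = new Document();
        // Two-arg TextField constructor, as used elsewhere in this patch.
        doc.add(new TextField("body", "some indexed text"));
        writer.addDocument(doc);
        writer.forceMerge(1); // was: writer.optimize()
        writer.close();
        dir.close();
      }
    }

Passing a value greater than 1, as the ForceMerge(3) benchmark task above does, merges down to at most that many segments instead.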