LUCENE-3454: rename IW.optimize -> IW.forceMerge

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1201036 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2011-11-11 19:50:51 +00:00
parent a39e04bb08
commit 19889d6873
152 changed files with 701 additions and 893 deletions
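
For readers skimming the diff below: the commit is a mechanical rename of IndexWriter's optimize API to forceMerge. A minimal before/after sketch of what calling code changes (hedged; `writer` is assumed to be an already-configured IndexWriter, as in the IndexFiles demo touched below):

    // Before this commit:
    //   writer.optimize();        // merge the whole index into a single segment
    // After this commit:
    writer.forceMerge(1);          // same request, with the target segment count made explicit
    writer.commit();               // per the updated javadoc, commit afterwards so old segments can be freed
    writer.close();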

@@ -109,12 +109,12 @@ public class IndexFiles {
 indexDocs(writer, docDir);
 // NOTE: if you want to maximize search performance,
-// you can optionally call optimize here. This can be
-// a costly operation, so generally it's only worth
-// it when your index is relatively static (ie you're
-// done adding documents to it):
+// you can optionally call forceMerge here. This can be
+// a terribly costly operation, so generally it's only
+// worth it when your index is relatively static (ie
+// you're done adding documents to it):
 //
-// writer.optimize();
+// writer.forceMerge(1);
 writer.close();

@@ -1635,7 +1635,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
 writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
 writer.addDocument( doc( "t_text1", "more random words for second field" ) );
-writer.optimize();
+writer.forceMerge(1);
 writer.close();
 }
@@ -1643,7 +1643,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
 writer.deleteDocuments( new Term( "t_text1", "del" ) );
 // To see negative idf, keep comment the following line
-//writer.optimize();
+//writer.forceMerge(1);
 writer.close();
 }
@@ -1759,7 +1759,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 doc = new Document();
 doc.add(nfield);
 writer.addDocument(doc, analyzer);
-writer.optimize();
+writer.forceMerge(1);
 writer.close();
 reader = IndexReader.open(ramDir, true);
 numHighlights = 0;

@@ -96,7 +96,7 @@ public class InstantiatedIndex
 * Creates a new instantiated index that looks just like the index in a specific state as represented by a reader.
 *
 * @param sourceIndexReader the source index this new instantiated index will be copied from.
- * @throws IOException if the source index is not optimized, or when accessing the source.
+ * @throws IOException if the source index is not single-segment, or when accessing the source.
 */
 public InstantiatedIndex(IndexReader sourceIndexReader) throws IOException {
 this(sourceIndexReader, null);
@@ -109,13 +109,13 @@ public class InstantiatedIndex
 *
 * @param sourceIndexReader the source index this new instantiated index will be copied from.
 * @param fields fields to be added, or null for all
- * @throws IOException if the source index is not optimized, or when accessing the source.
+ * @throws IOException if the source index is not single-segment, or when accessing the source.
 */
 public InstantiatedIndex(IndexReader sourceIndexReader, Set<String> fields) throws IOException {
-if (!sourceIndexReader.isOptimized()) {
-System.out.println(("Source index is not optimized."));
-//throw new IOException("Source index is not optimized.");
+if (sourceIndexReader.getSequentialSubReaders().length != 1) {
+System.out.println(("Source index has more than one segment."));
+//throw new IOException("Source index has more than one segment.");
 }

@@ -55,14 +55,6 @@ public class InstantiatedIndexReader extends IndexReader {
 readerFinishedListeners = Collections.synchronizedSet(new HashSet<ReaderFinishedListener>());
 }
-/**
- * @return always true.
- */
-@Override
-public boolean isOptimized() {
-return true;
-}
 /**
 * An InstantiatedIndexReader is not a snapshot in time, it is completely in
 * sync with the latest commit to the store!

@@ -30,7 +30,7 @@ import org.apache.lucene.document.TextField;
 /**
 * @since 2009-mar-30 13:15:49
 */
-public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
+public class TestMultiSegmentReaderOnConstructor extends LuceneTestCase {
 public void test() throws Exception {
 Directory dir = newDirectory();
@@ -49,18 +49,18 @@ public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
 addDocument(iw, "All work and no play makes wendy a dull girl");
 iw.close();
-IndexReader unoptimizedReader = IndexReader.open(dir, false);
-unoptimizedReader.deleteDocument(2);
+IndexReader multiSegReader = IndexReader.open(dir, false);
+multiSegReader.deleteDocument(2);
 try {
-new InstantiatedIndex(unoptimizedReader);
+new InstantiatedIndex(multiSegReader);
 } catch (Exception e) {
 e.printStackTrace(System.out);
-fail("No exceptions when loading an unoptimized reader!");
+fail("No exceptions when loading a multi-seg reader!");
 }
 // todo some assertations.
-unoptimizedReader.close();
+multiSegReader.close();
 dir.close();
 }

@@ -105,22 +105,22 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
 }
 @Override
-public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+public MergeSpecification findForcedMerges(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
 assert maxNumSegments > 0;
 MergeSpecification spec = null;
-if (!isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
+if (!isMerged(infos, maxNumSegments, segmentsToMerge)) {
 // Find the newest (rightmost) segment that needs to
-// be optimized (other segments may have been flushed
-// since optimize started):
+// be merged (other segments may have been flushed
+// since the merge started):
 int last = infos.size();
 while(last > 0) {
 final SegmentInfo info = infos.info(--last);
-if (segmentsToOptimize.containsKey(info)) {
+if (segmentsToMerge.containsKey(info)) {
 last++;
 break;
 }
@@ -130,9 +130,9 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
 if (maxNumSegments == 1) {
-// Since we must optimize down to 1 segment, the
+// Since we must merge down to 1 segment, the
 // choice is simple:
-if (last > 1 || !isOptimized(infos.info(0))) {
+if (last > 1 || !isMerged(infos.info(0))) {
 spec = new MergeSpecification();
 spec.add(new OneMerge(infos.asList().subList(0, last)));

@@ -36,8 +36,8 @@ import org.apache.lucene.store.FSDirectory;
 *
 * <p>This tool does file-level copying of segments files.
 * This means it's unable to split apart a single segment
-* into multiple segments. For example if your index is
-* optimized, this tool won't help. Also, it does basic
+* into multiple segments. For example if your index is a
+* single segment, this tool won't help. Also, it does basic
 * file-level copying (using simple
 * File{In,Out}putStream) so it will not work with non
 * FSDirectory Directory impls.</p>

@@ -50,8 +50,8 @@ public class IndexMergeTool {
 System.out.println("Merging...");
 writer.addIndexes(indexes);
-System.out.println("Optimizing...");
-writer.optimize();
+System.out.println("Full merge...");
+writer.forceMerge(1);
 writer.close();
 System.out.println("Done.");
 }
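
For context, the full flow of IndexMergeTool after this change is: open a writer on the destination directory, add the source indexes, then force-merge down to one segment. A rough sketch under the assumption that `mergedDirectory`, `sourceDirectories`, and `config` have already been built from the tool's command-line arguments (that setup is not part of the hunk above):

    IndexWriter writer = new IndexWriter(mergedDirectory, config);  // config stands in for the tool's IndexWriterConfig
    System.out.println("Merging...");
    writer.addIndexes(sourceDirectories);   // file-level copy of the source segments
    System.out.println("Full merge...");
    writer.forceMerge(1);                   // was writer.optimize() before this commit
    writer.close();
    System.out.println("Done.");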

@@ -94,7 +94,7 @@ public class TestIndexSplitter extends LuceneTestCase {
 fsDir.close();
 }
-public void testDeleteThenOptimize() throws Exception {
+public void testDeleteThenFullMerge() throws Exception {
 // Create directories where the indexes will reside
 File indexPath = new File(TEMP_DIR, "testfilesplitter");
 _TestUtil.rmDir(indexPath);
@@ -134,7 +134,7 @@ public class TestIndexSplitter extends LuceneTestCase {
 indexReader.close();
 fsDirDest.close();
-// Optimize the split index
+// Fully merge the split index
 mergePolicy = new LogByteSizeMergePolicy();
 mergePolicy.setNoCFSRatio(1);
 iwConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
@@ -142,7 +142,7 @@ public class TestIndexSplitter extends LuceneTestCase {
 .setMergePolicy(mergePolicy);
 fsDirDest = newFSDirectory(indexSplitPath);
 indexWriter = new IndexWriter(fsDirDest, iwConfig);
-indexWriter.optimize();
+indexWriter.forceMerge(1);
 indexWriter.close();
 fsDirDest.close();

@@ -121,7 +121,7 @@ public class TestAppendingCodec extends LuceneTestCase {
 writer.addDocument(doc);
 writer.commit();
 writer.addDocument(doc);
-writer.optimize();
+writer.forceMerge(1);
 writer.close();
 IndexReader reader = IndexReader.open(dir, null, true, 1);
 assertEquals(2, reader.numDocs());

@@ -57,7 +57,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
 // Until we fix LUCENE-2348, the index must
 // have only 1 segment:
-writer.optimize();
+writer.forceMerge(1);
 reader = writer.getReader();
 writer.close();

@@ -68,7 +68,7 @@ public class TestSpanRegexQuery extends LuceneTestCase {
 doc = new Document();
 doc.add(newField("field", "first auto update", TextField.TYPE_UNSTORED));
 writer.addDocument(doc);
-writer.optimize();
+writer.forceMerge(1);
 writer.close();
 IndexSearcher searcher = new IndexSearcher(directory, true);
@@ -98,14 +98,14 @@ public class TestSpanRegexQuery extends LuceneTestCase {
 IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
 TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
 writerA.addDocument(lDoc);
-writerA.optimize();
+writerA.forceMerge(1);
 writerA.close();
 // creating second index writer
 IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
 TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
 writerB.addDocument(lDoc2);
-writerB.optimize();
+writerB.forceMerge(1);
 writerB.close();
 }
 }

@@ -141,7 +141,7 @@ public class TestCartesian extends LuceneTestCase {
 writer.commit();
 // TODO: fix CustomScoreQuery usage in testRange/testGeoHashRange so we don't need this.
-writer.optimize();
+writer.forceMerge(1);
 writer.close();
 }

@@ -523,16 +523,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
 subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
 }
-/**
- * Checks is the index is optimized (if it has a single segment and no deletions)
- * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
- */
-@Override
-public boolean isOptimized() {
-ensureOpen();
-return segmentInfos.size() == 1 && !hasDeletions();
-}
 @Override
 public int numDocs() {
 // Don't call ensureOpen() here (it could affect performance)
@@ -953,8 +943,8 @@ class DirectoryReader extends IndexReader implements Cloneable {
 Directory dir;
 long generation;
 long version;
-final boolean isOptimized;
 final Map<String,String> userData;
+private final int segmentCount;
 ReaderCommit(SegmentInfos infos, Directory dir) throws IOException {
 segmentsFileName = infos.getCurrentSegmentFileName();
@@ -963,7 +953,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
 files = Collections.unmodifiableCollection(infos.files(dir, true));
 version = infos.getVersion();
 generation = infos.getGeneration();
-isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions();
+segmentCount = infos.size();
 }
 @Override
@@ -972,8 +962,8 @@ class DirectoryReader extends IndexReader implements Cloneable {
 }
 @Override
-public boolean isOptimized() {
-return isOptimized;
+public int getSegmentCount() {
+return segmentCount;
 }
 @Override

@@ -430,12 +430,6 @@ public class FilterIndexReader extends IndexReader {
 return in.isCurrent();
 }
-@Override
-public boolean isOptimized() {
-ensureOpen();
-return in.isOptimized();
-}
 @Override
 public IndexReader[] getSequentialSubReaders() {
 return in.getSequentialSubReaders();

@@ -75,8 +75,8 @@ public abstract class IndexCommit implements Comparable<IndexCommit> {
 public abstract boolean isDeleted();
-/** Returns true if this commit is an optimized index. */
-public abstract boolean isOptimized();
+/** Returns number of segments referenced by this commit. */
+public abstract int getSegmentCount();
 /** Two IndexCommits are equal if both their Directory and versions are equal. */
 @Override

@@ -19,11 +19,9 @@ package org.apache.lucene.index;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -653,8 +651,8 @@ final class IndexFileDeleter {
 Collection<CommitPoint> commitsToDelete;
 long version;
 long generation;
-final boolean isOptimized;
 final Map<String,String> userData;
+private final int segmentCount;
 public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
 this.directory = directory;
@@ -664,7 +662,7 @@ final class IndexFileDeleter {
 version = segmentInfos.getVersion();
 generation = segmentInfos.getGeneration();
 files = Collections.unmodifiableCollection(segmentInfos.files(directory, true));
-isOptimized = segmentInfos.size() == 1 && !segmentInfos.info(0).hasDeletions();
+segmentCount = segmentInfos.size();
 }
 @Override
@@ -673,8 +671,8 @@ final class IndexFileDeleter {
 }
 @Override
-public boolean isOptimized() {
-return isOptimized;
+public int getSegmentCount() {
+return segmentCount;
 }
 @Override

@@ -28,7 +28,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.index.values.IndexDocValues;
 import org.apache.lucene.search.FieldCache; // javadocs
@@ -804,16 +803,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
 throw new UnsupportedOperationException("This reader does not support this method.");
 }
-/**
- * Checks is the index is optimized (if it has a single segment and
- * no deletions). Not implemented in the IndexReader base class.
- * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
- * @throws UnsupportedOperationException unless overridden in subclass
- */
-public boolean isOptimized() {
-throw new UnsupportedOperationException("This reader does not support this method.");
-}
 /**
 * Return an array of term frequency vectors for the specified document.
 * The array contains a vector for each vectorized field in the document.

@@ -35,7 +35,7 @@ import java.util.Collection;
 * java -cp lucene-core.jar org.apache.lucene.index.IndexUpgrader [-delete-prior-commits] [-verbose] indexDir
 * </pre>
 * Alternatively this class can be instantiated and {@link #upgrade} invoked. It uses {@link UpgradeIndexMergePolicy}
-* and triggers the upgrade via an optimize request to {@link IndexWriter}.
+* and triggers the upgrade via an forceMerge request to {@link IndexWriter}.
 * <p>This tool keeps only the last commit in an index; for this
 * reason, if the incoming index has more than one commit, the tool
 * refuses to run by default. Specify {@code -delete-prior-commits}
@@ -45,7 +45,7 @@ import java.util.Collection;
 * <p><b>Warning:</b> This tool may reorder documents if the index was partially
 * upgraded before execution (e.g., documents were added). If your application relies
 * on &quot;monotonicity&quot; of doc IDs (which means that the order in which the documents
-* were added to the index is preserved), do a full optimize instead.
+* were added to the index is preserved), do a full forceMerge instead.
 * The {@link MergePolicy} set by {@link IndexWriterConfig} may also reorder
 * documents.
 */
@@ -134,7 +134,7 @@ public final class IndexUpgrader {
 if (infoStream != null) {
 infoStream.message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
 }
-w.optimize();
+w.forceMerge(1);
 if (infoStream != null) {
 infoStream.message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION);
 }

@@ -101,11 +101,6 @@ import org.apache.lucene.util.TwoPhaseCommit;
 addDocument calls (see <a href="#mergePolicy">below</a>
 for changing the {@link MergeScheduler}).</p>
-<p>If an index will not have more documents added for a while and optimal search
-performance is desired, then either the full {@link #optimize() optimize}
-method or partial {@link #optimize(int)} method should be
-called before the index is closed.</p>
 <p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
 another <code>IndexWriter</code> on the same directory will lead to a
 {@link LockObtainFailedException}. The {@link LockObtainFailedException}
@@ -134,9 +129,8 @@ import org.apache.lucene.util.TwoPhaseCommit;
 The {@link MergePolicy} is invoked whenever there are
 changes to the segments in the index. Its role is to
 select which merges to do, if any, and return a {@link
-MergePolicy.MergeSpecification} describing the merges. It
-also selects merges to do for optimize(). (The default is
-{@link LogByteSizeMergePolicy}. Then, the {@link
+MergePolicy.MergeSpecification} describing the merges.
+The default is {@link LogByteSizeMergePolicy}. Then, the {@link
 MergeScheduler} is invoked with the requested merges and
 it decides when and how to run the merges. The default is
 {@link ConcurrentMergeScheduler}. </p>
@@ -223,8 +217,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 private DocumentsWriter docWriter;
 final IndexFileDeleter deleter;
-private Map<SegmentInfo,Boolean> segmentsToOptimize = new HashMap<SegmentInfo,Boolean>(); // used by optimize to note those needing optimization
-private int optimizeMaxNumSegments;
+// used by forceMerge to note those needing merging
+private Map<SegmentInfo,Boolean> segmentsToMerge = new HashMap<SegmentInfo,Boolean>();
+private int mergeMaxNumSegments;
 private Lock writeLock;
@@ -1215,7 +1210,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 * readers/searchers are open against the index, and up to
 * 2X the size of all segments being merged when
 * readers/searchers are open against the index (see
-* {@link #optimize()} for details). The sequence of
+* {@link #forceMerge(int)} for details). The sequence of
 * primitive merge operations performed is governed by the
 * merge policy.
 *
@@ -1565,55 +1560,52 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 final InfoStream infoStream;
 /**
-* Requests an "optimize" operation on an index, priming the index
-* for the fastest available search. Traditionally this has meant
-* merging all segments into a single segment as is done in the
-* default merge policy, but individual merge policies may implement
-* optimize in different ways.
+* Forces merge policy to merge segments until there's <=
+* maxNumSegments. The actual merges to be
+* executed are determined by the {@link MergePolicy}.
 *
-* <p> Optimize is a very costly operation, so you
-* should only do it if your search performance really
-* requires it. Many search applications do fine never
-* calling optimize. </p>
+* <p>This is a horribly costly operation, especially when
+* you pass a small {@code maxNumSegments}; usually you
+* should only call this if the index is static (will no
+* longer be changed).</p>
 *
-* <p>Note that optimize requires 2X the index size free
+* <p>Note that this requires up to 2X the index size free
 * space in your Directory (3X if you're using compound
 * file format). For example, if your index size is 10 MB
-* then you need 20 MB free for optimize to complete (30
+* then you need up to 20 MB free for this to complete (30
 * MB if you're using compound file format). Also,
-* it's best to call {@link #commit()} after the optimize
-* completes to allow IndexWriter to free up disk space.</p>
+* it's best to call {@link #commit()} afterwards,
+* to allow IndexWriter to free up disk space.</p>
 *
-* <p>If some but not all readers re-open while an
-* optimize is underway, this will cause > 2X temporary
+* <p>If some but not all readers re-open while merging
+* is underway, this will cause > 2X temporary
 * space to be consumed as those new readers will then
-* hold open the partially optimized segments at that
-* time. It is best not to re-open readers while optimize
-* is running.</p>
+* hold open the temporary segments at that time. It is
+* best not to re-open readers while merging is running.</p>
 *
 * <p>The actual temporary usage could be much less than
 * these figures (it depends on many factors).</p>
 *
-* <p>In general, once the optimize completes, the total size of the
+* <p>In general, once the this completes, the total size of the
 * index will be less than the size of the starting index.
 * It could be quite a bit smaller (if there were many
 * pending deletes) or just slightly smaller.</p>
 *
-* <p>If an Exception is hit during optimize(), for example
+* <p>If an Exception is hit, for example
 * due to disk full, the index will not be corrupt and no
 * documents will have been lost. However, it may have
-* been partially optimized (some segments were merged but
+* been partially merged (some segments were merged but
 * not all), and it's possible that one of the segments in
 * the index will be in non-compound format even when
 * using compound file format. This will occur when the
 * Exception is hit during conversion of the segment into
 * compound format.</p>
 *
-* <p>This call will optimize those segments present in
+* <p>This call will merge those segments present in
 * the index when the call started. If other threads are
 * still adding documents and flushing segments, those
-* newly created segments will not be optimized unless you
-* call optimize again.</p>
+* newly created segments will not be merged unless you
+* call forceMerge again.</p>
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
@@ -1626,96 +1618,66 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
-* @see MergePolicy#findMergesForOptimize
-*/
-public void optimize() throws CorruptIndexException, IOException {
-optimize(true);
-}
-/**
-* Optimize the index down to <= maxNumSegments. If
-* maxNumSegments==1 then this is the same as {@link
-* #optimize()}.
-*
-* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-* you should immediately close the writer. See <a
-* href="#OOME">above</a> for details.</p>
+* @see MergePolicy#findMerges
 *
 * @param maxNumSegments maximum number of segments left
-* in the index after optimization finishes
+* in the index after merging finishes
 */
-public void optimize(int maxNumSegments) throws CorruptIndexException, IOException {
-optimize(maxNumSegments, true);
+public void forceMerge(int maxNumSegments) throws CorruptIndexException, IOException {
+forceMerge(maxNumSegments, true);
 }
-/** Just like {@link #optimize()}, except you can specify
-* whether the call should block until the optimize
-* completes. This is only meaningful with a
+/** Just like {@link #forceMerge(int)}, except you can
+* specify whether the call should block until
+* all merging completes. This is only meaningful with a
 * {@link MergeScheduler} that is able to run merges in
 * background threads.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 */
-public void optimize(boolean doWait) throws CorruptIndexException, IOException {
-optimize(1, doWait);
-}
-/** Just like {@link #optimize(int)}, except you can
-* specify whether the call should block until the
-* optimize completes. This is only meaningful with a
-* {@link MergeScheduler} that is able to run merges in
-* background threads.
-*
-* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-* you should immediately close the writer. See <a
-* href="#OOME">above</a> for details.</p>
-*/
-public void optimize(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
+public void forceMerge(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
 ensureOpen();
 if (maxNumSegments < 1)
 throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
 if (infoStream != null) {
-infoStream.message("IW", "optimize: index now " + segString());
-infoStream.message("IW", "now flush at optimize");
+infoStream.message("IW", "forceMerge: index now " + segString());
+infoStream.message("IW", "now flush at forceMerge");
 }
 flush(true, true);
 synchronized(this) {
 resetMergeExceptions();
-segmentsToOptimize.clear();
+segmentsToMerge.clear();
 for(SegmentInfo info : segmentInfos) {
-segmentsToOptimize.put(info, Boolean.TRUE);
+segmentsToMerge.put(info, Boolean.TRUE);
 }
-optimizeMaxNumSegments = maxNumSegments;
-// Now mark all pending & running merges as optimize
-// merge:
+mergeMaxNumSegments = maxNumSegments;
+// Now mark all pending & running merges as isMaxNumSegments:
 for(final MergePolicy.OneMerge merge : pendingMerges) {
-merge.optimize = true;
-merge.maxNumSegmentsOptimize = maxNumSegments;
-segmentsToOptimize.put(merge.info, Boolean.TRUE);
+merge.maxNumSegments = maxNumSegments;
+segmentsToMerge.put(merge.info, Boolean.TRUE);
 }
 for ( final MergePolicy.OneMerge merge: runningMerges ) {
-merge.optimize = true;
-merge.maxNumSegmentsOptimize = maxNumSegments;
-segmentsToOptimize.put(merge.info, Boolean.TRUE);
+merge.maxNumSegments = maxNumSegments;
+segmentsToMerge.put(merge.info, Boolean.TRUE);
 }
 }
-maybeMerge(maxNumSegments, true);
+maybeMerge(maxNumSegments);
 if (doWait) {
 synchronized(this) {
 while(true) {
 if (hitOOM) {
-throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete optimize");
+throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete forceMerge");
 }
 if (mergeExceptions.size() > 0) {
@@ -1724,7 +1686,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 final int size = mergeExceptions.size();
 for(int i=0;i<size;i++) {
 final MergePolicy.OneMerge merge = mergeExceptions.get(i);
-if (merge.optimize) {
+if (merge.maxNumSegments != -1) {
 IOException err = new IOException("background merge hit exception: " + merge.segString(directory));
 final Throwable t = merge.getException();
 if (t != null)
@@ -1734,7 +1696,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 }
 }
-if (optimizeMergesPending())
+if (maxNumSegmentsMergesPending())
 doWait();
 else
 break;
@@ -1743,26 +1705,26 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 // If close is called while we are still
 // running, throw an exception so the calling
-// thread will know the optimize did not
+// thread will know merging did not
 // complete
 ensureOpen();
 }
 // NOTE: in the ConcurrentMergeScheduler case, when
 // doWait is false, we can return immediately while
-// background threads accomplish the optimization
+// background threads accomplish the merging
 }
 /** Returns true if any merges in pendingMerges or
-* runningMerges are optimization merges. */
-private synchronized boolean optimizeMergesPending() {
+* runningMerges are maxNumSegments merges. */
+private synchronized boolean maxNumSegmentsMergesPending() {
 for (final MergePolicy.OneMerge merge : pendingMerges) {
-if (merge.optimize)
+if (merge.maxNumSegments != -1)
 return true;
 }
 for (final MergePolicy.OneMerge merge : runningMerges) {
-if (merge.optimize)
+if (merge.maxNumSegments != -1)
 return true;
 }
@@ -1841,7 +1803,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 // NOTE: in the ConcurrentMergeScheduler case, when
 // doWait is false, we can return immediately while
-// background threads accomplish the optimization
+// background threads accomplish the merging
 }
@@ -1854,14 +1816,14 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 *
 * <p>When an index
 * has many document deletions (or updates to existing
-* documents), it's best to either call optimize or
+* documents), it's best to either call forceMerge or
 * expungeDeletes to remove all unused data in the index
 * associated with the deleted documents. To see how
 * many deletions you have pending in your index, call
 * {@link IndexReader#numDeletedDocs}
 * This saves disk space and memory usage while
 * searching. expungeDeletes should be somewhat faster
-* than optimize since it does not insist on reducing the
+* than forceMerge since it does not insist on reducing the
 * index to a single segment (though, this depends on the
 * {@link MergePolicy}; see {@link
 * MergePolicy#findMergesToExpungeDeletes}.). Note that
@@ -1896,22 +1858,18 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 * href="#OOME">above</a> for details.</p>
 */
 public final void maybeMerge() throws CorruptIndexException, IOException {
-maybeMerge(false);
+maybeMerge(-1);
 }
-private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
-maybeMerge(1, optimize);
-}
-private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
+private final void maybeMerge(int maxNumSegments) throws CorruptIndexException, IOException {
 ensureOpen(false);
-updatePendingMerges(maxNumSegmentsOptimize, optimize);
+updatePendingMerges(maxNumSegments);
 mergeScheduler.merge(this);
 }
-private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
+private synchronized void updatePendingMerges(int maxNumSegments)
 throws CorruptIndexException, IOException {
-assert !optimize || maxNumSegmentsOptimize > 0;
+assert maxNumSegments == -1 || maxNumSegments > 0;
 if (stopMerges) {
 return;
@@ -1923,14 +1881,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 }
 final MergePolicy.MergeSpecification spec;
-if (optimize) {
-spec = mergePolicy.findMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, Collections.unmodifiableMap(segmentsToOptimize));
+if (maxNumSegments != -1) {
+spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge));
 if (spec != null) {
 final int numMerges = spec.merges.size();
 for(int i=0;i<numMerges;i++) {
 final MergePolicy.OneMerge merge = spec.merges.get(i);
-merge.optimize = true;
-merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
+merge.maxNumSegments = maxNumSegments;
 }
 }
@@ -2083,7 +2040,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 *
 * <p>NOTE: this method will forcefully abort all merges
 * in progress. If other threads are running {@link
-* #optimize()}, {@link #addIndexes(IndexReader[])} or
+* #forceMerge}, {@link #addIndexes(IndexReader[])} or
 * {@link #expungeDeletes} methods, they may receive
 * {@link MergePolicy.MergeAbortedException}s.
 */
@@ -2390,7 +2347,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 * (including the starting index). If readers/searchers
 * are open against the starting index, then temporary
 * free space required will be higher by the size of the
-* starting index (see {@link #optimize()} for details).
+* starting index (see {@link #forceMerge(int)} for details).
 *
 * <p>
 * <b>NOTE:</b> this method only copies the segments of the incoming indexes
@@ -2452,7 +2409,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 && versionComparator.compare(info.getVersion(), "3.1") >= 0;
 }
-IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, false));
+IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, -1));
 if (createCFS) {
 copySegmentIntoCFS(info, newSegName, context);
@@ -2476,7 +2433,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 }
 /** Merges the provided indexes into this index.
-* <p>After this completes, the index is optimized. </p>
 * <p>The provided IndexReaders are not closed.</p>
 *
 * <p><b>NOTE:</b> while this is running, any attempts to
@@ -2512,7 +2468,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 for (IndexReader indexReader : readers) {
 numDocs += indexReader.numDocs();
 }
-final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, false));
+final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));
 // TODO: somehow we should fix this merge so it's
 // abortable so that IW.close(false) is able to stop it
@@ -2789,7 +2745,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 /**
 * <p>Commits all pending changes (added & deleted
-* documents, optimizations, segment merges, added
+* documents, segment merges, added
 * indexes, etc.) to the index, and syncs all referenced
 * index files, such that a reader will see the changes
 * and the index updates will survive an OS or machine
@@ -3199,10 +3155,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 // disk, updating SegmentInfo, etc.:
 readerPool.clear(merge.segments);
-if (merge.optimize) {
-// cascade the optimize:
-if (!segmentsToOptimize.containsKey(merge.info)) {
-segmentsToOptimize.put(merge.info, Boolean.FALSE);
+if (merge.maxNumSegments != -1) {
+// cascade the forceMerge:
+if (!segmentsToMerge.containsKey(merge.info)) {
+segmentsToMerge.put(merge.info, Boolean.FALSE);
 }
 }
@@ -3216,7 +3172,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 }
 // Set the exception on the merge, so if
-// optimize() is waiting on us it sees the root
+// forceMerge is waiting on us it sees the root
 // cause exception:
 merge.setException(t);
 addMergeException(merge);
@@ -3283,8 +3239,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 // This merge (and, generally, any change to the
 // segments) may now enable new merges, so we call
 // merge policy & update pending merges.
-if (success && !merge.isAborted() && (merge.optimize || (!closed && !closing))) {
-updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
+if (success && !merge.isAborted() && (merge.maxNumSegments != -1 || (!closed && !closing))) {
+updatePendingMerges(merge.maxNumSegments);
 }
 }
 }
@@ -3328,9 +3284,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 if (info.dir != directory) {
 isExternal = true;
 }
-if (segmentsToOptimize.containsKey(info)) {
-merge.optimize = true;
-merge.maxNumSegmentsOptimize = optimizeMaxNumSegments;
+if (segmentsToMerge.containsKey(info)) {
+merge.maxNumSegments = mergeMaxNumSegments;
 }
 }
@@ -3393,7 +3348,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 assert testPoint("startMergeInit");
 assert merge.registerDone;
-assert !merge.optimize || merge.maxNumSegmentsOptimize > 0;
+assert merge.maxNumSegments == -1 || merge.maxNumSegments > 0;
 if (hitOOM) {
 throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge");
@@ -3443,7 +3398,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 // Lock order: IW -> BD
 bufferedDeletesStream.prune(segmentInfos);
 Map<String,String> details = new HashMap<String,String>();
-details.put("optimize", Boolean.toString(merge.optimize));
+details.put("mergeMaxNumSegments", ""+merge.maxNumSegments);
 details.put("mergeFactor", Integer.toString(merge.segments.size()));
 setDiagnostics(merge.info, "merge", details);
@@ -3495,7 +3450,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 * the synchronized lock on IndexWriter instance. */
 final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {
-// Optimize, addIndexes or finishMerges may be waiting
+// forceMerge, addIndexes or finishMerges may be waiting
 // on merges to finish.
 notifyAll();
@@ -4090,7 +4045,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 * <b>NOTE:</b> the set {@link PayloadProcessorProvider} will be in effect
 * immediately, potentially for already running merges too. If you want to be
 * sure it is used for further operations only, such as {@link #addIndexes} or
-* {@link #optimize}, you can call {@link #waitForMerges()} before.
+* {@link #forceMerge}, you can call {@link #waitForMerges()} before.
 */
 public void setPayloadProcessorProvider(PayloadProcessorProvider pcp) {
 ensureOpen();
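
One behavioral note visible in the hunks above: the old optimize(boolean doWait) variant is folded into forceMerge(int maxNumSegments, boolean doWait), so a non-blocking forced merge now also states the target segment count. A hedged sketch of the non-blocking pattern (assumes a writer whose MergeScheduler can run merges in background threads, e.g. the default ConcurrentMergeScheduler):

    writer.forceMerge(1, false);   // request the merge but return immediately (doWait == false)
    // ... other indexing work continues while background threads run the merge ...
    writer.waitForMerges();        // later, block until the requested merges have finished
    writer.close();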

@@ -518,7 +518,7 @@ public final class IndexWriterConfig implements Cloneable {
 * Expert: {@link MergePolicy} is invoked whenever there are changes to the
 * segments in the index. Its role is to select which merges to do, if any,
 * and return a {@link MergePolicy.MergeSpecification} describing the merges.
-* It also selects merges to do for optimize(). (The default is
+* It also selects merges to do for forceMerge. (The default is
 * {@link LogByteSizeMergePolicy}.
 *
 * <p>Only takes effect when IndexWriter is first created. */

@@ -31,13 +31,13 @@ public class LogByteSizeMergePolicy extends LogMergePolicy {
 public static final double DEFAULT_MAX_MERGE_MB = 2048;
 /** Default maximum segment size. A segment of this size
-* or larger will never be merged during optimize. @see setMaxMergeMBForOptimize */
-public static final double DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE = Long.MAX_VALUE;
+* or larger will never be merged during forceMerge. @see setMaxMergeMBForForceMerge */
+public static final double DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED = Long.MAX_VALUE;
 public LogByteSizeMergePolicy() {
 minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024);
 maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024);
-maxMergeSizeForOptimize = (long) (DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE*1024*1024);
+maxMergeSizeForForcedMerge = (long) (DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED*1024*1024);
 }
 @Override
@@ -70,19 +70,19 @@ public class LogByteSizeMergePolicy extends LogMergePolicy {
 /** <p>Determines the largest segment (measured by total
 * byte size of the segment's files, in MB) that may be
-* merged with other segments during optimize. Setting
+* merged with other segments during forceMerge. Setting
 * it low will leave the index with more than 1 segment,
-* even if {@link IndexWriter#optimize()} is called.*/
-public void setMaxMergeMBForOptimize(double mb) {
-maxMergeSizeForOptimize = (long) (mb*1024*1024);
+* even if {@link IndexWriter#forceMerge} is called.*/
+public void setMaxMergeMBForForcedMerge(double mb) {
+maxMergeSizeForForcedMerge = (long) (mb*1024*1024);
 }
 /** Returns the largest segment (measured by total byte
 * size of the segment's files, in MB) that may be merged
-* with other segments during optimize.
-* @see #setMaxMergeMBForOptimize */
-public double getMaxMergeMBForOptimize() {
-return ((double) maxMergeSizeForOptimize)/1024/1024;
+* with other segments during forceMerge.
+* @see #setMaxMergeMBForForcedMerge */
+public double getMaxMergeMBForForcedMerge() {
+return ((double) maxMergeSizeForForcedMerge)/1024/1024;
 }
 /** Sets the minimum size for the lowest level segments.
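
The LogByteSizeMergePolicy tuning knob follows the same rename, so code that previously capped segment sizes for optimize now uses the ForcedMerge variant. A small hedged sketch (the 2048 MB value is purely illustrative, and `iwc` is assumed to be an IndexWriterConfig being built elsewhere):

    LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
    mp.setMaxMergeMBForForcedMerge(2048.0);  // was setMaxMergeMBForOptimize; larger segments are left alone by forceMerge
    iwc.setMergePolicy(mp);                  // attach the policy to the (assumed) config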

@@ -31,10 +31,10 @@ public class LogDocMergePolicy extends LogMergePolicy {
 public LogDocMergePolicy() {
 minMergeSize = DEFAULT_MIN_MERGE_DOCS;
-// maxMergeSize(ForOptimize) are never used by LogDocMergePolicy; set
+// maxMergeSize(ForForcedMerge) are never used by LogDocMergePolicy; set
 // it to Long.MAX_VALUE to disable it
 maxMergeSize = Long.MAX_VALUE;
-maxMergeSizeForOptimize = Long.MAX_VALUE;
+maxMergeSizeForForcedMerge = Long.MAX_VALUE;
 }
 @Override

@@ -70,7 +70,7 @@ public abstract class LogMergePolicy extends MergePolicy {
 protected long maxMergeSize;
 // Although the core MPs set it explicitly, we must default in case someone
 // out there wrote his own LMP ...
-protected long maxMergeSizeForOptimize = Long.MAX_VALUE;
+protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE;
 protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
 protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
@@ -123,10 +123,10 @@ public abstract class LogMergePolicy extends MergePolicy {
 /** Determines how often segment indices are merged by
 * addDocument(). With smaller values, less RAM is used
-* while indexing, and searches on unoptimized indices are
+* while indexing, and searches are
 * faster, but indexing speed is slower. With larger
 * values, more RAM is used during indexing, and while
-* searches on unoptimized indices are slower, indexing is
+* searches is slower, indexing is
 * faster. Thus larger values (> 10) are best for batch
 * index creation, and smaller values (< 10) for indices
 * that are interactively maintained. */
@@ -207,29 +207,29 @@ public abstract class LogMergePolicy extends MergePolicy {
 }
 }
-protected boolean isOptimized(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
 final int numSegments = infos.size();
-int numToOptimize = 0;
-SegmentInfo optimizeInfo = null;
+int numToMerge = 0;
+SegmentInfo mergeInfo = null;
 boolean segmentIsOriginal = false;
-for(int i=0;i<numSegments && numToOptimize <= maxNumSegments;i++) {
+for(int i=0;i<numSegments && numToMerge <= maxNumSegments;i++) {
 final SegmentInfo info = infos.info(i);
-final Boolean isOriginal = segmentsToOptimize.get(info);
+final Boolean isOriginal = segmentsToMerge.get(info);
 if (isOriginal != null) {
 segmentIsOriginal = isOriginal;
-numToOptimize++;
-optimizeInfo = info;
+numToMerge++;
+mergeInfo = info;
 }
 }
-return numToOptimize <= maxNumSegments &&
-(numToOptimize != 1 || !segmentIsOriginal || isOptimized(optimizeInfo));
+return numToMerge <= maxNumSegments &&
+(numToMerge != 1 || !segmentIsOriginal || isMerged(mergeInfo));
 }
-/** Returns true if this single info is optimized (has no
+/** Returns true if this single info is already fully merged (has no
 * pending norms or deletes, is in the same dir as the
 * writer, and matches the current compound file setting */
-protected boolean isOptimized(SegmentInfo info)
+protected boolean isMerged(SegmentInfo info)
 throws IOException {
 IndexWriter w = writer.get();
 assert w != null;
@@ -241,14 +241,14 @@ public abstract class LogMergePolicy extends MergePolicy {
 }
 /**
-* Returns the merges necessary to optimize the index, taking the max merge
+* Returns the merges necessary to merge the index, taking the max merge
 * size or max merge docs into consideration. This method attempts to respect
 * the {@code maxNumSegments} parameter, however it might be, due to size
 * constraints, that more than that number of segments will remain in the
 * index. Also, this method does not guarantee that exactly {@code
 * maxNumSegments} will remain, but &lt;= that number.
 */
-private MergeSpecification findMergesForOptimizeSizeLimit(
+private MergeSpecification findForcedMergesSizeLimit(
 SegmentInfos infos, int maxNumSegments, int last) throws IOException {
 MergeSpecification spec = new MergeSpecification();
 final List<SegmentInfo> segments = infos.asList();
@@ -256,14 +256,15 @@ public abstract class LogMergePolicy extends MergePolicy {
 int start = last - 1;
 while (start >= 0) {
 SegmentInfo info = infos.info(start);
-if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) {
+if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
 if (verbose()) {
-message("optimize: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForOptimize + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
+message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
 }
 // need to skip that segment + add a merge for the 'right' segments,
-// unless there is only 1 which is optimized.
+// unless there is only 1 which is merged.
if (last - start - 1 > 1 || (start != last - 1 && !isOptimized(infos.info(start + 1)))) { if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos.info(start + 1)))) {
// there is more than 1 segment to the right of this one, or an unoptimized single segment. // there is more than 1 segment to the right of
// this one, or a mergeable single segment.
spec.add(new OneMerge(segments.subList(start + 1, last))); spec.add(new OneMerge(segments.subList(start + 1, last)));
} }
last = start; last = start;
@ -275,8 +276,9 @@ public abstract class LogMergePolicy extends MergePolicy {
--start; --start;
} }
// Add any left-over segments, unless there is just 1 already optimized. // Add any left-over segments, unless there is just 1
if (last > 0 && (++start + 1 < last || !isOptimized(infos.info(start)))) { // already fully merged
if (last > 0 && (++start + 1 < last || !isMerged(infos.info(start)))) {
spec.add(new OneMerge(segments.subList(start, last))); spec.add(new OneMerge(segments.subList(start, last)));
} }
@ -284,11 +286,11 @@ public abstract class LogMergePolicy extends MergePolicy {
} }
/** /**
* Returns the merges necessary to optimize the index. This method constraints * Returns the merges necessary to forceMerge the index. This method constraints
* the returned merges only by the {@code maxNumSegments} parameter, and * the returned merges only by the {@code maxNumSegments} parameter, and
* guaranteed that exactly that number of segments will remain in the index. * guaranteed that exactly that number of segments will remain in the index.
*/ */
private MergeSpecification findMergesForOptimizeMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException { private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException {
MergeSpecification spec = new MergeSpecification(); MergeSpecification spec = new MergeSpecification();
final List<SegmentInfo> segments = infos.asList(); final List<SegmentInfo> segments = infos.asList();
@ -304,9 +306,9 @@ public abstract class LogMergePolicy extends MergePolicy {
if (0 == spec.merges.size()) { if (0 == spec.merges.size()) {
if (maxNumSegments == 1) { if (maxNumSegments == 1) {
// Since we must optimize down to 1 segment, the // Since we must merge down to 1 segment, the
// choice is simple: // choice is simple:
if (last > 1 || !isOptimized(infos.info(0))) { if (last > 1 || !isMerged(infos.info(0))) {
spec.add(new OneMerge(segments.subList(0, last))); spec.add(new OneMerge(segments.subList(0, last)));
} }
} else if (last > maxNumSegments) { } else if (last > maxNumSegments) {
@ -319,7 +321,7 @@ public abstract class LogMergePolicy extends MergePolicy {
// We must merge this many segments to leave // We must merge this many segments to leave
// maxNumSegments in the index (from when // maxNumSegments in the index (from when
// optimize was first kicked off): // forceMerge was first kicked off):
final int finalMergeSize = last - maxNumSegments + 1; final int finalMergeSize = last - maxNumSegments + 1;
// Consider all possible starting points: // Consider all possible starting points:
@ -342,10 +344,9 @@ public abstract class LogMergePolicy extends MergePolicy {
return spec.merges.size() == 0 ? null : spec; return spec.merges.size() == 0 ? null : spec;
} }
/** Returns the merges necessary to optimize the index. /** Returns the merges necessary to merge the index down
* This merge policy defines "optimized" to mean only the * to a specified number of segments.
* requested number of segments is left in the index, and * This respects the {@link #maxMergeSizeForForcedMerge} setting.
* respects the {@link #maxMergeSizeForOptimize} setting.
* By default, and assuming {@code maxNumSegments=1}, only * By default, and assuming {@code maxNumSegments=1}, only
* one segment will be left in the index, where that segment * one segment will be left in the index, where that segment
* has no deletions pending nor separate norms, and it is in * has no deletions pending nor separate norms, and it is in
@ -354,30 +355,30 @@ public abstract class LogMergePolicy extends MergePolicy {
* (mergeFactor at a time) so the {@link MergeScheduler} * (mergeFactor at a time) so the {@link MergeScheduler}
* in use may make use of concurrency. */ * in use may make use of concurrency. */
@Override @Override
public MergeSpecification findMergesForOptimize(SegmentInfos infos, public MergeSpecification findForcedMerges(SegmentInfos infos,
int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException { int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
assert maxNumSegments > 0; assert maxNumSegments > 0;
if (verbose()) { if (verbose()) {
message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize="+ segmentsToOptimize); message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge);
} }
// If the segments are already optimized (e.g. there's only 1 segment), or // If the segments are already merged (e.g. there's only 1 segment), or
// there are <maxNumSegements, all optimized, nothing to do. // there are <= maxNumSegments and all are already merged; nothing to do.
if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) { if (isMerged(infos, maxNumSegments, segmentsToMerge)) {
if (verbose()) { if (verbose()) {
message("already optimized; skip"); message("already merged; skip");
} }
return null; return null;
} }
// Find the newest (rightmost) segment that needs to // Find the newest (rightmost) segment that needs to
// be optimized (other segments may have been flushed // be merged (other segments may have been flushed
// since optimize started): // since merging started):
int last = infos.size(); int last = infos.size();
while (last > 0) { while (last > 0) {
final SegmentInfo info = infos.info(--last); final SegmentInfo info = infos.info(--last);
if (segmentsToOptimize.get(info) != null) { if (segmentsToMerge.get(info) != null) {
last++; last++;
break; break;
} }
@ -390,8 +391,8 @@ public abstract class LogMergePolicy extends MergePolicy {
return null; return null;
} }
// There is only one segment already, and it is optimized // There is only one segment already, and it is fully merged
if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) { if (maxNumSegments == 1 && last == 1 && isMerged(infos.info(0))) {
if (verbose()) { if (verbose()) {
message("already 1 seg; skip"); message("already 1 seg; skip");
} }
@ -402,16 +403,16 @@ public abstract class LogMergePolicy extends MergePolicy {
boolean anyTooLarge = false; boolean anyTooLarge = false;
for (int i = 0; i < last; i++) { for (int i = 0; i < last; i++) {
SegmentInfo info = infos.info(i); SegmentInfo info = infos.info(i);
if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) { if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
anyTooLarge = true; anyTooLarge = true;
break; break;
} }
} }
if (anyTooLarge) { if (anyTooLarge) {
return findMergesForOptimizeSizeLimit(infos, maxNumSegments, last); return findForcedMergesSizeLimit(infos, maxNumSegments, last);
} else { } else {
return findMergesForOptimizeMaxNumSegments(infos, maxNumSegments, last); return findForcedMergesMaxNumSegments(infos, maxNumSegments, last);
} }
} }
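From the application side these code paths are reached through IndexWriter.forceMerge; a hedged sketch, assuming dir and iwc are already set up:

    IndexWriter writer = new IndexWriter(dir, iwc);
    // ... add and delete documents ...
    writer.forceMerge(5);   // ask the merge policy for forced merges down to <= 5 segments
    writer.forceMerge(1);   // the old "optimize": merge down to a single segment
    writer.close();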
@ -661,7 +662,7 @@ public abstract class LogMergePolicy extends MergePolicy {
sb.append("minMergeSize=").append(minMergeSize).append(", "); sb.append("minMergeSize=").append(minMergeSize).append(", ");
sb.append("mergeFactor=").append(mergeFactor).append(", "); sb.append("mergeFactor=").append(mergeFactor).append(", ");
sb.append("maxMergeSize=").append(maxMergeSize).append(", "); sb.append("maxMergeSize=").append(maxMergeSize).append(", ");
sb.append("maxMergeSizeForOptimize=").append(maxMergeSizeForOptimize).append(", "); sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", ");
sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", "); sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", ");
sb.append("maxMergeDocs=").append(maxMergeDocs).append(", "); sb.append("maxMergeDocs=").append(maxMergeDocs).append(", ");
sb.append("useCompoundFile=").append(useCompoundFile).append(", "); sb.append("useCompoundFile=").append(useCompoundFile).append(", ");


@ -30,8 +30,7 @@ import org.apache.lucene.util.SetOnce;
/** /**
* <p>Expert: a MergePolicy determines the sequence of * <p>Expert: a MergePolicy determines the sequence of
* primitive merge operations to be used for overall merge * primitive merge operations.</p>
* and optimize operations.</p>
* *
* <p>Whenever the segments in an index have been altered by * <p>Whenever the segments in an index have been altered by
* {@link IndexWriter}, either the addition of a newly * {@link IndexWriter}, either the addition of a newly
@ -42,8 +41,8 @@ import org.apache.lucene.util.SetOnce;
* merges that are now required. This method returns a * merges that are now required. This method returns a
* {@link MergeSpecification} instance describing the set of * {@link MergeSpecification} instance describing the set of
* merges that should be done, or null if no merges are * merges that should be done, or null if no merges are
* necessary. When IndexWriter.optimize is called, it calls * necessary. When IndexWriter.forceMerge is called, it calls
* {@link #findMergesForOptimize} and the MergePolicy should * {@link #findForcedMerges(SegmentInfos,int,Map)} and the MergePolicy should
* then return the necessary merges.</p> * then return the necessary merges.</p>
* *
* <p>Note that the policy can return more than one merge at * <p>Note that the policy can return more than one merge at
@ -69,11 +68,10 @@ public abstract class MergePolicy implements java.io.Closeable {
public static class OneMerge { public static class OneMerge {
SegmentInfo info; // used by IndexWriter SegmentInfo info; // used by IndexWriter
boolean optimize; // used by IndexWriter
boolean registerDone; // used by IndexWriter boolean registerDone; // used by IndexWriter
long mergeGen; // used by IndexWriter long mergeGen; // used by IndexWriter
boolean isExternal; // used by IndexWriter boolean isExternal; // used by IndexWriter
int maxNumSegmentsOptimize; // used by IndexWriter int maxNumSegments = -1; // used by IndexWriter
public long estimatedMergeBytes; // used by IndexWriter public long estimatedMergeBytes; // used by IndexWriter
List<SegmentReader> readers; // used by IndexWriter List<SegmentReader> readers; // used by IndexWriter
List<BitVector> readerLiveDocs; // used by IndexWriter List<BitVector> readerLiveDocs; // used by IndexWriter
@ -160,8 +158,8 @@ public abstract class MergePolicy implements java.io.Closeable {
} }
if (info != null) if (info != null)
b.append(" into ").append(info.name); b.append(" into ").append(info.name);
if (optimize) if (maxNumSegments != -1)
b.append(" [optimize]"); b.append(" [maxNumSegments=" + maxNumSegments + "]");
if (aborted) { if (aborted) {
b.append(" [ABORTED]"); b.append(" [ABORTED]");
} }
@ -193,7 +191,7 @@ public abstract class MergePolicy implements java.io.Closeable {
} }
public MergeInfo getMergeInfo() { public MergeInfo getMergeInfo() {
return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, optimize); return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, maxNumSegments);
} }
} }
@ -290,9 +288,9 @@ public abstract class MergePolicy implements java.io.Closeable {
throws CorruptIndexException, IOException; throws CorruptIndexException, IOException;
/** /**
* Determine what set of merge operations is necessary in order to optimize * Determine what set of merge operations is necessary in
* the index. {@link IndexWriter} calls this when its * order to merge to <= the specified segment count. {@link IndexWriter} calls this when its
* {@link IndexWriter#optimize()} method is called. This call is always * {@link IndexWriter#forceMerge} method is called. This call is always
* synchronized on the {@link IndexWriter} instance so only one thread at a * synchronized on the {@link IndexWriter} instance so only one thread at a
* time will call this method. * time will call this method.
* *
@ -301,17 +299,17 @@ public abstract class MergePolicy implements java.io.Closeable {
* @param maxSegmentCount * @param maxSegmentCount
* requested maximum number of segments in the index (currently this * requested maximum number of segments in the index (currently this
* is always 1) * is always 1)
* @param segmentsToOptimize * @param segmentsToMerge
* contains the specific SegmentInfo instances that must be merged * contains the specific SegmentInfo instances that must be merged
* away. This may be a subset of all * away. This may be a subset of all
* SegmentInfos. If the value is True for a * SegmentInfos. If the value is True for a
* given SegmentInfo, that means this segment was * given SegmentInfo, that means this segment was
* an original segment present in the * an original segment present in the
* to-be-optimized index; else, it was a segment * to-be-merged index; else, it was a segment
* produced by a cascaded merge. * produced by a cascaded merge.
*/ */
public abstract MergeSpecification findMergesForOptimize( public abstract MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException; throws CorruptIndexException, IOException;
/** /**


@ -233,12 +233,6 @@ public class MultiReader extends IndexReader implements Cloneable {
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
} }
@Override
public boolean isOptimized() {
ensureOpen();
return false;
}
@Override @Override
public int numDocs() { public int numDocs() {
// Don't call ensureOpen() here (it could affect performance) // Don't call ensureOpen() here (it could affect performance)


@ -58,8 +58,8 @@ public final class NoMergePolicy extends MergePolicy {
throws CorruptIndexException, IOException { return null; } throws CorruptIndexException, IOException { return null; }
@Override @Override
public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException { return null; } throws CorruptIndexException, IOException { return null; }
@Override @Override


@ -477,23 +477,6 @@ public class ParallelReader extends IndexReader {
return true; return true;
} }
/**
* Checks recursively if all subindexes are optimized
*/
@Override
public boolean isOptimized() {
ensureOpen();
for (final IndexReader reader : readers) {
if (!reader.isOptimized()) {
return false;
}
}
// all subindexes are optimized
return true;
}
/** Not implemented. /** Not implemented.
* @throws UnsupportedOperationException * @throws UnsupportedOperationException
*/ */


@ -136,8 +136,8 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
} }
@Override @Override
public boolean isOptimized() { public int getSegmentCount() {
return cp.isOptimized(); return cp.getSegmentCount();
} }
} }
@ -340,7 +340,7 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
* <b>NOTE:</b> while the snapshot is held, the files it references will not * <b>NOTE:</b> while the snapshot is held, the files it references will not
* be deleted, which will consume additional disk space in your index. If you * be deleted, which will consume additional disk space in your index. If you
* take a snapshot at a particularly bad time (say just before you call * take a snapshot at a particularly bad time (say just before you call
* optimize()) then in the worst case this could consume an extra 1X of your * forceMerge) then in the worst case this could consume an extra 1X of your
* total index size, until you release the snapshot. * total index size, until you release the snapshot.
* *
* @param id * @param id
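A sketch of the snapshot/release cycle this warning refers to; the snapshot id and the backup step are placeholders:

    SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, iwc.setIndexDeletionPolicy(sdp));
    IndexCommit commit = sdp.snapshot("backup-1");   // files of this commit are now protected from deletion
    try {
      // copy commit.getFileNames() to backup storage
    } finally {
      sdp.release("backup-1");                       // let the writer delete the old files again
    }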


@ -62,7 +62,7 @@ import org.apache.lucene.util.InfoStream;
* <p><b>NOTE</b>: This policy always merges by byte size * <p><b>NOTE</b>: This policy always merges by byte size
* of the segments, always pro-rates by percent deletes, * of the segments, always pro-rates by percent deletes,
* and does not apply any maximum segment size during * and does not apply any maximum segment size during
* optimize (unlike {@link LogByteSizeMergePolicy}). * forceMerge (unlike {@link LogByteSizeMergePolicy}).
* *
* @lucene.experimental * @lucene.experimental
*/ */
@ -88,7 +88,7 @@ public class TieredMergePolicy extends MergePolicy {
/** Maximum number of segments to be merged at a time /** Maximum number of segments to be merged at a time
* during "normal" merging. For explicit merging (eg, * during "normal" merging. For explicit merging (eg,
* optimize or expungeDeletes was called), see {@link * forceMerge or expungeDeletes was called), see {@link
* #setMaxMergeAtOnceExplicit}. Default is 10. */ * #setMaxMergeAtOnceExplicit}. Default is 10. */
public TieredMergePolicy setMaxMergeAtOnce(int v) { public TieredMergePolicy setMaxMergeAtOnce(int v) {
if (v < 2) { if (v < 2) {
@ -107,7 +107,7 @@ public class TieredMergePolicy extends MergePolicy {
// if user calls IW.maybeMerge "explicitly" // if user calls IW.maybeMerge "explicitly"
/** Maximum number of segments to be merged at a time, /** Maximum number of segments to be merged at a time,
* during optimize or expungeDeletes. Default is 30. */ * during forceMerge or expungeDeletes. Default is 30. */
public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) { public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) {
if (v < 2) { if (v < 2) {
throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")"); throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")");
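The two limits apply to different kinds of merges; an illustrative configuration (the values are arbitrary):

    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.setMaxMergeAtOnce(10);           // cap for normal, background merges
    tmp.setMaxMergeAtOnceExplicit(30);   // cap for forceMerge / expungeDeletes merges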
@ -478,23 +478,23 @@ public class TieredMergePolicy extends MergePolicy {
} }
@Override @Override
public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException { public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
if (verbose()) { if (verbose()) {
message("findMergesForOptimize maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToOptimize=" + segmentsToOptimize); message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToMerge=" + segmentsToMerge);
} }
List<SegmentInfo> eligible = new ArrayList<SegmentInfo>(); List<SegmentInfo> eligible = new ArrayList<SegmentInfo>();
boolean optimizeMergeRunning = false; boolean forceMergeRunning = false;
final Collection<SegmentInfo> merging = writer.get().getMergingSegments(); final Collection<SegmentInfo> merging = writer.get().getMergingSegments();
boolean segmentIsOriginal = false; boolean segmentIsOriginal = false;
for(SegmentInfo info : infos) { for(SegmentInfo info : infos) {
final Boolean isOriginal = segmentsToOptimize.get(info); final Boolean isOriginal = segmentsToMerge.get(info);
if (isOriginal != null) { if (isOriginal != null) {
segmentIsOriginal = isOriginal; segmentIsOriginal = isOriginal;
if (!merging.contains(info)) { if (!merging.contains(info)) {
eligible.add(info); eligible.add(info);
} else { } else {
optimizeMergeRunning = true; forceMergeRunning = true;
} }
} }
} }
@ -504,9 +504,9 @@ public class TieredMergePolicy extends MergePolicy {
} }
if ((maxSegmentCount > 1 && eligible.size() <= maxSegmentCount) || if ((maxSegmentCount > 1 && eligible.size() <= maxSegmentCount) ||
(maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isOptimized(eligible.get(0))))) { (maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isMerged(eligible.get(0))))) {
if (verbose()) { if (verbose()) {
message("already optimized"); message("already merged");
} }
return null; return null;
} }
@ -515,7 +515,7 @@ public class TieredMergePolicy extends MergePolicy {
if (verbose()) { if (verbose()) {
message("eligible=" + eligible); message("eligible=" + eligible);
message("optimizeMergeRunning=" + optimizeMergeRunning); message("forceMergeRunning=" + forceMergeRunning);
} }
int end = eligible.size(); int end = eligible.size();
@ -535,7 +535,7 @@ public class TieredMergePolicy extends MergePolicy {
end -= maxMergeAtOnceExplicit; end -= maxMergeAtOnceExplicit;
} }
if (spec == null && !optimizeMergeRunning) { if (spec == null && !forceMergeRunning) {
// Do final merge // Do final merge
final int numToMerge = end - maxSegmentCount + 1; final int numToMerge = end - maxSegmentCount + 1;
final OneMerge merge = new OneMerge(eligible.subList(end-numToMerge, end)); final OneMerge merge = new OneMerge(eligible.subList(end-numToMerge, end));
@ -580,7 +580,7 @@ public class TieredMergePolicy extends MergePolicy {
while(start < eligible.size()) { while(start < eligible.size()) {
// Don't enforce max merged size here: app is explicitly // Don't enforce max merged size here: app is explicitly
// calling expungeDeletes, and knows this may take a // calling expungeDeletes, and knows this may take a
// long time / produce big segments (like optimize): // long time / produce big segments (like forceMerge):
final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size()); final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size());
if (spec == null) { if (spec == null) {
spec = new MergeSpecification(); spec = new MergeSpecification();
@ -619,7 +619,7 @@ public class TieredMergePolicy extends MergePolicy {
public void close() { public void close() {
} }
private boolean isOptimized(SegmentInfo info) private boolean isMerged(SegmentInfo info)
throws IOException { throws IOException {
IndexWriter w = writer.get(); IndexWriter w = writer.get();
assert w != null; assert w != null;


@ -27,24 +27,24 @@ import java.util.Map;
import java.util.HashMap; import java.util.HashMap;
/** This {@link MergePolicy} is used for upgrading all existing segments of /** This {@link MergePolicy} is used for upgrading all existing segments of
* an index when calling {@link IndexWriter#optimize()}. * an index when calling {@link IndexWriter#forceMerge(int)}.
* All other methods delegate to the base {@code MergePolicy} given to the constructor. * All other methods delegate to the base {@code MergePolicy} given to the constructor.
* This allows for an as-cheap-as possible upgrade of an older index by only upgrading segments that * This allows for an as-cheap-as possible upgrade of an older index by only upgrading segments that
* are created by previous Lucene versions. Optimize does no longer really optimize * are created by previous Lucene versions. forceMerge no longer really merges;
* it is just used to &quot;optimize&quot; older segment versions away. * it is just used to &quot;forceMerge&quot; older segment versions away.
* <p>In general one would use {@link IndexUpgrader}, but for a fully customizeable upgrade, * <p>In general one would use {@link IndexUpgrader}, but for a fully customizeable upgrade,
* you can use this like any other {@code MergePolicy} and call {@link IndexWriter#optimize()}: * you can use this like any other {@code MergePolicy} and call {@link IndexWriter#forceMerge(int)}:
* <pre class="prettyprint lang-java"> * <pre class="prettyprint lang-java">
* IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_XX, new KeywordAnalyzer()); * IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_XX, new KeywordAnalyzer());
* iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy())); * iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
* IndexWriter w = new IndexWriter(dir, iwc); * IndexWriter w = new IndexWriter(dir, iwc);
* w.optimize(); * w.forceMerge(1);
* w.close(); * w.close();
* </pre> * </pre>
* <p><b>Warning:</b> This merge policy may reorder documents if the index was partially * <p><b>Warning:</b> This merge policy may reorder documents if the index was partially
* upgraded before calling optimize (e.g., documents were added). If your application relies * upgraded before calling forceMerge (e.g., documents were added). If your application relies
* on &quot;monotonicity&quot; of doc IDs (which means that the order in which the documents * on &quot;monotonicity&quot; of doc IDs (which means that the order in which the documents
* were added to the index is preserved), do a full optimize instead. Please note, the * were added to the index is preserved), do a forceMerge(1) instead. Please note, the
* delegate {@code MergePolicy} may also reorder documents. * delegate {@code MergePolicy} may also reorder documents.
* @lucene.experimental * @lucene.experimental
* @see IndexUpgrader * @see IndexUpgrader
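The simpler route the javadoc points to would look roughly like this; the constructor form and Version constant are assumptions, not part of this change:

    // Upgrades all segments written by older Lucene versions, wrapping the
    // configured merge policy in UpgradeIndexMergePolicy and calling forceMerge.
    new IndexUpgrader(dir, Version.LUCENE_40).upgrade();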
@ -53,7 +53,7 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
protected final MergePolicy base; protected final MergePolicy base;
/** Wrap the given {@link MergePolicy} and intercept optimize requests to /** Wrap the given {@link MergePolicy} and intercept forceMerge requests to
* only upgrade segments written with previous Lucene versions. */ * only upgrade segments written with previous Lucene versions. */
public UpgradeIndexMergePolicy(MergePolicy base) { public UpgradeIndexMergePolicy(MergePolicy base) {
this.base = base; this.base = base;
@ -80,22 +80,22 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
} }
@Override @Override
public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) throws CorruptIndexException, IOException { public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge) throws CorruptIndexException, IOException {
// first find all old segments // first find all old segments
final Map<SegmentInfo,Boolean> oldSegments = new HashMap<SegmentInfo,Boolean>(); final Map<SegmentInfo,Boolean> oldSegments = new HashMap<SegmentInfo,Boolean>();
for (final SegmentInfo si : segmentInfos) { for (final SegmentInfo si : segmentInfos) {
final Boolean v =segmentsToOptimize.get(si); final Boolean v = segmentsToMerge.get(si);
if (v != null && shouldUpgradeSegment(si)) { if (v != null && shouldUpgradeSegment(si)) {
oldSegments.put(si, v); oldSegments.put(si, v);
} }
} }
if (verbose()) message("findMergesForOptimize: segmentsToUpgrade=" + oldSegments); if (verbose()) message("findForcedMerges: segmentsToUpgrade=" + oldSegments);
if (oldSegments.isEmpty()) if (oldSegments.isEmpty())
return null; return null;
MergeSpecification spec = base.findMergesForOptimize(segmentInfos, maxSegmentCount, oldSegments); MergeSpecification spec = base.findForcedMerges(segmentInfos, maxSegmentCount, oldSegments);
if (spec != null) { if (spec != null) {
// remove all segments that are in merge specification from oldSegments, // remove all segments that are in merge specification from oldSegments,
@ -108,7 +108,7 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
if (!oldSegments.isEmpty()) { if (!oldSegments.isEmpty()) {
if (verbose()) if (verbose())
message("findMergesForOptimize: " + base.getClass().getSimpleName() + message("findForcedMerges: " + base.getClass().getSimpleName() +
" does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments); " does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments);
final List<SegmentInfo> newInfos = new ArrayList<SegmentInfo>(); final List<SegmentInfo> newInfos = new ArrayList<SegmentInfo>();
for (final SegmentInfo si : segmentInfos) { for (final SegmentInfo si : segmentInfos) {


@ -273,14 +273,14 @@ public abstract class MultiTermQuery extends Query {
/** /**
* Expert: Return the number of unique terms visited during execution of the query. * Expert: Return the number of unique terms visited during execution of the query.
* If there are many of them, you may consider using another query type * If there are many of them, you may consider using another query type
* or optimize your total term count in index. * or reduce the total term count in the index.
* <p>This method is not thread safe, be sure to only call it when no query is running! * <p>This method is not thread safe, be sure to only call it when no query is running!
* If you re-use the same query instance for another * If you re-use the same query instance for another
* search, be sure to first reset the term counter * search, be sure to first reset the term counter
* with {@link #clearTotalNumberOfTerms}. * with {@link #clearTotalNumberOfTerms}.
* <p>On optimized indexes / no MultiReaders, you get the correct number of * <p>On single-segment indexes / no MultiReaders, you get the correct number of
* unique terms for the whole index. Use this number to compare different queries. * unique terms for the whole index. Use this number to compare different queries.
* For non-optimized indexes this number can also be achieved in * For multi-segment indexes this number can also be achieved in
* non-constant-score mode. In constant-score mode you get the total number of * non-constant-score mode. In constant-score mode you get the total number of
* terms seeked for all segments / sub-readers. * terms seeked for all segments / sub-readers.
* @see #clearTotalNumberOfTerms * @see #clearTotalNumberOfTerms
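A sketch of reading the counter with one of the MultiTermQuery subclasses; the field name and searcher setup are placeholders:

    WildcardQuery q = new WildcardQuery(new Term("body", "merge*"));
    searcher.search(q, 10);
    int visited = q.getTotalNumberOfTerms();   // unique terms visited by this query run
    q.clearTotalNumberOfTerms();               // reset before reusing the same instance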


@ -18,7 +18,7 @@ package org.apache.lucene.store;
*/ */
/** /**
* <p>A FlushInfo provides information required for a FLUSH context and other optimization operations. * <p>A FlushInfo provides information required for a FLUSH context.
* It is used as part of an {@link IOContext} in case of FLUSH context.</p> * It is used as part of an {@link IOContext} in case of FLUSH context.</p>
*/ */


@ -17,7 +17,7 @@ package org.apache.lucene.store;
*/ */
/** /**
* <p>A MergeInfo provides information required for a MERGE context and other optimization operations. * <p>A MergeInfo provides information required for a MERGE context.
* It is used as part of an {@link IOContext} in case of MERGE context.</p> * It is used as part of an {@link IOContext} in case of MERGE context.</p>
*/ */
@ -29,7 +29,7 @@ public class MergeInfo {
public final boolean isExternal; public final boolean isExternal;
public final boolean optimize; public final int mergeMaxNumSegments;
/** /**
@ -40,11 +40,11 @@ public class MergeInfo {
* *
*/ */
public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, boolean optimize) { public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {
this.totalDocCount = totalDocCount; this.totalDocCount = totalDocCount;
this.estimatedMergeBytes = estimatedMergeBytes; this.estimatedMergeBytes = estimatedMergeBytes;
this.isExternal = isExternal; this.isExternal = isExternal;
this.optimize = optimize; this.mergeMaxNumSegments = mergeMaxNumSegments;
} }
@ -55,7 +55,7 @@ public class MergeInfo {
result = prime * result result = prime * result
+ (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32)); + (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32));
result = prime * result + (isExternal ? 1231 : 1237); result = prime * result + (isExternal ? 1231 : 1237);
result = prime * result + (optimize ? 1231 : 1237); result = prime * result + mergeMaxNumSegments;
result = prime * result + totalDocCount; result = prime * result + totalDocCount;
return result; return result;
} }
@ -73,7 +73,7 @@ public class MergeInfo {
return false; return false;
if (isExternal != other.isExternal) if (isExternal != other.isExternal)
return false; return false;
if (optimize != other.optimize) if (mergeMaxNumSegments != other.mergeMaxNumSegments)
return false; return false;
if (totalDocCount != other.totalDocCount) if (totalDocCount != other.totalDocCount)
return false; return false;
@ -84,6 +84,6 @@ public class MergeInfo {
public String toString() { public String toString() {
return "MergeInfo [totalDocCount=" + totalDocCount return "MergeInfo [totalDocCount=" + totalDocCount
+ ", estimatedMergeBytes=" + estimatedMergeBytes + ", isExternal=" + ", estimatedMergeBytes=" + estimatedMergeBytes + ", isExternal="
+ isExternal + ", optimize=" + optimize + "]"; + isExternal + ", mergeMaxNumSegments=" + mergeMaxNumSegments + "]";
} }
} }
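Under the new field, -1 marks a merge that was not started by forceMerge; a small sketch of building such contexts (the numbers are arbitrary):

    MergeInfo natural = new MergeInfo(10000, 64L << 20, false, -1);  // normal background merge
    MergeInfo forced  = new MergeInfo(10000, 64L << 20, false, 1);   // merge triggered by forceMerge(1)
    IOContext ctx = new IOContext(natural);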


@ -216,7 +216,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", new StringReader(sortData[i][5])))); doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", new StringReader(sortData[i][5]))));
writer.addDocument(doc); writer.addDocument(doc);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore, true); IndexSearcher searcher = new IndexSearcher(indexStore, true);


@ -55,18 +55,18 @@ public class MockRandomMergePolicy extends MergePolicy {
} }
@Override @Override
public MergeSpecification findMergesForOptimize( public MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException { throws CorruptIndexException, IOException {
final List<SegmentInfo> eligibleSegments = new ArrayList<SegmentInfo>(); final List<SegmentInfo> eligibleSegments = new ArrayList<SegmentInfo>();
for(SegmentInfo info : segmentInfos) { for(SegmentInfo info : segmentInfos) {
if (segmentsToOptimize.containsKey(info)) { if (segmentsToMerge.containsKey(info)) {
eligibleSegments.add(info); eligibleSegments.add(info);
} }
} }
//System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos + " eligible=" + eligibleSegments); //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments);
MergeSpecification mergeSpec = null; MergeSpecification mergeSpec = null;
if (eligibleSegments.size() > 1 || (eligibleSegments.size() == 1 && eligibleSegments.get(0).hasDeletions())) { if (eligibleSegments.size() > 1 || (eligibleSegments.size() == 1 && eligibleSegments.get(0).hasDeletions())) {
mergeSpec = new MergeSpecification(); mergeSpec = new MergeSpecification();
@ -85,7 +85,7 @@ public class MockRandomMergePolicy extends MergePolicy {
if (mergeSpec != null) { if (mergeSpec != null) {
for(OneMerge merge : mergeSpec.merges) { for(OneMerge merge : mergeSpec.merges) {
for(SegmentInfo info : merge.segments) { for(SegmentInfo info : merge.segments) {
assert segmentsToOptimize.containsKey(info); assert segmentsToMerge.containsKey(info);
} }
} }
} }


@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil;
/** Silly class that randomizes the indexing experience. EG /** Silly class that randomizes the indexing experience. EG
* it may swap in a different merge policy/scheduler; may * it may swap in a different merge policy/scheduler; may
* commit periodically; may or may not optimize in the end, * commit periodically; may or may not forceMerge in the end,
* may flush by doc count instead of RAM, etc. * may flush by doc count instead of RAM, etc.
*/ */
@ -323,8 +323,8 @@ public class RandomIndexWriter implements Closeable {
return getReader(true); return getReader(true);
} }
private boolean doRandomOptimize = true; private boolean doRandomForceMerge = true;
private boolean doRandomOptimizeAssert = true; private boolean doRandomForceMergeAssert = true;
public void expungeDeletes(boolean doWait) throws IOException { public void expungeDeletes(boolean doWait) throws IOException {
w.expungeDeletes(doWait); w.expungeDeletes(doWait);
@ -334,25 +334,25 @@ public class RandomIndexWriter implements Closeable {
w.expungeDeletes(); w.expungeDeletes();
} }
public void setDoRandomOptimize(boolean v) { public void setDoRandomForceMerge(boolean v) {
doRandomOptimize = v; doRandomForceMerge = v;
} }
public void setDoRandomOptimizeAssert(boolean v) { public void setDoRandomForceMergeAssert(boolean v) {
doRandomOptimizeAssert = v; doRandomForceMergeAssert = v;
} }
private void doRandomOptimize() throws IOException { private void doRandomForceMerge() throws IOException {
if (doRandomOptimize) { if (doRandomForceMerge) {
final int segCount = w.getSegmentCount(); final int segCount = w.getSegmentCount();
if (r.nextBoolean() || segCount == 0) { if (r.nextBoolean() || segCount == 0) {
// full optimize // full forceMerge
w.optimize(); w.forceMerge(1);
} else { } else {
// partial optimize // partial forceMerge
final int limit = _TestUtil.nextInt(r, 1, segCount); final int limit = _TestUtil.nextInt(r, 1, segCount);
w.optimize(limit); w.forceMerge(limit);
assert !doRandomOptimizeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount(); assert !doRandomForceMergeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
} }
} }
switchDoDocValues(); switchDoDocValues();
@ -361,7 +361,7 @@ public class RandomIndexWriter implements Closeable {
public IndexReader getReader(boolean applyDeletions) throws IOException { public IndexReader getReader(boolean applyDeletions) throws IOException {
getReaderCalled = true; getReaderCalled = true;
if (r.nextInt(4) == 2) { if (r.nextInt(4) == 2) {
doRandomOptimize(); doRandomForceMerge();
} }
// If we are writing with PreFlexRW, force a full // If we are writing with PreFlexRW, force a full
// IndexReader.open so terms are sorted in codepoint // IndexReader.open so terms are sorted in codepoint
@ -394,21 +394,21 @@ public class RandomIndexWriter implements Closeable {
*/ */
public void close() throws IOException { public void close() throws IOException {
// if someone isn't using getReader() API, we want to be sure to // if someone isn't using getReader() API, we want to be sure to
// maybeOptimize since presumably they might open a reader on the dir. // forceMerge since presumably they might open a reader on the dir.
if (getReaderCalled == false && r.nextInt(8) == 2) { if (getReaderCalled == false && r.nextInt(8) == 2) {
doRandomOptimize(); doRandomForceMerge();
} }
w.close(); w.close();
} }
/** /**
* Forces an optimize. * Forces a merge down to maxSegmentCount segments.
* <p> * <p>
* NOTE: this should be avoided in tests unless absolutely necessary, * NOTE: this should be avoided in tests unless absolutely necessary,
* as it will result in less test coverage. * as it will result in less test coverage.
* @see IndexWriter#optimize() * @see IndexWriter#forceMerge(int)
*/ */
public void optimize() throws IOException { public void forceMerge(int maxSegmentCount) throws IOException {
w.optimize(); w.forceMerge(maxSegmentCount);
} }
} }
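In a test this shows up roughly as follows; random and dir come from the test framework and are assumed here:

    RandomIndexWriter w = new RandomIndexWriter(random, dir);
    w.setDoRandomForceMerge(false);   // keep merging deterministic for this test
    // ... add documents ...
    w.forceMerge(1);                  // explicit, only when one segment is really required
    IndexReader r = w.getReader();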


@ -54,7 +54,7 @@ import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util._TestUtil; import org.apache.lucene.util._TestUtil;
// TODO // TODO
// - mix in optimize, addIndexes // - mix in forceMerge, addIndexes
// - randomly mix in non-congruent docs // - randomly mix in non-congruent docs
/** Utility class that spawns multiple indexing and /** Utility class that spawns multiple indexing and


@ -32,7 +32,6 @@ import java.util.Map.Entry;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
@ -41,17 +40,8 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.*; import org.apache.lucene.index.*;
import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.Codec;
import org.apache.lucene.index.codecs.PostingsFormat; import org.apache.lucene.index.codecs.PostingsFormat;
import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec;
import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat;
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec; import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.index.codecs.mockintblock.MockFixedIntBlockPostingsFormat;
import org.apache.lucene.index.codecs.mockintblock.MockVariableIntBlockPostingsFormat;
import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
import org.apache.lucene.index.codecs.mockrandom.MockRandomPostingsFormat;
import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec; import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;
import org.apache.lucene.index.codecs.preflexrw.PreFlexRWPostingsFormat;
import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat;
import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec; import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec;
import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldCache; import org.apache.lucene.search.FieldCache;
@ -1329,7 +1319,7 @@ public abstract class LuceneTestCase extends Assert {
context = IOContext.READONCE; context = IOContext.READONCE;
break; break;
case 3: case 3:
context = new IOContext(new MergeInfo(randomNumDocs, size, true, false)); context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1));
break; break;
case 4: case 4:
context = new IOContext(new FlushInfo(randomNumDocs, size)); context = new IOContext(new FlushInfo(randomNumDocs, size));


@ -25,11 +25,9 @@ import org.apache.lucene.document.*;
import org.apache.lucene.index.*; import org.apache.lucene.index.*;
import org.apache.lucene.index.codecs.*; import org.apache.lucene.index.codecs.*;
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec; import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
import org.apache.lucene.search.*; import org.apache.lucene.search.*;
import org.apache.lucene.store.*; import org.apache.lucene.store.*;
import org.apache.lucene.util.*; import org.apache.lucene.util.*;
import org.apache.lucene.util.Bits;
/* Intentionally outside of oal.index to verify fully /* Intentionally outside of oal.index to verify fully
external codecs work fine */ external codecs work fine */
@ -104,7 +102,7 @@ public class TestExternalCodecs extends LuceneTestCase {
System.out.println("\nTEST: now delete 2nd doc"); System.out.println("\nTEST: now delete 2nd doc");
} }
w.deleteDocuments(new Term("id", "44")); w.deleteDocuments(new Term("id", "44"));
w.optimize(); w.forceMerge(1);
r = IndexReader.open(w, true); r = IndexReader.open(w, true);
assertEquals(NUM_DOCS-2, r.maxDoc()); assertEquals(NUM_DOCS-2, r.maxDoc());
assertEquals(NUM_DOCS-2, r.numDocs()); assertEquals(NUM_DOCS-2, r.numDocs());


@ -142,7 +142,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
writer.commit(); // trigger flush writer.commit(); // trigger flush
writer.addDocument(new Document()); writer.addDocument(new Document());
writer.commit(); // trigger flush writer.commit(); // trigger flush
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
dir.close(); dir.close();
} }


@ -73,7 +73,7 @@ public class Test2BPostings extends LuceneTestCase {
System.out.println(i + " of " + numDocs + "..."); System.out.println(i + " of " + numDocs + "...");
} }
} }
w.optimize(); w.forceMerge(1);
w.close(); w.close();
CheckIndex ci = new CheckIndex(dir); CheckIndex ci = new CheckIndex(dir);
if (VERBOSE) { if (VERBOSE) {


@ -195,8 +195,8 @@ public class Test2BTerms extends LuceneTestCase {
} }
savedTerms = ts.savedTerms; savedTerms = ts.savedTerms;
System.out.println("TEST: optimize"); System.out.println("TEST: full merge");
w.optimize(); w.forceMerge(1);
System.out.println("TEST: close writer"); System.out.println("TEST: close writer");
w.close(); w.close();
} }


@ -39,13 +39,7 @@ import org.apache.lucene.index.codecs.StoredFieldsFormat;
import org.apache.lucene.index.codecs.PostingsFormat; import org.apache.lucene.index.codecs.PostingsFormat;
import org.apache.lucene.index.codecs.SegmentInfosFormat; import org.apache.lucene.index.codecs.SegmentInfosFormat;
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec; import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsBaseFormat;
import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsFormat;
import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
import org.apache.lucene.index.codecs.pulsing.Pulsing40PostingsFormat; import org.apache.lucene.index.codecs.pulsing.Pulsing40PostingsFormat;
import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat;
import org.apache.lucene.index.codecs.simpletext.SimpleTextPostingsFormat;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.AlreadyClosedException;
@ -114,7 +108,7 @@ public class TestAddIndexes extends LuceneTestCase {
assertEquals(40, writer.maxDoc()); assertEquals(40, writer.maxDoc());
writer.close(); writer.close();
// test doc count before segments are merged/index is optimized // test doc count before segments are merged
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
assertEquals(190, writer.maxDoc()); assertEquals(190, writer.maxDoc());
writer.addIndexes(aux3); writer.addIndexes(aux3);
@ -128,9 +122,9 @@ public class TestAddIndexes extends LuceneTestCase {
verifyTermDocs(dir, new Term("content", "bbb"), 50); verifyTermDocs(dir, new Term("content", "bbb"), 50);
// now optimize it. // now fully merge it.
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// make sure the new index is correct // make sure the new index is correct
@ -186,7 +180,7 @@ public class TestAddIndexes extends LuceneTestCase {
q.add(new Term("content", "14")); q.add(new Term("content", "14"));
writer.deleteDocuments(q); writer.deleteDocuments(q);
writer.optimize(); writer.forceMerge(1);
writer.commit(); writer.commit();
verifyNumDocs(dir, 1039); verifyNumDocs(dir, 1039);
@ -224,7 +218,7 @@ public class TestAddIndexes extends LuceneTestCase {
q.add(new Term("content", "14")); q.add(new Term("content", "14"));
writer.deleteDocuments(q); writer.deleteDocuments(q);
writer.optimize(); writer.forceMerge(1);
writer.commit(); writer.commit();
verifyNumDocs(dir, 1039); verifyNumDocs(dir, 1039);
@ -262,7 +256,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer.addIndexes(aux); writer.addIndexes(aux);
writer.optimize(); writer.forceMerge(1);
writer.commit(); writer.commit();
verifyNumDocs(dir, 1039); verifyNumDocs(dir, 1039);
@ -729,10 +723,10 @@ public class TestAddIndexes extends LuceneTestCase {
switch(j%5) { switch(j%5) {
case 0: case 0:
if (VERBOSE) { if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then optimize"); System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then full merge");
} }
writer2.addIndexes(dirs); writer2.addIndexes(dirs);
writer2.optimize(); writer2.forceMerge(1);
break; break;
case 1: case 1:
if (VERBOSE) { if (VERBOSE) {
@ -834,10 +828,10 @@ public class TestAddIndexes extends LuceneTestCase {
switch(j%5) { switch(j%5) {
case 0: case 0:
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + optimize"); System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + full merge");
} }
writer2.addIndexes(dirs); writer2.addIndexes(dirs);
writer2.optimize(); writer2.forceMerge(1);
break; break;
case 1: case 1:
if (VERBOSE) { if (VERBOSE) {
@ -853,9 +847,9 @@ public class TestAddIndexes extends LuceneTestCase {
break; break;
case 3: case 3:
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: " + Thread.currentThread().getName() + ": optimize"); System.out.println("TEST: " + Thread.currentThread().getName() + ": full merge");
} }
writer2.optimize(); writer2.forceMerge(1);
break; break;
case 4: case 4:
if (VERBOSE) { if (VERBOSE) {
@ -1214,7 +1208,7 @@ public class TestAddIndexes extends LuceneTestCase {
} }
try { try {
IndexReader indexReader = IndexReader.open(toAdd); IndexReader.open(toAdd);
fail("no such codec"); fail("no such codec");
} catch (IllegalArgumentException ex) { } catch (IllegalArgumentException ex) {
// expected // expected

View File

@ -79,16 +79,16 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
/* /*
// These are only needed for the special upgrade test to verify // These are only needed for the special upgrade test to verify
// that also optimized indexes are correctly upgraded by IndexUpgrader. // that also single-segment indexes are correctly upgraded by IndexUpgrader.
// You don't need them to be build for non-3.1 (the test is happy with just one // You don't need them to be build for non-3.1 (the test is happy with just one
// "old" segment format, version is unimportant: // "old" segment format, version is unimportant:
public void testCreateOptimizedCFS() throws IOException { public void testCreateSingleSegmentCFS() throws IOException {
createIndex("index.optimized.cfs", true, true); createIndex("index.singlesegment.cfs", true, true);
} }
public void testCreateOptimizedNoCFS() throws IOException { public void testCreateSingleSegmentNoCFS() throws IOException {
createIndex("index.optimized.nocfs", false, true); createIndex("index.singlesegment.nocfs", false, true);
} }
*/ */
@ -118,8 +118,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
"29.nocfs", "29.nocfs",
}; };
final String[] oldOptimizedNames = {"31.optimized.cfs", final String[] oldSingleSegmentNames = {"31.optimized.cfs",
"31.optimized.nocfs", "31.optimized.nocfs",
}; };
/** This test checks that *only* IndexFormatTooOldExceptions are thrown when you open and operate on too old indexes! */ /** This test checks that *only* IndexFormatTooOldExceptions are thrown when you open and operate on too old indexes! */
@ -180,7 +180,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
} }
} }
public void testOptimizeOldIndex() throws Exception { public void testFullyMergeOldIndex() throws Exception {
for(int i=0;i<oldNames.length;i++) { for(int i=0;i<oldNames.length;i++) {
if (VERBOSE) { if (VERBOSE) {
System.out.println("\nTEST: index=" + oldNames[i]); System.out.println("\nTEST: index=" + oldNames[i]);
@ -191,7 +191,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig( IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))); TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.optimize(); w.forceMerge(1);
w.close(); w.close();
dir.close(); dir.close();
@ -393,9 +393,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
doTestHits(hits, 43, searcher.getIndexReader()); doTestHits(hits, 43, searcher.getIndexReader());
searcher.close(); searcher.close();
// optimize // fully merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
searcher = new IndexSearcher(dir, true); searcher = new IndexSearcher(dir, true);
@ -439,9 +439,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
doTestHits(hits, 33, searcher.getIndexReader()); doTestHits(hits, 33, searcher.getIndexReader());
searcher.close(); searcher.close();
// optimize // fully merge
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
searcher = new IndexSearcher(dir, true); searcher = new IndexSearcher(dir, true);
@ -455,7 +455,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
dir.close(); dir.close();
} }
public File createIndex(String dirName, boolean doCFS, boolean optimized) throws IOException { public File createIndex(String dirName, boolean doCFS, boolean fullyMerged) throws IOException {
// we use a real directory name that is not cleaned up, because this method is only used to create backwards indexes: // we use a real directory name that is not cleaned up, because this method is only used to create backwards indexes:
File indexDir = new File(LuceneTestCase.TEMP_DIR, dirName); File indexDir = new File(LuceneTestCase.TEMP_DIR, dirName);
_TestUtil.rmDir(indexDir); _TestUtil.rmDir(indexDir);
@ -472,12 +472,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
addDoc(writer, i); addDoc(writer, i);
} }
assertEquals("wrong doc count", 35, writer.maxDoc()); assertEquals("wrong doc count", 35, writer.maxDoc());
if (optimized) { if (fullyMerged) {
writer.optimize(); writer.forceMerge(1);
} }
writer.close(); writer.close();
if (!optimized) { if (!fullyMerged) {
// open fresh writer so we get no prx file in the added segment // open fresh writer so we get no prx file in the added segment
mp = new LogByteSizeMergePolicy(); mp = new LogByteSizeMergePolicy();
mp.setUseCompoundFile(doCFS); mp.setUseCompoundFile(doCFS);
@ -743,9 +743,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
} }
public void testUpgradeOldIndex() throws Exception { public void testUpgradeOldIndex() throws Exception {
List<String> names = new ArrayList<String>(oldNames.length + oldOptimizedNames.length); List<String> names = new ArrayList<String>(oldNames.length + oldSingleSegmentNames.length);
names.addAll(Arrays.asList(oldNames)); names.addAll(Arrays.asList(oldNames));
names.addAll(Arrays.asList(oldOptimizedNames)); names.addAll(Arrays.asList(oldSingleSegmentNames));
for(String name : names) { for(String name : names) {
if (VERBOSE) { if (VERBOSE) {
System.out.println("testUpgradeOldIndex: index=" +name); System.out.println("testUpgradeOldIndex: index=" +name);
@ -764,16 +764,16 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
} }
} }
public void testUpgradeOldOptimizedIndexWithAdditions() throws Exception { public void testUpgradeOldSingleSegmentIndexWithAdditions() throws Exception {
for (String name : oldOptimizedNames) { for (String name : oldSingleSegmentNames) {
if (VERBOSE) { if (VERBOSE) {
System.out.println("testUpgradeOldOptimizedIndexWithAdditions: index=" +name); System.out.println("testUpgradeOldSingleSegmentIndexWithAdditions: index=" +name);
} }
File oldIndxeDir = _TestUtil.getTempDir(name); File oldIndxeDir = _TestUtil.getTempDir(name);
_TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir); _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir);
Directory dir = newFSDirectory(oldIndxeDir); Directory dir = newFSDirectory(oldIndxeDir);
assertEquals("Original index must be optimized", 1, getNumberOfSegments(dir)); assertEquals("Original index must be single segment", 1, getNumberOfSegments(dir));
// create a bunch of dummy segments // create a bunch of dummy segments
int id = 40; int id = 40;
@ -791,7 +791,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
w.close(false); w.close(false);
} }
// add dummy segments (which are all in current version) to optimized index // add dummy segments (which are all in current
// version) to single segment index
MergePolicy mp = random.nextBoolean() ? newLogMergePolicy() : newTieredMergePolicy(); MergePolicy mp = random.nextBoolean() ? newLogMergePolicy() : newTieredMergePolicy();
IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null) IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null)
.setMergePolicy(mp); .setMergePolicy(mp);

View File

@ -45,7 +45,7 @@ public class TestCheckIndex extends LuceneTestCase {
for(int i=0;i<19;i++) { for(int i=0;i<19;i++) {
writer.addDocument(doc); writer.addDocument(doc);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, false); IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(5); reader.deleteDocument(5);

View File

@ -33,7 +33,6 @@ import org.apache.lucene.index.codecs.PostingsConsumer;
import org.apache.lucene.index.codecs.TermStats; import org.apache.lucene.index.codecs.TermStats;
import org.apache.lucene.index.codecs.TermsConsumer; import org.apache.lucene.index.codecs.TermsConsumer;
import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec; import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec;
import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat;
import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat; import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
@ -371,7 +370,7 @@ public class TestCodecs extends LuceneTestCase {
assertEquals(2, results.length); assertEquals(2, results.length);
assertEquals(0, results[0].doc); assertEquals(0, results[0].doc);
writer.optimize(); writer.forceMerge(1);
// optimise to merge the segments. // optimise to merge the segments.
results = this.search(writer, pq, 5); results = this.search(writer, pq, 5);

View File

@ -77,7 +77,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
assertEquals("f4", fis2.fieldInfo(3).name); assertEquals("f4", fis2.fieldInfo(3).name);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
sis = new SegmentInfos(); sis = new SegmentInfos();
@ -141,7 +141,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
assertEquals("f4", fis2.fieldInfo(3).name); assertEquals("f4", fis2.fieldInfo(3).name);
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
sis = new SegmentInfos(); sis = new SegmentInfos();
@ -252,7 +252,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
new LogByteSizeMergePolicy()).setInfoStream(new FailOnNonBulkMergesInfoStream())); new LogByteSizeMergePolicy()).setInfoStream(new FailOnNonBulkMergesInfoStream()));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
@ -293,7 +293,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();

View File

@ -74,7 +74,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
public void onCommit(List<? extends IndexCommit> commits) throws IOException { public void onCommit(List<? extends IndexCommit> commits) throws IOException {
IndexCommit lastCommit = commits.get(commits.size()-1); IndexCommit lastCommit = commits.get(commits.size()-1);
IndexReader r = IndexReader.open(dir, true); IndexReader r = IndexReader.open(dir, true);
assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized()); assertEquals("lastCommit.segmentCount()=" + lastCommit.getSegmentCount() + " vs IndexReader.segmentCount=" + r.getSequentialSubReaders().length, r.getSequentialSubReaders().length, lastCommit.getSegmentCount());
r.close(); r.close();
verifyCommitOrder(commits); verifyCommitOrder(commits);
numOnCommit++; numOnCommit++;
@ -317,13 +317,13 @@ public class TestDeletionPolicy extends LuceneTestCase {
} }
writer.close(); writer.close();
final boolean isOptimized; final boolean needsMerging;
{ {
IndexReader r = IndexReader.open(dir); IndexReader r = IndexReader.open(dir);
isOptimized = r.isOptimized(); needsMerging = r.getSequentialSubReaders().length != 1;
r.close(); r.close();
} }
if (!isOptimized) { if (needsMerging) {
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)).setOpenMode( new MockAnalyzer(random)).setOpenMode(
OpenMode.APPEND).setIndexDeletionPolicy(policy); OpenMode.APPEND).setIndexDeletionPolicy(policy);
@ -332,22 +332,22 @@ public class TestDeletionPolicy extends LuceneTestCase {
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
} }
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: open writer for optimize"); System.out.println("TEST: open writer for forceMerge");
} }
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
} }
assertEquals(isOptimized ? 0:1, policy.numOnInit); assertEquals(needsMerging ? 1:0, policy.numOnInit);
// If we are not auto committing then there should // If we are not auto committing then there should
// be exactly 2 commits (one per close above): // be exactly 2 commits (one per close above):
assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit); assertEquals(1 + (needsMerging ? 1:0), policy.numOnCommit);
// Test listCommits // Test listCommits
Collection<IndexCommit> commits = IndexReader.listCommits(dir); Collection<IndexCommit> commits = IndexReader.listCommits(dir);
// 2 from closing writer // 2 from closing writer
assertEquals(1 + (isOptimized ? 0:1), commits.size()); assertEquals(1 + (needsMerging ? 1:0), commits.size());
// Make sure we can open a reader on each commit: // Make sure we can open a reader on each commit:
for (final IndexCommit commit : commits) { for (final IndexCommit commit : commits) {
@ -418,16 +418,16 @@ public class TestDeletionPolicy extends LuceneTestCase {
} }
assertTrue(lastCommit != null); assertTrue(lastCommit != null);
// Now add 1 doc and optimize // Now add 1 doc and merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy)); writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
addDoc(writer); addDoc(writer);
assertEquals(11, writer.numDocs()); assertEquals(11, writer.numDocs());
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
assertEquals(6, IndexReader.listCommits(dir).size()); assertEquals(6, IndexReader.listCommits(dir).size());
// Now open writer on the commit just before optimize: // Now open writer on the commit just before merge:
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setIndexDeletionPolicy(policy).setIndexCommit(lastCommit)); .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs()); assertEquals(10, writer.numDocs());
@ -436,8 +436,8 @@ public class TestDeletionPolicy extends LuceneTestCase {
writer.rollback(); writer.rollback();
IndexReader r = IndexReader.open(dir, true); IndexReader r = IndexReader.open(dir, true);
// Still optimized, still 11 docs // Still merged, still 11 docs
assertTrue(r.isOptimized()); assertEquals(1, r.getSequentialSubReaders().length);
assertEquals(11, r.numDocs()); assertEquals(11, r.numDocs());
r.close(); r.close();
@ -451,39 +451,39 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertEquals(7, IndexReader.listCommits(dir).size()); assertEquals(7, IndexReader.listCommits(dir).size());
r = IndexReader.open(dir, true); r = IndexReader.open(dir, true);
// Not optimized because we rolled it back, and now only // Not fully merged because we rolled it back, and now only
// 10 docs // 10 docs
assertTrue(!r.isOptimized()); assertTrue(r.getSequentialSubReaders().length > 1);
assertEquals(10, r.numDocs()); assertEquals(10, r.numDocs());
r.close(); r.close();
// Reoptimize // Re-merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy)); writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
r = IndexReader.open(dir, true); r = IndexReader.open(dir, true);
assertTrue(r.isOptimized()); assertEquals(1, r.getSequentialSubReaders().length);
assertEquals(10, r.numDocs()); assertEquals(10, r.numDocs());
r.close(); r.close();
// Now open writer on the commit just before optimize, // Now open writer on the commit just before merging,
// but this time keeping only the last commit: // but this time keeping only the last commit:
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit)); writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs()); assertEquals(10, writer.numDocs());
// Reader still sees optimized index, because writer // Reader still sees fully merged index, because writer
// opened on the prior commit has not yet committed: // opened on the prior commit has not yet committed:
r = IndexReader.open(dir, true); r = IndexReader.open(dir, true);
assertTrue(r.isOptimized()); assertEquals(1, r.getSequentialSubReaders().length);
assertEquals(10, r.numDocs()); assertEquals(10, r.numDocs());
r.close(); r.close();
writer.close(); writer.close();
// Now reader sees unoptimized index: // Now reader sees not-fully-merged index:
r = IndexReader.open(dir, true); r = IndexReader.open(dir, true);
assertTrue(!r.isOptimized()); assertTrue(r.getSequentialSubReaders().length > 1);
assertEquals(10, r.numDocs()); assertEquals(10, r.numDocs());
r.close(); r.close();
@ -525,7 +525,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
((LogMergePolicy) mp).setUseCompoundFile(true); ((LogMergePolicy) mp).setUseCompoundFile(true);
} }
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
assertEquals(1, policy.numOnInit); assertEquals(1, policy.numOnInit);
@ -569,7 +569,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
for(int i=0;i<17;i++) { for(int i=0;i<17;i++) {
addDoc(writer); addDoc(writer);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
} }
@ -673,15 +673,15 @@ public class TestDeletionPolicy extends LuceneTestCase {
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
} }
IndexReader r = IndexReader.open(dir); IndexReader r = IndexReader.open(dir);
final boolean wasOptimized = r.isOptimized(); final boolean wasFullyMerged = r.getSequentialSubReaders().length == 1 && !r.hasDeletions();
r.close(); r.close();
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
// this is a commit // this is a commit
writer.close(); writer.close();
assertEquals(2*(N+1)+1, policy.numOnInit); assertEquals(2*(N+1)+1, policy.numOnInit);
assertEquals(2*(N+2) - (wasOptimized ? 1:0), policy.numOnCommit); assertEquals(2*(N+2) - (wasFullyMerged ? 1:0), policy.numOnCommit);
IndexSearcher searcher = new IndexSearcher(dir, false); IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;

View File

@ -184,7 +184,7 @@ public class TestDirectoryReader extends LuceneTestCase {
while (td.nextDoc() != td.NO_MORE_DOCS) ret += td.docID(); while (td.nextDoc() != td.NO_MORE_DOCS) ret += td.docID();
// really a dummy assert to ensure that we got some docs and to ensure that // really a dummy assert to ensure that we got some docs and to ensure that
// nothing is optimized out. // nothing is eliminated by hotspot
assertTrue(ret > 0); assertTrue(ret > 0);
readers1[0].close(); readers1[0].close();
readers1[1].close(); readers1[1].close();

View File

@ -42,7 +42,7 @@ public class TestDocCount extends LuceneTestCase {
IndexReader ir = iw.getReader(); IndexReader ir = iw.getReader();
verifyCount(ir); verifyCount(ir);
ir.close(); ir.close();
iw.optimize(); iw.forceMerge(1);
ir = iw.getReader(); ir = iw.getReader();
verifyCount(ir); verifyCount(ir);
ir.close(); ir.close();

View File

@ -321,7 +321,7 @@ public class TestDocumentWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))); TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.addDocument(doc); writer.addDocument(doc);
writer.optimize(); // be sure to have a single segment writer.forceMerge(1); // be sure to have a single segment
writer.close(); writer.close();
_TestUtil.checkIndex(dir); _TestUtil.checkIndex(dir);

View File

@ -203,7 +203,7 @@ public class TestFieldsReader extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
for(int i=0;i<2;i++) for(int i=0;i<2;i++)
writer.addDocument(testDoc); writer.addDocument(testDoc);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);

View File

@ -48,7 +48,7 @@ public class TestFlex extends LuceneTestCase {
w.addDocument(doc); w.addDocument(doc);
} }
} else { } else {
w.optimize(); w.forceMerge(1);
} }
IndexReader r = w.getReader(); IndexReader r = w.getReader();

View File

@ -27,13 +27,12 @@ import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil; import org.apache.lucene.util._TestUtil;
public class TestForceMergeForever extends LuceneTestCase {
public class TestOptimizeForever extends LuceneTestCase { // Just counts how many merges are done
// Just counts how many merges are done for optimize
private static class MyIndexWriter extends IndexWriter { private static class MyIndexWriter extends IndexWriter {
AtomicInteger optimizeMergeCount = new AtomicInteger(); AtomicInteger mergeCount = new AtomicInteger();
private boolean first; private boolean first;
public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception { public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception {
@ -42,12 +41,12 @@ public class TestOptimizeForever extends LuceneTestCase {
@Override @Override
public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException { public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException {
if (merge.optimize && (first || merge.segments.size() == 1)) { if (merge.maxNumSegments != -1 && (first || merge.segments.size() == 1)) {
first = false; first = false;
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: optimized merge"); System.out.println("TEST: maxNumSegments merge");
} }
optimizeMergeCount.incrementAndGet(); mergeCount.incrementAndGet();
} }
super.merge(merge); super.merge(merge);
} }
@ -57,7 +56,7 @@ public class TestOptimizeForever extends LuceneTestCase {
final Directory d = newDirectory(); final Directory d = newDirectory();
final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
// Try to make an index that requires optimizing: // Try to make an index that requires merging:
w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 11)); w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 11));
final int numStartDocs = atLeast(20); final int numStartDocs = atLeast(20);
final LineFileDocs docs = new LineFileDocs(random); final LineFileDocs docs = new LineFileDocs(random);
@ -95,10 +94,10 @@ public class TestOptimizeForever extends LuceneTestCase {
} }
}; };
t.start(); t.start();
w.optimize(); w.forceMerge(1);
doStop.set(true); doStop.set(true);
t.join(); t.join();
assertTrue("optimize count is " + w.optimizeMergeCount.get(), w.optimizeMergeCount.get() <= 1); assertTrue("merge count is " + w.mergeCount.get(), w.mergeCount.get() <= 1);
w.close(); w.close();
d.close(); d.close();
} }

View File

@ -100,7 +100,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream())); TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream()));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
assertFNXFiles(dir, "_2.fnx"); assertFNXFiles(dir, "_2.fnx");
@ -140,7 +140,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream())); TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream()));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
assertFNXFiles(dir, "_2.fnx"); assertFNXFiles(dir, "_2.fnx");
@ -187,7 +187,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
} }
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream())); TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new FailOnNonBulkMergesInfoStream()));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
assertFNXFiles(dir, "_2.fnx"); assertFNXFiles(dir, "_2.fnx");
dir.close(); dir.close();
@ -270,7 +270,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
return dir; return dir;
} }
public void testOptimize() throws IOException { public void testForceMerge() throws IOException {
for (int i = 0; i < 2*RANDOM_MULTIPLIER; i++) { for (int i = 0; i < 2*RANDOM_MULTIPLIER; i++) {
Set<String> fieldNames = new HashSet<String>(); Set<String> fieldNames = new HashSet<String>();
final int numFields = 2 + (TEST_NIGHTLY ? random.nextInt(200) : random.nextInt(20)); final int numFields = 2 + (TEST_NIGHTLY ? random.nextInt(200) : random.nextInt(20));
@ -285,7 +285,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
FieldNumberBiMap globalFieldMap = writer.segmentInfos FieldNumberBiMap globalFieldMap = writer.segmentInfos
.getOrLoadGlobalFieldNumberMap(base); .getOrLoadGlobalFieldNumberMap(base);
Set<Entry<String, Integer>> entries = globalFieldMap.entries(); Set<Entry<String, Integer>> entries = globalFieldMap.entries();
writer.optimize(); writer.forceMerge(1);
writer.commit(); writer.commit();
writer.close(); writer.close();
Set<Entry<String, Integer>> afterOptmize = globalFieldMap.entries(); Set<Entry<String, Integer>> afterOptmize = globalFieldMap.entries();
@ -352,7 +352,7 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
IndexWriter w = new IndexWriter(base, newIndexWriterConfig( IndexWriter w = new IndexWriter(base, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
new LogByteSizeMergePolicy())); new LogByteSizeMergePolicy()));
w.optimize(); w.forceMerge(1);
w.close(); w.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
sis.read(base); sis.read(base);

View File

@ -42,7 +42,7 @@ public class TestIndexCommit extends LuceneTestCase {
@Override public long getTimestamp() throws IOException { return 1;} @Override public long getTimestamp() throws IOException { return 1;}
@Override public Map<String, String> getUserData() throws IOException { return null; } @Override public Map<String, String> getUserData() throws IOException { return null; }
@Override public boolean isDeleted() { return false; } @Override public boolean isDeleted() { return false; }
@Override public boolean isOptimized() { return false; } @Override public int getSegmentCount() { return 2; }
}; };
IndexCommit ic2 = new IndexCommit() { IndexCommit ic2 = new IndexCommit() {
@ -55,7 +55,7 @@ public class TestIndexCommit extends LuceneTestCase {
@Override public long getTimestamp() throws IOException { return 1;} @Override public long getTimestamp() throws IOException { return 1;}
@Override public Map<String, String> getUserData() throws IOException { return null; } @Override public Map<String, String> getUserData() throws IOException { return null; }
@Override public boolean isDeleted() { return false; } @Override public boolean isDeleted() { return false; }
@Override public boolean isOptimized() { return false; } @Override public int getSegmentCount() { return 2; }
}; };
assertEquals(ic1, ic2); assertEquals(ic1, ic2);

View File

@ -95,18 +95,18 @@ public class TestIndexReader extends LuceneTestCase
IndexReader r3 = IndexReader.openIfChanged(r2); IndexReader r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3); assertNotNull(r3);
assertFalse(c.equals(r3.getIndexCommit())); assertFalse(c.equals(r3.getIndexCommit()));
assertFalse(r2.getIndexCommit().isOptimized()); assertFalse(r2.getIndexCommit().getSegmentCount() == 1);
r3.close(); r3.close();
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)) new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND)); .setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
r3 = IndexReader.openIfChanged(r2); r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3); assertNotNull(r3);
assertTrue(r3.getIndexCommit().isOptimized()); assertEquals(1, r3.getIndexCommit().getSegmentCount());
r2.close(); r2.close();
r3.close(); r3.close();
d.close(); d.close();
@ -381,11 +381,11 @@ public class TestIndexReader extends LuceneTestCase
assertEquals(bin[i], bytesRef.bytes[i + bytesRef.offset]); assertEquals(bin[i], bytesRef.bytes[i + bytesRef.offset]);
} }
reader.close(); reader.close();
// force optimize // force merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy())); writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
reader = IndexReader.open(dir, false); reader = IndexReader.open(dir, false);
doc2 = reader.document(reader.maxDoc() - 1); doc2 = reader.document(reader.maxDoc() - 1);
@ -721,7 +721,7 @@ public class TestIndexReader extends LuceneTestCase
// [incorrectly] hit a "docs out of order" // [incorrectly] hit a "docs out of order"
// IllegalStateException because above out-of-bounds // IllegalStateException because above out-of-bounds
// deleteDocument corrupted the index: // deleteDocument corrupted the index:
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
if (!gotException) { if (!gotException) {
fail("delete of out-of-bounds doc number failed to hit exception"); fail("delete of out-of-bounds doc number failed to hit exception");
@ -846,7 +846,9 @@ public class TestIndexReader extends LuceneTestCase
assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs()); assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs());
assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc()); assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc());
assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions()); assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions());
assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized()); if (!(index1 instanceof ParallelReader)) {
assertEquals("Single segment test differs.", index1.getSequentialSubReaders().length == 1, index2.getSequentialSubReaders().length == 1);
}
// check field names // check field names
Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL); Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
@ -970,19 +972,19 @@ public class TestIndexReader extends LuceneTestCase
IndexReader r2 = IndexReader.openIfChanged(r); IndexReader r2 = IndexReader.openIfChanged(r);
assertNotNull(r2); assertNotNull(r2);
assertFalse(c.equals(r2.getIndexCommit())); assertFalse(c.equals(r2.getIndexCommit()));
assertFalse(r2.getIndexCommit().isOptimized()); assertFalse(r2.getIndexCommit().getSegmentCount() == 1);
r2.close(); r2.close();
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)) new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND)); .setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
r2 = IndexReader.openIfChanged(r); r2 = IndexReader.openIfChanged(r);
assertNotNull(r2); assertNotNull(r2);
assertNull(IndexReader.openIfChanged(r2)); assertNull(IndexReader.openIfChanged(r2));
assertTrue(r2.getIndexCommit().isOptimized()); assertEquals(1, r2.getIndexCommit().getSegmentCount());
r.close(); r.close();
r2.close(); r2.close();
@ -1032,7 +1034,7 @@ public class TestIndexReader extends LuceneTestCase
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)) new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND)); .setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Make sure reopen to a single segment is still readonly: // Make sure reopen to a single segment is still readonly:

View File

@ -192,15 +192,15 @@ public class TestIndexReaderClone extends LuceneTestCase {
} }
// open non-readOnly reader1 on multi-segment index, then // open non-readOnly reader1 on multi-segment index, then
// optimize the index, then clone to readOnly reader2 // fully merge the index, then clone to readOnly reader2
public void testReadOnlyCloneAfterOptimize() throws Exception { public void testReadOnlyCloneAfterFullMerge() throws Exception {
final Directory dir1 = newDirectory(); final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true); TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader reader1 = IndexReader.open(dir1, false); IndexReader reader1 = IndexReader.open(dir1, false);
IndexWriter w = new IndexWriter(dir1, newIndexWriterConfig( IndexWriter w = new IndexWriter(dir1, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))); TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.optimize(); w.forceMerge(1);
w.close(); w.close();
IndexReader reader2 = reader1.clone(true); IndexReader reader2 = reader1.clone(true);
assertTrue(isReadOnly(reader2)); assertTrue(isReadOnly(reader2));

View File

@ -80,7 +80,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
/** /**
* Test that norms values are preserved as the index is maintained. Including * Test that norms values are preserved as the index is maintained. Including
* separate norms. Including merging indexes with separate norms. Including * separate norms. Including merging indexes with separate norms. Including
* optimize. * full merge.
*/ */
public void testNorms() throws IOException { public void testNorms() throws IOException {
// test with a single index: index1 // test with a single index: index1
@ -112,7 +112,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
createIndex(random, dir3); createIndex(random, dir3);
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: now addIndexes/optimize"); System.out.println("TEST: now addIndexes/full merge");
} }
IndexWriter iw = new IndexWriter( IndexWriter iw = new IndexWriter(
dir3, dir3,
@ -122,7 +122,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
setMergePolicy(newLogMergePolicy(3)) setMergePolicy(newLogMergePolicy(3))
); );
iw.addIndexes(dir1, dir2); iw.addIndexes(dir1, dir2);
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
norms1.addAll(norms); norms1.addAll(norms);
@ -135,7 +135,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
verifyIndex(dir3); verifyIndex(dir3);
doTestNorms(random, dir3); doTestNorms(random, dir3);
// now with optimize // now with full merge
iw = new IndexWriter( iw = new IndexWriter(
dir3, dir3,
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr). newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
@ -143,7 +143,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
setMaxBufferedDocs(5). setMaxBufferedDocs(5).
setMergePolicy(newLogMergePolicy(3)) setMergePolicy(newLogMergePolicy(3))
); );
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
verifyIndex(dir3); verifyIndex(dir3);

View File

@ -33,7 +33,7 @@ import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount;
import static org.apache.lucene.index.TestIndexReader.createDocument; import static org.apache.lucene.index.TestIndexReader.createDocument;
public class TestIndexReaderDelete extends LuceneTestCase { public class TestIndexReaderDelete extends LuceneTestCase {
private void deleteReaderReaderConflict(boolean optimize) throws IOException { private void deleteReaderReaderConflict(boolean doFullMerge) throws IOException {
Directory dir = newDirectory(); Directory dir = newDirectory();
Term searchTerm1 = new Term("content", "aaa"); Term searchTerm1 = new Term("content", "aaa");
@ -49,8 +49,9 @@ public class TestIndexReaderDelete extends LuceneTestCase {
addDoc(writer, searchTerm2.text()); addDoc(writer, searchTerm2.text());
addDoc(writer, searchTerm3.text()); addDoc(writer, searchTerm3.text());
} }
if(optimize) if (doFullMerge) {
writer.optimize(); writer.forceMerge(1);
}
writer.close(); writer.close();
// OPEN TWO READERS // OPEN TWO READERS
@ -131,7 +132,7 @@ public class TestIndexReaderDelete extends LuceneTestCase {
dir.close(); dir.close();
} }
private void deleteReaderWriterConflict(boolean optimize) throws IOException { private void deleteReaderWriterConflict(boolean doFullMerge) throws IOException {
//Directory dir = new RAMDirectory(); //Directory dir = new RAMDirectory();
Directory dir = newDirectory(); Directory dir = newDirectory();
@ -159,13 +160,14 @@ public class TestIndexReaderDelete extends LuceneTestCase {
addDoc(writer, searchTerm2.text()); addDoc(writer, searchTerm2.text());
} }
// REQUEST OPTIMIZATION // REQUEST full merge
// This causes a new segment to become current for all subsequent // This causes a new segment to become current for all subsequent
// searchers. Because of this, deletions made via a previously open // searchers. Because of this, deletions made via a previously open
// reader, which would be applied to that reader's segment, are lost // reader, which would be applied to that reader's segment, are lost
// for subsequent searchers/readers // for subsequent searchers/readers
if(optimize) if (doFullMerge) {
writer.optimize(); writer.forceMerge(1);
}
writer.close(); writer.close();
// The reader should not see the new data // The reader should not see the new data
@ -255,19 +257,19 @@ public class TestIndexReaderDelete extends LuceneTestCase {
dir.close(); dir.close();
} }
public void testDeleteReaderReaderConflictUnoptimized() throws IOException { public void testDeleteReaderReaderConflictNoFullMerge() throws IOException {
deleteReaderReaderConflict(false); deleteReaderReaderConflict(false);
} }
public void testDeleteReaderReaderConflictOptimized() throws IOException { public void testDeleteReaderReaderConflictFullMerge() throws IOException {
deleteReaderReaderConflict(true); deleteReaderReaderConflict(true);
} }
public void testDeleteReaderWriterConflictUnoptimized() throws IOException { public void testDeleteReaderWriterConflictNoFullMerge() throws IOException {
deleteReaderWriterConflict(false); deleteReaderWriterConflict(false);
} }
public void testDeleteReaderWriterConflictOptimized() throws IOException { public void testDeleteReaderWriterConflictFullMerge() throws IOException {
deleteReaderWriterConflict(true); deleteReaderWriterConflict(true);
} }

View File

@ -711,7 +711,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
writer.addDocument(createDocument(i, 3)); writer.addDocument(createDocument(i, 3));
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
final TestReopen test = new TestReopen() { final TestReopen test = new TestReopen() {
@ -961,7 +961,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
} }
if (!multiSegment) { if (!multiSegment) {
w.optimize(); w.forceMerge(1);
} }
w.close(); w.close();
@ -1019,14 +1019,14 @@ public class TestIndexReaderReopen extends LuceneTestCase {
} }
case 2: { case 2: {
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.optimize(); w.forceMerge(1);
w.close(); w.close();
break; break;
} }
case 3: { case 3: {
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.addDocument(createDocument(101, 4)); w.addDocument(createDocument(101, 4));
w.optimize(); w.forceMerge(1);
w.addDocument(createDocument(102, 4)); w.addDocument(createDocument(102, 4));
w.addDocument(createDocument(103, 4)); w.addDocument(createDocument(103, 4));
w.close(); w.close();

View File

@ -19,7 +19,6 @@ package org.apache.lucene.index;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader; import java.io.Reader;
import java.io.StringReader; import java.io.StringReader;
import java.util.ArrayList; import java.util.ArrayList;
@ -54,16 +53,13 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock; import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.NoLockFactory; import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory; import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SingleInstanceLockFactory; import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
@ -108,10 +104,10 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals(60, reader.numDocs()); assertEquals(60, reader.numDocs());
reader.close(); reader.close();
// optimize the index and check that the new doc count is correct // merge the index down and check that the new doc count is correct
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
assertEquals(60, writer.numDocs()); assertEquals(60, writer.numDocs());
writer.optimize(); writer.forceMerge(1);
assertEquals(60, writer.maxDoc()); assertEquals(60, writer.maxDoc());
assertEquals(60, writer.numDocs()); assertEquals(60, writer.numDocs());
writer.close(); writer.close();
@ -733,7 +729,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
//LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy(); //LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
//lmp2.setUseCompoundFile(false); //lmp2.setUseCompoundFile(false);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
} }
} }
@ -1302,7 +1298,7 @@ public class TestIndexWriter extends LuceneTestCase {
w.addDocument(doc); w.addDocument(doc);
w.commit(); w.commit();
w.optimize(); // force segment merge. w.forceMerge(1); // force segment merge.
w.close(); w.close();
IndexReader ir = IndexReader.open(dir, true); IndexReader ir = IndexReader.open(dir, true);
@ -1439,7 +1435,7 @@ public class TestIndexWriter extends LuceneTestCase {
List<String> files = Arrays.asList(dir.listAll()); List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs")); assertTrue(files.contains("_0.cfs"));
w.addDocument(doc); w.addDocument(doc);
w.optimize(); w.forceMerge(1);
if (iter == 1) { if (iter == 1) {
w.commit(); w.commit();
} }
@ -1450,10 +1446,10 @@ public class TestIndexWriter extends LuceneTestCase {
// NOTE: here we rely on "Windows" behavior, ie, even // NOTE: here we rely on "Windows" behavior, ie, even
// though IW wanted to delete _0.cfs since it was // though IW wanted to delete _0.cfs since it was
// optimized away, because we have a reader open // merged away, because we have a reader open
// against this file, it should still be here: // against this file, it should still be here:
assertTrue(files.contains("_0.cfs")); assertTrue(files.contains("_0.cfs"));
// optimize created this // forceMerge created this
//assertTrue(files.contains("_2.cfs")); //assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles(); w.deleteUnusedFiles();
@ -1697,7 +1693,7 @@ public class TestIndexWriter extends LuceneTestCase {
} }
s.close(); s.close();
r.close(); r.close();
w.optimize(); w.forceMerge(1);
} }
} }
w.close(); w.close();

View File

@ -224,7 +224,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
} }
long midDiskUsage = dir.getMaxUsedSizeInBytes(); long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes(); dir.resetMaxUsedSizeInBytes();
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader.open(dir, true).close(); IndexReader.open(dir, true).close();
@ -246,11 +246,11 @@ public class TestIndexWriterCommit extends LuceneTestCase {
/* /*
* Verify that calling optimize when writer is open for * Verify that calling forceMerge when writer is open for
* "commit on close" works correctly both for rollback() * "commit on close" works correctly both for rollback()
* and close(). * and close().
*/ */
public void testCommitOnCloseOptimize() throws IOException { public void testCommitOnCloseForceMerge() throws IOException {
MockDirectoryWrapper dir = newDirectory(); MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this // Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in // test uses IW.rollback which easily results in
@ -268,44 +268,44 @@ public class TestIndexWriterCommit extends LuceneTestCase {
writer.close(); writer.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
// Open a reader before closing (committing) the writer: // Open a reader before closing (committing) the writer:
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this // Reader should see index as multi-seg at this
// point: // point:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized()); assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1);
reader.close(); reader.close();
// Abort the writer: // Abort the writer:
writer.rollback(); writer.rollback();
TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize"); TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
// Open a reader after aborting writer: // Open a reader after aborting writer:
reader = IndexReader.open(dir, true); reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized: // Reader should still see index as multi-segment
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized()); assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1);
reader.close(); reader.close();
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: do real optimize"); System.out.println("TEST: do real full merge");
} }
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: writer closed"); System.out.println("TEST: writer closed");
} }
TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize"); TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
// Open a reader after aborting writer: // Open a reader after aborting writer:
reader = IndexReader.open(dir, true); reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized: // Reader should see index as one segment
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized()); assertEquals("Reader incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().length);
reader.close(); reader.close();
dir.close(); dir.close();
} }
@ -657,7 +657,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
r.close(); r.close();
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.optimize(); w.forceMerge(1);
w.close(); w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label")); assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));

View File

@ -64,7 +64,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
doc.add(newField("city", text[i], TextField.TYPE_STORED)); doc.add(newField("city", text[i], TextField.TYPE_STORED));
modifier.addDocument(doc); modifier.addDocument(doc);
} }
modifier.optimize(); modifier.forceMerge(1);
modifier.commit(); modifier.commit();
Term term = new Term("city", "Amsterdam"); Term term = new Term("city", "Amsterdam");
@ -711,10 +711,10 @@ public class TestIndexWriterDelete extends LuceneTestCase {
// flush (and commit if ac) // flush (and commit if ac)
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: now optimize"); System.out.println("TEST: now full merge");
} }
modifier.optimize(); modifier.forceMerge(1);
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: now commit"); System.out.println("TEST: now commit");
} }

View File

@ -17,13 +17,10 @@ package org.apache.lucene.index;
* limitations under the License. * limitations under the License.
*/ */
import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader; import java.io.Reader;
import java.io.StringReader; import java.io.StringReader;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
@ -657,7 +654,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
for(int j=0;j<17;j++) for(int j=0;j<17;j++)
writer.addDocument(doc); writer.addDocument(doc);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
reader = IndexReader.open(dir, true); reader = IndexReader.open(dir, true);
@ -771,7 +768,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
for(int j=0;j<17;j++) for(int j=0;j<17;j++)
writer.addDocument(doc); writer.addDocument(doc);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
reader = IndexReader.open(dir, true); reader = IndexReader.open(dir, true);
@ -927,7 +924,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
} }
} }
public void testOptimizeExceptions() throws IOException { public void testForceMergeExceptions() throws IOException {
Directory startDir = newDirectory(); Directory startDir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
@ -947,10 +944,10 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
w = new IndexWriter(dir, conf); w = new IndexWriter(dir, conf);
dir.setRandomIOExceptionRate(0.5); dir.setRandomIOExceptionRate(0.5);
try { try {
w.optimize(); w.forceMerge(1);
} catch (IOException ioe) { } catch (IOException ioe) {
if (ioe.getCause() == null) if (ioe.getCause() == null)
fail("optimize threw IOException without root cause"); fail("forceMerge threw IOException without root cause");
} }
dir.setRandomIOExceptionRate(0); dir.setRandomIOExceptionRate(0);
w.close(); w.close();

View File

@ -28,8 +28,8 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil; import org.apache.lucene.util._TestUtil;
public class TestIndexWriterOptimize extends LuceneTestCase { public class TestIndexWriterForceMerge extends LuceneTestCase {
public void testOptimizeMaxNumSegments() throws IOException { public void testPartialMerge() throws IOException {
MockDirectoryWrapper dir = newDirectory(); MockDirectoryWrapper dir = newDirectory();
@ -56,7 +56,7 @@ public class TestIndexWriterOptimize extends LuceneTestCase {
ldmp.setMergeFactor(5); ldmp.setMergeFactor(5);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
new MockAnalyzer(random)).setMergePolicy(ldmp)); new MockAnalyzer(random)).setMergePolicy(ldmp));
writer.optimize(3); writer.forceMerge(3);
writer.close(); writer.close();
sis = new SegmentInfos(); sis = new SegmentInfos();
@ -71,7 +71,7 @@ public class TestIndexWriterOptimize extends LuceneTestCase {
dir.close(); dir.close();
} }
public void testOptimizeMaxNumSegments2() throws IOException { public void testMaxNumSegments2() throws IOException {
MockDirectoryWrapper dir = newDirectory(); MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document(); final Document doc = new Document();
@ -97,7 +97,7 @@ public class TestIndexWriterOptimize extends LuceneTestCase {
final int segCount = sis.size(); final int segCount = sis.size();
writer.optimize(7); writer.forceMerge(7);
writer.commit(); writer.commit();
writer.waitForMerges(); writer.waitForMerges();
@ -115,11 +115,11 @@ public class TestIndexWriterOptimize extends LuceneTestCase {
} }
/** /**
* Make sure optimize doesn't use any more than 1X * Make sure forceMerge doesn't use any more than 1X
* starting index size as its temporary free space * starting index size as its temporary free space
* required. * required.
*/ */
public void testOptimizeTempSpaceUsage() throws IOException { public void testForceMergeTempSpaceUsage() throws IOException {
MockDirectoryWrapper dir = newDirectory(); MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy())); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
@ -156,18 +156,18 @@ public class TestIndexWriterOptimize extends LuceneTestCase {
// smaller one here could increase the disk usage and // smaller one here could increase the disk usage and
// cause a false failure: // cause a false failure:
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy())); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy()));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes(); long maxDiskUsage = dir.getMaxUsedSizeInBytes();
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)", assertTrue("forceMerge used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
maxDiskUsage <= 4*startDiskUsage); maxDiskUsage <= 4*startDiskUsage);
dir.close(); dir.close();
} }
// Test calling optimize(false) whereby optimize is kicked // Test calling forceMerge(1, false) whereby forceMerge is kicked
// off but we don't wait for it to finish (but // off but we don't wait for it to finish (but
// writer.close() does wait) // writer.close() does wait)
public void testBackgroundOptimize() throws IOException { public void testBackgroundForceMerge() throws IOException {
Directory dir = newDirectory(); Directory dir = newDirectory();
for(int pass=0;pass<2;pass++) { for(int pass=0;pass<2;pass++) {
@ -182,22 +182,22 @@ public class TestIndexWriterOptimize extends LuceneTestCase {
doc.add(newField("field", "aaa", StringField.TYPE_UNSTORED)); doc.add(newField("field", "aaa", StringField.TYPE_UNSTORED));
for(int i=0;i<100;i++) for(int i=0;i<100;i++)
writer.addDocument(doc); writer.addDocument(doc);
writer.optimize(false); writer.forceMerge(1, false);
if (0 == pass) { if (0 == pass) {
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized()); assertEquals(1, reader.getSequentialSubReaders().length);
reader.close(); reader.close();
} else { } else {
// Get another segment to flush so we can verify it is // Get another segment to flush so we can verify it is
// NOT included in the optimization // NOT included in the merging
writer.addDocument(doc); writer.addDocument(doc);
writer.addDocument(doc); writer.addDocument(doc);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized()); assertTrue(reader.getSequentialSubReaders().length > 1);
reader.close(); reader.close();
SegmentInfos infos = new SegmentInfos(); SegmentInfos infos = new SegmentInfos();
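Taken together, the hunks above show the calling pattern this commit establishes in the tests: optimize(false) becomes forceMerge(1, false), and the removed reader.isOptimized() check is expressed as a count of sequential sub-readers. A minimal sketch of the new usage, not part of this commit, assuming the usual LuceneTestCase scaffolding (newDirectory(), newIndexWriterConfig(), newField(), random) that these tests rely on:

    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    Document doc = new Document();
    doc.add(newField("field", "aaa", StringField.TYPE_UNSTORED));
    for (int i = 0; i < 100; i++) {
      writer.addDocument(doc);
    }
    writer.forceMerge(1, false); // kick off a merge down to one segment, don't wait for it
    writer.close();              // close() does wait for the running merge to finish

    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(1, reader.getSequentialSubReaders().length); // replaces reader.isOptimized()
    reader.close();
    dir.close();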
View File
@ -64,7 +64,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
setMergePolicy(newLogMergePolicy(2)) setMergePolicy(newLogMergePolicy(2))
); );
writer.addIndexes(indexA, indexB); writer.addIndexes(indexA, indexB);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
fail = verifyIndex(merged, 0); fail = verifyIndex(merged, 0);
View File
@ -24,7 +24,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType; import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField; import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.codecs.Codec;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
@ -180,7 +179,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
} }
// Now, build a starting index that has START_COUNT docs. We // Now, build a starting index that has START_COUNT docs. We
// will then try to addIndexesNoOptimize into a copy of this: // will then try to addIndexes into a copy of this:
MockDirectoryWrapper startDir = newDirectory(); MockDirectoryWrapper startDir = newDirectory();
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for(int j=0;j<START_COUNT;j++) { for(int j=0;j<START_COUNT;j++) {
@ -234,7 +233,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
String methodName; String methodName;
if (0 == method) { if (0 == method) {
methodName = "addIndexes(Directory[]) + optimize()"; methodName = "addIndexes(Directory[]) + forceMerge(1)";
} else if (1 == method) { } else if (1 == method) {
methodName = "addIndexes(IndexReader[])"; methodName = "addIndexes(IndexReader[])";
} else { } else {
@ -303,7 +302,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
if (0 == method) { if (0 == method) {
writer.addIndexes(dirs); writer.addIndexes(dirs);
writer.optimize(); writer.forceMerge(1);
} else if (1 == method) { } else if (1 == method) {
IndexReader readers[] = new IndexReader[dirs.length]; IndexReader readers[] = new IndexReader[dirs.length];
for(int i=0;i<dirs.length;i++) { for(int i=0;i<dirs.length;i++) {
View File
@ -92,7 +92,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertFalse(reader.isCurrent()); assertFalse(reader.isCurrent());
reader.close(); reader.close();
} }
writer.optimize(); // make sure all merging is done etc. writer.forceMerge(1); // make sure all merging is done etc.
IndexReader reader = writer.getReader(); IndexReader reader = writer.getReader();
writer.commit(); // no changes that are not visible to the reader writer.commit(); // no changes that are not visible to the reader
assertTrue(reader.isCurrent()); assertTrue(reader.isCurrent());
@ -110,7 +110,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
} }
public void testUpdateDocument() throws Exception { public void testUpdateDocument() throws Exception {
boolean optimize = true; boolean doFullMerge = true;
Directory dir1 = newDirectory(); Directory dir1 = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
@ -129,7 +129,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir1, iwc); IndexWriter writer = new IndexWriter(dir1, iwc);
// create the index // create the index
createIndexNoClose(!optimize, "index1", writer); createIndexNoClose(!doFullMerge, "index1", writer);
// writer.flush(false, true, true); // writer.flush(false, true, true);
@ -199,7 +199,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertTrue(nrtReader.isCurrent()); assertTrue(nrtReader.isCurrent());
writer.addDocument(doc); writer.addDocument(doc);
assertFalse(nrtReader.isCurrent()); // should see the changes assertFalse(nrtReader.isCurrent()); // should see the changes
writer.optimize(); // make sure we don't have a merge going on writer.forceMerge(1); // make sure we don't have a merge going on
assertFalse(nrtReader.isCurrent()); assertFalse(nrtReader.isCurrent());
nrtReader.close(); nrtReader.close();
@ -225,7 +225,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
* @throws Exception * @throws Exception
*/ */
public void testAddIndexes() throws Exception { public void testAddIndexes() throws Exception {
boolean optimize = false; boolean doFullMerge = false;
Directory dir1 = newDirectory(); Directory dir1 = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
@ -241,13 +241,13 @@ public class TestIndexWriterReader extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir1, iwc); IndexWriter writer = new IndexWriter(dir1, iwc);
// create the index // create the index
createIndexNoClose(!optimize, "index1", writer); createIndexNoClose(!doFullMerge, "index1", writer);
writer.flush(false, true); writer.flush(false, true);
// create a 2nd index // create a 2nd index
Directory dir2 = newDirectory(); Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
createIndexNoClose(!optimize, "index2", writer2); createIndexNoClose(!doFullMerge, "index2", writer2);
writer2.close(); writer2.close();
IndexReader r0 = writer.getReader(); IndexReader r0 = writer.getReader();
@ -280,7 +280,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
} }
public void testAddIndexes2() throws Exception { public void testAddIndexes2() throws Exception {
boolean optimize = false; boolean doFullMerge = false;
Directory dir1 = newDirectory(); Directory dir1 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
@ -288,7 +288,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
// create a 2nd index // create a 2nd index
Directory dir2 = newDirectory(); Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
createIndexNoClose(!optimize, "index2", writer2); createIndexNoClose(!doFullMerge, "index2", writer2);
writer2.close(); writer2.close();
writer.addIndexes(dir2); writer.addIndexes(dir2);
@ -312,12 +312,12 @@ public class TestIndexWriterReader extends LuceneTestCase {
* @throws Exception * @throws Exception
*/ */
public void testDeleteFromIndexWriter() throws Exception { public void testDeleteFromIndexWriter() throws Exception {
boolean optimize = true; boolean doFullMerge = true;
Directory dir1 = newDirectory(); Directory dir1 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2)); IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2));
// create the index // create the index
createIndexNoClose(!optimize, "index1", writer); createIndexNoClose(!doFullMerge, "index1", writer);
writer.flush(false, true); writer.flush(false, true);
// get a reader // get a reader
IndexReader r1 = writer.getReader(); IndexReader r1 = writer.getReader();
@ -487,7 +487,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
switch (j % 4) { switch (j % 4) {
case 0: case 0:
mainWriter.addIndexes(dirs); mainWriter.addIndexes(dirs);
mainWriter.optimize(); mainWriter.forceMerge(1);
break; break;
case 1: case 1:
mainWriter.addIndexes(dirs); mainWriter.addIndexes(dirs);
@ -503,7 +503,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
} }
} }
public void testIndexWriterReopenSegmentOptimize() throws Exception { public void testIndexWriterReopenSegmentFullMerge() throws Exception {
doTestIndexWriterReopenSegment(true); doTestIndexWriterReopenSegment(true);
} }
@ -515,13 +515,13 @@ public class TestIndexWriterReader extends LuceneTestCase {
* Tests creating a segment, then checks to ensure the segment can be seen via * Tests creating a segment, then checks to ensure the segment can be seen via
* IW.getReader * IW.getReader
*/ */
public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception { public void doTestIndexWriterReopenSegment(boolean doFullMerge) throws Exception {
Directory dir1 = newDirectory(); Directory dir1 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
IndexReader r1 = writer.getReader(); IndexReader r1 = writer.getReader();
assertEquals(0, r1.maxDoc()); assertEquals(0, r1.maxDoc());
createIndexNoClose(false, "index1", writer); createIndexNoClose(false, "index1", writer);
writer.flush(!optimize, true); writer.flush(!doFullMerge, true);
IndexReader iwr1 = writer.getReader(); IndexReader iwr1 = writer.getReader();
assertEquals(100, iwr1.maxDoc()); assertEquals(100, iwr1.maxDoc());
@ -581,7 +581,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
w.addDocument(DocHelper.createDocument(i, indexName, 4)); w.addDocument(DocHelper.createDocument(i, indexName, 4));
} }
if (!multiSegment) { if (!multiSegment) {
w.optimize(); w.forceMerge(1);
} }
w.close(); w.close();
} }
@ -592,7 +592,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
w.addDocument(DocHelper.createDocument(i, indexName, 4)); w.addDocument(DocHelper.createDocument(i, indexName, 4));
} }
if (!multiSegment) { if (!multiSegment) {
w.optimize(); w.forceMerge(1);
} }
} }
@ -636,7 +636,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
final int count = warmer.warmCount; final int count = warmer.warmCount;
writer.addDocument(DocHelper.createDocument(17, "test", 4)); writer.addDocument(DocHelper.createDocument(17, "test", 4));
writer.optimize(); writer.forceMerge(1);
assertTrue(warmer.warmCount > count); assertTrue(warmer.warmCount > count);
writer.close(); writer.close();
View File
@ -326,9 +326,9 @@ public class TestIndexWriterUnicode extends LuceneTestCase {
// Test multi segment // Test multi segment
r.close(); r.close();
writer.optimize(); writer.forceMerge(1);
// Test optimized single segment // Test single segment
r = writer.getReader(); r = writer.getReader();
checkTermsOrder(r, allTerms, true); checkTermsOrder(r, allTerms, true);
r.close(); r.close();
View File
@ -102,7 +102,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
} }
// make sure the index has only a single segment // make sure the index has only a single segment
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory, false)); SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory, false));
View File
@ -77,7 +77,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
writer.addDocument(d1); writer.addDocument(d1);
} }
writer.commit(); writer.commit();
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = getOnlySegmentReader(IndexReader.open(dir)); IndexReader reader = getOnlySegmentReader(IndexReader.open(dir));
View File
@ -25,7 +25,7 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec; import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec;
// TODO // TODO
// - mix in optimize, addIndexes // - mix in forceMerge, addIndexes
// - randomly mix in non-congruent docs // - randomly mix in non-congruent docs
@UseNoMemoryExpensiveCodec @UseNoMemoryExpensiveCodec
View File
@ -31,7 +31,7 @@ public class TestNoMergePolicy extends LuceneTestCase {
public void testNoMergePolicy() throws Exception { public void testNoMergePolicy() throws Exception {
MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES; MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES;
assertNull(mp.findMerges(null)); assertNull(mp.findMerges(null));
assertNull(mp.findMergesForOptimize(null, 0, null)); assertNull(mp.findForcedMerges(null, 0, null));
assertNull(mp.findMergesToExpungeDeletes(null)); assertNull(mp.findMergesToExpungeDeletes(null));
assertFalse(mp.useCompoundFile(null, null)); assertFalse(mp.useCompoundFile(null, null));
mp.close(); mp.close();
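The MergePolicy hooks get matching names: findMergesForOptimize becomes findForcedMerges (the full signature appears in the TestPerSegmentDeletes hunk further down). A hedged sketch, not part of this commit, of a writer using the existing NO_COMPOUND_FILES no-merge policy after the rename; with that policy installed findForcedMerges returns null, so a forceMerge request simply finds nothing to do. A Directory named dir from newDirectory() is assumed:

    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
    IndexWriter w = new IndexWriter(dir, conf);
    w.forceMerge(1); // the policy returns no forced-merge specification, so nothing is merged
    w.close();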
View File
@ -36,7 +36,7 @@ import org.apache.lucene.util.LuceneTestCase;
/** /**
* Test that norms info is preserved during index life - including * Test that norms info is preserved during index life - including
* separate norms, addDocument, addIndexes, optimize. * separate norms, addDocument, addIndexes, forceMerge.
*/ */
public class TestNorms extends LuceneTestCase { public class TestNorms extends LuceneTestCase {
@ -74,7 +74,7 @@ public class TestNorms extends LuceneTestCase {
* Test that norms values are preserved as the index is maintained. * Test that norms values are preserved as the index is maintained.
* Including separate norms. * Including separate norms.
* Including merging indexes with separate norms. * Including merging indexes with separate norms.
* Including optimize. * Including forceMerge.
*/ */
public void testNorms() throws IOException { public void testNorms() throws IOException {
Directory dir1 = newDirectory(); Directory dir1 = newDirectory();
@ -111,7 +111,7 @@ public class TestNorms extends LuceneTestCase {
setMergePolicy(newLogMergePolicy(3)) setMergePolicy(newLogMergePolicy(3))
); );
iw.addIndexes(dir1,dir2); iw.addIndexes(dir1,dir2);
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
norms1.addAll(norms); norms1.addAll(norms);
@ -124,7 +124,7 @@ public class TestNorms extends LuceneTestCase {
verifyIndex(dir3); verifyIndex(dir3);
doTestNorms(random, dir3); doTestNorms(random, dir3);
// now with optimize // now with single segment
iw = new IndexWriter( iw = new IndexWriter(
dir3, dir3,
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr). newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
@ -132,7 +132,7 @@ public class TestNorms extends LuceneTestCase {
setMaxBufferedDocs(5). setMaxBufferedDocs(5).
setMergePolicy(newLogMergePolicy(3)) setMergePolicy(newLogMergePolicy(3))
); );
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
verifyIndex(dir3); verifyIndex(dir3);
View File
@ -49,7 +49,7 @@ public class TestOmitNorms extends LuceneTestCase {
d.add(f2); d.add(f2);
writer.addDocument(d); writer.addDocument(d);
writer.optimize(); writer.forceMerge(1);
// now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger // now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger
// keep things constant // keep things constant
d = new Document(); d = new Document();
@ -62,7 +62,7 @@ public class TestOmitNorms extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -116,7 +116,7 @@ public class TestOmitNorms extends LuceneTestCase {
} }
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -163,7 +163,7 @@ public class TestOmitNorms extends LuceneTestCase {
} }
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -209,7 +209,7 @@ public class TestOmitNorms extends LuceneTestCase {
assertNoNrm(ram); assertNoNrm(ram);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -221,7 +221,7 @@ public class TestOmitNorms extends LuceneTestCase {
* Tests various combinations of omitNorms=true/false, the field not existing at all, * Tests various combinations of omitNorms=true/false, the field not existing at all,
* ensuring that only omitNorms is 'viral'. * ensuring that only omitNorms is 'viral'.
* Internally checks that MultiNorms.norms() is consistent (returns the same bytes) * Internally checks that MultiNorms.norms() is consistent (returns the same bytes)
* as the optimized equivalent. * as the fully merged equivalent.
*/ */
public void testOmitNormsCombos() throws IOException { public void testOmitNormsCombos() throws IOException {
// indexed with norms // indexed with norms
@ -290,8 +290,8 @@ public class TestOmitNorms extends LuceneTestCase {
IndexReader ir1 = riw.getReader(); IndexReader ir1 = riw.getReader();
byte[] norms1 = MultiNorms.norms(ir1, field); byte[] norms1 = MultiNorms.norms(ir1, field);
// optimize and validate MultiNorms against single segment. // fully merge and validate MultiNorms against single segment.
riw.optimize(); riw.forceMerge(1);
IndexReader ir2 = riw.getReader(); IndexReader ir2 = riw.getReader();
byte[] norms2 = ir2.getSequentialSubReaders()[0].norms(field); byte[] norms2 = ir2.getSequentialSubReaders()[0].norms(field);
View File
@ -109,7 +109,7 @@ public class TestOmitPositions extends LuceneTestCase {
d.add(f9); d.add(f9);
writer.addDocument(d); writer.addDocument(d);
writer.optimize(); writer.forceMerge(1);
// now we add another document which has docs-only for f1, f4, f7, docs/freqs for f2, f5, f8, // now we add another document which has docs-only for f1, f4, f7, docs/freqs for f2, f5, f8,
// and docs/freqs/positions for f3, f6, f9 // and docs/freqs/positions for f3, f6, f9
@ -148,7 +148,7 @@ public class TestOmitPositions extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -217,7 +217,7 @@ public class TestOmitPositions extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
View File
@ -81,7 +81,7 @@ public class TestOmitTf extends LuceneTestCase {
d.add(f2); d.add(f2);
writer.addDocument(d); writer.addDocument(d);
writer.optimize(); writer.forceMerge(1);
// now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger // now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger
// keep things constant // keep things constant
d = new Document(); d = new Document();
@ -96,7 +96,7 @@ public class TestOmitTf extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -148,7 +148,7 @@ public class TestOmitTf extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -190,7 +190,7 @@ public class TestOmitTf extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -233,7 +233,8 @@ public class TestOmitTf extends LuceneTestCase {
assertNoPrx(ram); assertNoPrx(ram);
// now add some documents with positions, and check there is no prox after optimization // now add some documents with positions, and check
// there is no prox after full merge
d = new Document(); d = new Document();
f1 = newField("f1", "This field has positions", TextField.TYPE_UNSTORED); f1 = newField("f1", "This field has positions", TextField.TYPE_UNSTORED);
d.add(f1); d.add(f1);
@ -242,7 +243,7 @@ public class TestOmitTf extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -278,7 +279,7 @@ public class TestOmitTf extends LuceneTestCase {
//System.out.println(d); //System.out.println(d);
} }
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -413,7 +414,7 @@ public class TestOmitTf extends LuceneTestCase {
@Override @Override
public void collect(int doc) throws IOException { public void collect(int doc) throws IOException {
count++; count++;
sum += doc + docBase; // use it to avoid any possibility of being optimized away sum += doc + docBase; // use it to avoid any possibility of being merged away
} }
public static int getCount() { return count; } public static int getCount() { return count; }
View File
@ -144,64 +144,6 @@ public class TestParallelReader extends LuceneTestCase {
dir2.close(); dir2.close();
} }
public void testIsOptimized() throws IOException {
Directory dir1 = getDir1(random);
Directory dir2 = getDir2(random);
// add another document to ensure that the indexes are not optimized
IndexWriter modifier = new IndexWriter(
dir1,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setMergePolicy(newLogMergePolicy(10))
);
Document d = new Document();
d.add(newField("f1", "v1", TextField.TYPE_STORED));
modifier.addDocument(d);
modifier.close();
modifier = new IndexWriter(
dir2,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setMergePolicy(newLogMergePolicy(10))
);
d = new Document();
d.add(newField("f2", "v2", TextField.TYPE_STORED));
modifier.addDocument(d);
modifier.close();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
assertFalse(pr.isOptimized());
pr.close();
modifier = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
modifier.optimize();
modifier.close();
pr = new ParallelReader();
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
// just one of the two indexes are optimized
assertFalse(pr.isOptimized());
pr.close();
modifier = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
modifier.optimize();
modifier.close();
pr = new ParallelReader();
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
// now both indexes are optimized
assertTrue(pr.isOptimized());
pr.close();
dir1.close();
dir2.close();
}
private void queryTest(Query query) throws IOException { private void queryTest(Query query) throws IOException {
ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs; ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs; ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs;
View File
@ -58,7 +58,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
// When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum) // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
iwOut.addIndexes(pr); iwOut.addIndexes(pr);
iwOut.optimize(); iwOut.forceMerge(1);
iwOut.close(); iwOut.close();
rdOut.close(); rdOut.close();
rd1.close(); rd1.close();
@ -88,7 +88,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
ir.close(); ir.close();
iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
} }
@ -116,7 +116,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
rd1.close(); rd1.close();
rd2.close(); rd2.close();
iwOut.optimize(); iwOut.forceMerge(1);
iwOut.close(); iwOut.close();
rdOut.close(); rdOut.close();
View File
@ -245,7 +245,7 @@ public class TestPayloadProcessorProvider extends LuceneTestCase {
processors.put(dir, new PerTermPayloadProcessor()); processors.put(dir, new PerTermPayloadProcessor());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors)); writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
verifyPayloadExists(dir, "p", new BytesRef("p1"), 0); verifyPayloadExists(dir, "p", new BytesRef("p1"), 0);
View File
@ -134,7 +134,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
// force merge // force merge
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -204,7 +204,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(d, analyzer); writer.addDocument(d, analyzer);
} }
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -322,7 +322,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(d); writer.addDocument(d);
writer.optimize(); writer.forceMerge(1);
// flush // flush
writer.close(); writer.close();
@ -621,7 +621,7 @@ public class TestPayloads extends LuceneTestCase {
doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED)); doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED));
writer.addDocument(doc); writer.addDocument(doc);
writer.addDocument(doc); writer.addDocument(doc);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
dir.close(); dir.close();
View File
@ -267,8 +267,8 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
} }
@Override @Override
public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException { throws CorruptIndexException, IOException {
return null; return null;
} }
View File
@ -120,7 +120,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
addDoc(writer, "ccc ccc ccc ccc"); addDoc(writer, "ccc ccc ccc ccc");
// assure that we deal with a single segment // assure that we deal with a single segment
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, null, true, indexDivisor); IndexReader reader = IndexReader.open(dir, null, true, indexDivisor);
View File
@ -61,15 +61,15 @@ public class TestSegmentTermEnum extends LuceneTestCase {
writer.close(); writer.close();
// verify document frequency of terms in an unoptimized index // verify document frequency of terms in a multi segment index
verifyDocFreq(); verifyDocFreq();
// merge segments by optimizing the index // merge segments
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// verify document frequency of terms in an optimized index // verify document frequency of terms in a single segment index
verifyDocFreq(); verifyDocFreq();
} }
View File
@ -24,7 +24,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase;
public class TestSizeBoundedOptimize extends LuceneTestCase { public class TestSizeBoundedForceMerge extends LuceneTestCase {
private void addDocs(IndexWriter writer, int numDocs) throws IOException { private void addDocs(IndexWriter writer, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) { for (int i = 0; i < numDocs; i++) {
@ -44,7 +44,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
} }
public void testByteSizeLimit() throws Exception { public void testByteSizeLimit() throws Exception {
// tests that the max merge size constraint is applied during optimize. // tests that the max merge size constraint is applied during forceMerge.
Directory dir = new RAMDirectory(); Directory dir = new RAMDirectory();
// Prepare an index w/ several small segments and a large one. // Prepare an index w/ several small segments and a large one.
@ -63,11 +63,11 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf = newWriterConfig(); conf = newWriterConfig();
LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(); LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
lmp.setMaxMergeMBForOptimize((min + 1) / (1 << 20)); lmp.setMaxMergeMBForForcedMerge((min + 1) / (1 << 20));
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Should only be 3 segments in the index, because one of them exceeds the size limit // Should only be 3 segments in the index, because one of them exceeds the size limit
@ -77,7 +77,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
} }
public void testNumDocsLimit() throws Exception { public void testNumDocsLimit() throws Exception {
// tests that the max merge docs constraint is applied during optimize. // tests that the max merge docs constraint is applied during forceMerge.
Directory dir = new RAMDirectory(); Directory dir = new RAMDirectory();
// Prepare an index w/ several small segments and a large one. // Prepare an index w/ several small segments and a large one.
@ -100,7 +100,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Should only be 3 segments in the index, because one of them exceeds the size limit // Should only be 3 segments in the index, because one of them exceeds the size limit
@ -128,7 +128,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
@ -155,7 +155,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
@ -182,7 +182,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
@ -208,7 +208,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
@ -235,7 +235,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
SegmentInfos sis = new SegmentInfos(); SegmentInfos sis = new SegmentInfos();
@ -266,7 +266,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Should only be 4 segments in the index, because of the merge factor and // Should only be 4 segments in the index, because of the merge factor and
@ -276,7 +276,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
assertEquals(4, sis.size()); assertEquals(4, sis.size());
} }
public void testSingleNonOptimizedSegment() throws Exception { public void testSingleMergeableSegment() throws Exception {
Directory dir = new RAMDirectory(); Directory dir = new RAMDirectory();
IndexWriterConfig conf = newWriterConfig(); IndexWriterConfig conf = newWriterConfig();
@ -288,7 +288,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
writer.close(); writer.close();
// delete the last document, so that the last segment is optimized. // delete the last document, so that the last segment is merged.
IndexReader r = IndexReader.open(dir, false); IndexReader r = IndexReader.open(dir, false);
r.deleteDocument(r.numDocs() - 1); r.deleteDocument(r.numDocs() - 1);
r.close(); r.close();
@ -299,7 +299,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Verify that the last segment does not have deletions. // Verify that the last segment does not have deletions.
@ -309,7 +309,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
assertFalse(sis.info(2).hasDeletions()); assertFalse(sis.info(2).hasDeletions());
} }
public void testSingleOptimizedSegment() throws Exception { public void testSingleNonMergeableSegment() throws Exception {
Directory dir = new RAMDirectory(); Directory dir = new RAMDirectory();
IndexWriterConfig conf = newWriterConfig(); IndexWriterConfig conf = newWriterConfig();
@ -325,7 +325,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Verify that the last segment does not have deletions. // Verify that the last segment does not have deletions.
@ -334,7 +334,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
assertEquals(1, sis.size()); assertEquals(1, sis.size());
} }
public void testSingleNonOptimizedTooLargeSegment() throws Exception { public void testSingleMergeableTooLargeSegment() throws Exception {
Directory dir = new RAMDirectory(); Directory dir = new RAMDirectory();
IndexWriterConfig conf = newWriterConfig(); IndexWriterConfig conf = newWriterConfig();
@ -355,7 +355,7 @@ public class TestSizeBoundedOptimize extends LuceneTestCase {
conf.setMergePolicy(lmp); conf.setMergePolicy(lmp);
writer = new IndexWriter(dir, conf); writer = new IndexWriter(dir, conf);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
// Verify that the last segment does not have deletions. // Verify that the last segment does not have deletions.
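The size-bounded knobs follow the same rename: LogByteSizeMergePolicy.setMaxMergeMBForOptimize becomes setMaxMergeMBForForcedMerge, so a forced merge can still be told to leave oversized segments alone, as the tests above verify by counting the segments left behind. A rough sketch of that configuration, not taken from this commit, with dir and the LuceneTestCase helpers assumed from the surrounding setup:

    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
    lmp.setMaxMergeMBForForcedMerge(1.0); // segments over 1 MB are skipped during forceMerge
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random));
    conf.setMergePolicy(lmp);

    IndexWriter writer = new IndexWriter(dir, conf);
    writer.forceMerge(1); // merges what the policy allows; oversized segments stay separate
    writer.close();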
View File
@ -52,7 +52,7 @@ public class TestStressAdvance extends LuceneTestCase {
w.addDocument(doc); w.addDocument(doc);
} }
w.optimize(); w.forceMerge(1);
final List<Integer> aDocIDs = new ArrayList<Integer>(); final List<Integer> aDocIDs = new ArrayList<Integer>();
final List<Integer> bDocIDs = new ArrayList<Integer>(); final List<Integer> bDocIDs = new ArrayList<Integer>();
View File
@ -179,7 +179,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
threads[i].join(); threads[i].join();
} }
// w.optimize(); // w.forceMerge(1);
//w.close(); //w.close();
for (int i=0; i<threads.length; i++) { for (int i=0; i<threads.length; i++) {
@ -224,7 +224,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
threads[i].join(); threads[i].join();
} }
//w.optimize(); //w.forceMerge(1);
w.close(); w.close();
for (int i=0; i<threads.length; i++) { for (int i=0; i<threads.length; i++) {
View File
@ -107,7 +107,7 @@ public class TestStressNRT extends LuceneTestCase {
Directory dir = newDirectory(); Directory dir = newDirectory();
final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.setDoRandomOptimizeAssert(false); writer.setDoRandomForceMergeAssert(false);
writer.commit(); writer.commit();
reader = IndexReader.open(dir); reader = IndexReader.open(dir);
View File
@ -66,7 +66,7 @@ public class TestSumDocFreq extends LuceneTestCase {
ir.close(); ir.close();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.optimize(); w.forceMerge(1);
w.close(); w.close();
ir = IndexReader.open(dir, true); ir = IndexReader.open(dir, true);
View File
@ -315,7 +315,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
document.add(termVectorField); document.add(termVectorField);
writer.addDocument(document); writer.addDocument(document);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);
@ -333,7 +333,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
Directory[] indexDirs = {new MockDirectoryWrapper(random, new RAMDirectory(dir, newIOContext(random)))}; Directory[] indexDirs = {new MockDirectoryWrapper(random, new RAMDirectory(dir, newIOContext(random)))};
writer.addIndexes(indexDirs); writer.addIndexes(indexDirs);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
} }
dir.close(); dir.close();
@ -369,7 +369,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
Field termVectorField = newField("termVector", "termVector", customType2); Field termVectorField = newField("termVector", "termVector", customType2);
document.add(termVectorField); document.add(termVectorField);
writer.addDocument(document); writer.addDocument(document);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);
@ -414,7 +414,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
for(int i=0;i<6;i++) for(int i=0;i<6;i++)
writer.addDocument(document); writer.addDocument(document);
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir, true); IndexReader reader = IndexReader.open(dir, true);
@ -452,7 +452,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
// Make 2nd segment // Make 2nd segment
iw.commit(); iw.commit();
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
dir.close(); dir.close();
} }
@ -476,7 +476,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
// Make first segment // Make first segment
iw.commit(); iw.commit();
iw.optimize(); iw.forceMerge(1);
FieldType customType2 = new FieldType(StringField.TYPE_UNSTORED); FieldType customType2 = new FieldType(StringField.TYPE_UNSTORED);
customType2.setStoreTermVectors(true); customType2.setStoreTermVectors(true);
@ -484,7 +484,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
iw.addDocument(document); iw.addDocument(document);
// Make 2nd segment // Make 2nd segment
iw.commit(); iw.commit();
iw.optimize(); iw.forceMerge(1);
iw.close(); iw.close();
dir.close(); dir.close();

View File

@ -98,7 +98,7 @@ public class TestTermdocPerf extends LuceneTestCase {
writer.addDocument(doc); writer.addDocument(doc);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
} }
View File
@ -510,7 +510,7 @@ public class TestTermsEnum extends LuceneTestCase {
w.addDocument(doc); w.addDocument(doc);
w.commit(); w.commit();
w.deleteDocuments(new Term("field", "one")); w.deleteDocuments(new Term("field", "one"));
w.optimize(); w.forceMerge(1);
IndexReader r = w.getReader(); IndexReader r = w.getReader();
w.close(); w.close();
assertEquals(1, r.numDocs()); assertEquals(1, r.numDocs());
View File
@ -31,7 +31,7 @@ import org.apache.lucene.util.LuceneTestCase;
import java.util.Random; import java.util.Random;
public class TestThreadedOptimize extends LuceneTestCase { public class TestThreadedForceMerge extends LuceneTestCase {
private static final Analyzer ANALYZER = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); private static final Analyzer ANALYZER = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
@ -85,7 +85,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
public void run() { public void run() {
try { try {
for(int j=0;j<NUM_ITER2;j++) { for(int j=0;j<NUM_ITER2;j++) {
writerFinal.optimize(false); writerFinal.forceMerge(1, false);
for(int k=0;k<17*(1+iFinal);k++) { for(int k=0;k<17*(1+iFinal);k++) {
Document d = new Document(); Document d = new Document();
d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, customType)); d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, customType));
@ -94,7 +94,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
} }
for(int k=0;k<9*(1+iFinal);k++) for(int k=0;k<9*(1+iFinal);k++)
writerFinal.deleteDocuments(new Term("id", iterFinal + "_" + iFinal + "_" + j + "_" + k)); writerFinal.deleteDocuments(new Term("id", iterFinal + "_" + iFinal + "_" + j + "_" + k));
writerFinal.optimize(); writerFinal.forceMerge(1);
} }
} catch (Throwable t) { } catch (Throwable t) {
setFailed(); setFailed();
@ -124,7 +124,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
OpenMode.APPEND).setMaxBufferedDocs(2)); OpenMode.APPEND).setMaxBufferedDocs(2));
IndexReader reader = IndexReader.open(directory, true); IndexReader reader = IndexReader.open(directory, true);
assertTrue("reader=" + reader, reader.isOptimized()); assertEquals("reader=" + reader, 1, reader.getSequentialSubReaders().length);
assertEquals(expectedDocCount, reader.numDocs()); assertEquals(expectedDocCount, reader.numDocs());
reader.close(); reader.close();
} }
@ -135,7 +135,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
Run above stress test against RAMDirectory and then Run above stress test against RAMDirectory and then
FSDirectory. FSDirectory.
*/ */
public void testThreadedOptimize() throws Exception { public void testThreadedForceMerge() throws Exception {
Directory directory = newDirectory(); Directory directory = newDirectory();
runTest(random, directory); runTest(random, directory);
directory.close(); directory.close();
View File
@ -65,7 +65,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
dir.close(); dir.close();
} }
public void testPartialOptimize() throws Exception { public void testPartialMerge() throws Exception {
int num = atLeast(10); int num = atLeast(10);
for(int iter=0;iter<num;iter++) { for(int iter=0;iter<num;iter++) {
if (VERBOSE) { if (VERBOSE) {
@ -97,9 +97,9 @@ public class TestTieredMergePolicy extends LuceneTestCase {
int segmentCount = w.getSegmentCount(); int segmentCount = w.getSegmentCount();
int targetCount = _TestUtil.nextInt(random, 1, segmentCount); int targetCount = _TestUtil.nextInt(random, 1, segmentCount);
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: optimize to " + targetCount + " segs (current count=" + segmentCount + ")"); System.out.println("TEST: merge to " + targetCount + " segs (current count=" + segmentCount + ")");
} }
w.optimize(targetCount); w.forceMerge(targetCount);
assertEquals(targetCount, w.getSegmentCount()); assertEquals(targetCount, w.getSegmentCount());
w.close(); w.close();
@ -116,7 +116,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
conf.setMergePolicy(tmp); conf.setMergePolicy(tmp);
final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf); final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf);
w.setDoRandomOptimize(false); w.setDoRandomForceMerge(false);
final int numDocs = atLeast(200); final int numDocs = atLeast(200);
for(int i=0;i<numDocs;i++) { for(int i=0;i<numDocs;i++) {
@ -126,7 +126,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
w.addDocument(doc); w.addDocument(doc);
} }
w.optimize(); w.forceMerge(1);
IndexReader r = w.getReader(); IndexReader r = w.getReader();
assertEquals(numDocs, r.maxDoc()); assertEquals(numDocs, r.maxDoc());
assertEquals(numDocs, r.numDocs()); assertEquals(numDocs, r.numDocs());
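The partial case carries over as well: where optimize(int) took a maximum segment count, forceMerge(int) does the same, so a caller can merge down to an arbitrary target rather than all the way to a single segment. A short sketch, not from this commit, again assuming dir and the LuceneTestCase helpers used throughout these tests:

    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
    Document doc = new Document();
    doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
    for (int i = 0; i < 50; i++) {
      w.addDocument(doc); // low maxBufferedDocs produces many small segments
    }
    w.forceMerge(5);      // merge down to at most 5 segments instead of fully to 1
    w.close();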
View File
@ -181,7 +181,7 @@ public class TestTermInfosReaderIndex extends LuceneTestCase {
} }
writer.addDocument(document); writer.addDocument(document);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
return config.getTermIndexInterval(); return config.getTermIndexInterval();
} }
View File
@ -17,7 +17,6 @@ package org.apache.lucene.index.codecs.perfield;
* limitations under the License. * limitations under the License.
*/ */
import java.io.IOException; import java.io.IOException;
import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
@ -107,7 +106,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
writer.commit(); writer.commit();
assertEquals(30, writer.maxDoc()); assertEquals(30, writer.maxDoc());
_TestUtil.checkIndex(dir); _TestUtil.checkIndex(dir);
writer.optimize(); writer.forceMerge(1);
assertEquals(30, writer.maxDoc()); assertEquals(30, writer.maxDoc());
writer.close(); writer.close();
dir.close(); dir.close();
@ -158,8 +157,6 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
addDocs2(writer, 10); addDocs2(writer, 10);
writer.commit(); writer.commit();
codec = (Lucene40Codec)iwconf.getCodec(); codec = (Lucene40Codec)iwconf.getCodec();
PostingsFormat origContentCodec = PostingsFormat.forName("MockSep");
PostingsFormat newContentCodec = PostingsFormat.forName("Lucene40");
assertEquals(30, writer.maxDoc()); assertEquals(30, writer.maxDoc());
assertQuery(new Term("content", "bbb"), dir, 10); assertQuery(new Term("content", "bbb"), dir, 10);
assertQuery(new Term("content", "ccc"), dir, 10); //// assertQuery(new Term("content", "ccc"), dir, 10); ////
@ -178,7 +175,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
if (VERBOSE) { if (VERBOSE) {
System.out.println("TEST: now optimize"); System.out.println("TEST: now optimize");
} }
writer.optimize(); writer.forceMerge(1);
assertEquals(40, writer.maxDoc()); assertEquals(40, writer.maxDoc());
writer.close(); writer.close();
assertQuery(new Term("content", "ccc"), dir, 10); assertQuery(new Term("content", "ccc"), dir, 10);
@ -260,7 +257,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
writer.addDocument(doc); writer.addDocument(doc);
} }
if (random.nextBoolean()) { if (random.nextBoolean()) {
writer.optimize(); writer.forceMerge(1);
} }
writer.commit(); writer.commit();
assertEquals((i + 1) * docsPerRound, writer.maxDoc()); assertEquals((i + 1) * docsPerRound, writer.maxDoc());
View File
@ -58,7 +58,7 @@ import org.junit.Before;
*/ */
public class TestDocValuesIndexing extends LuceneTestCase { public class TestDocValuesIndexing extends LuceneTestCase {
/* /*
* - add test for unoptimized case with deletes * - add test for multi segment case with deletes
* - add multithreaded tests / integrate into stress indexing? * - add multithreaded tests / integrate into stress indexing?
*/ */
@ -83,12 +83,12 @@ public class TestDocValuesIndexing extends LuceneTestCase {
writer.addDocument(doc); writer.addDocument(doc);
} }
writer.commit(); writer.commit();
writer.optimize(true); writer.forceMerge(1, true);
writer.close(true); writer.close(true);
IndexReader reader = IndexReader.open(dir, null, true, 1); IndexReader reader = IndexReader.open(dir, null, true, 1);
assertTrue(reader.isOptimized()); assertEquals(1, reader.getSequentialSubReaders().length);
IndexSearcher searcher = new IndexSearcher(reader); IndexSearcher searcher = new IndexSearcher(reader);
@ -159,7 +159,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
} else { } else {
w.addIndexes(r_1, r_2); w.addIndexes(r_1, r_2);
} }
w.optimize(true); w.forceMerge(1, true);
w.commit(); w.commit();
_TestUtil.checkIndex(target); _TestUtil.checkIndex(target);
@ -418,10 +418,10 @@ public class TestDocValuesIndexing extends LuceneTestCase {
private IndexDocValues getDocValues(IndexReader reader, String field) private IndexDocValues getDocValues(IndexReader reader, String field)
throws IOException { throws IOException {
boolean optimized = reader.isOptimized(); boolean singleSeg = reader.getSequentialSubReaders().length == 1;
PerDocValues perDoc = optimized ? reader.getSequentialSubReaders()[0].perDocValues() PerDocValues perDoc = singleSeg ? reader.getSequentialSubReaders()[0].perDocValues()
: MultiPerDocValues.getPerDocs(reader); : MultiPerDocValues.getPerDocs(reader);
switch (random.nextInt(optimized ? 3 : 2)) { // case 2 only if optimized switch (random.nextInt(singleSeg ? 3 : 2)) { // case 2 only if single seg
case 0: case 0:
return perDoc.docValues(field); return perDoc.docValues(field);
case 1: case 1:
@ -430,7 +430,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
return docValues; return docValues;
} }
throw new RuntimeException("no such field " + field); throw new RuntimeException("no such field " + field);
case 2:// this only works if we are on an optimized index! case 2:// this only works if we are on a single seg index!
return reader.getSequentialSubReaders()[0].docValues(field); return reader.getSequentialSubReaders()[0].docValues(field);
} }
throw new RuntimeException(); throw new RuntimeException();
@ -538,9 +538,9 @@ public class TestDocValuesIndexing extends LuceneTestCase {
} }
w.commit(); w.commit();
// TODO test unoptimized with deletions // TODO test multi seg with deletions
if (withDeletions || random.nextBoolean()) { if (withDeletions || random.nextBoolean()) {
w.optimize(true); w.forceMerge(1, true);
} }
return deleted; return deleted;
} }
@ -565,7 +565,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
doc = new Document(); doc = new Document();
doc.add(f); doc.add(f);
w.addDocument(doc); w.addDocument(doc);
w.optimize(); w.forceMerge(1);
IndexReader r = w.getReader(); IndexReader r = w.getReader();
w.close(); w.close();
assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0)); assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0));
@ -595,7 +595,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
doc = new Document(); doc = new Document();
doc.add(f); doc.add(f);
w.addDocument(doc); w.addDocument(doc);
w.optimize(); w.forceMerge(1);
IndexReader r = w.getReader(); IndexReader r = w.getReader();
w.close(); w.close();
assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0)); assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0));
View File
@ -87,7 +87,7 @@ public class TestTypePromotion extends LuceneTestCase {
if (random.nextInt(4) == 0) { if (random.nextInt(4) == 0) {
// once in a while use addIndexes // once in a while use addIndexes
writer.optimize(); writer.forceMerge(1);
Directory dir_2 = newDirectory() ; Directory dir_2 = newDirectory() ;
IndexWriter writer_2 = new IndexWriter(dir_2, IndexWriter writer_2 = new IndexWriter(dir_2,
@ -110,7 +110,7 @@ public class TestTypePromotion extends LuceneTestCase {
randomValueType(types, random), values, num_1 + num_2, num_3); randomValueType(types, random), values, num_1 + num_2, num_3);
} }
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
assertValues(type, dir, values); assertValues(type, dir, values);
dir.close(); dir.close();
@ -119,7 +119,7 @@ public class TestTypePromotion extends LuceneTestCase {
private void assertValues(TestType type, Directory dir, long[] values) private void assertValues(TestType type, Directory dir, long[] values)
throws CorruptIndexException, IOException { throws CorruptIndexException, IOException {
IndexReader reader = IndexReader.open(dir); IndexReader reader = IndexReader.open(dir);
assertTrue(reader.isOptimized()); assertEquals(1, reader.getSequentialSubReaders().length);
ReaderContext topReaderContext = reader.getTopReaderContext(); ReaderContext topReaderContext = reader.getTopReaderContext();
ReaderContext[] children = topReaderContext.children(); ReaderContext[] children = topReaderContext.children();
IndexDocValues docValues = children[0].reader.docValues("promote"); IndexDocValues docValues = children[0].reader.docValues("promote");
@ -292,14 +292,14 @@ public class TestTypePromotion extends LuceneTestCase {
writer.close(); writer.close();
writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
if (writerConfig.getMergePolicy() instanceof NoMergePolicy) { if (writerConfig.getMergePolicy() instanceof NoMergePolicy) {
writerConfig.setMergePolicy(newLogMergePolicy()); // make sure we optimize to one segment (merge everything together) writerConfig.setMergePolicy(newLogMergePolicy()); // make sure we merge to one segment (merge everything together)
} }
writer = new IndexWriter(dir, writerConfig); writer = new IndexWriter(dir, writerConfig);
// now optimize // now merge
writer.optimize(); writer.forceMerge(1);
writer.close(); writer.close();
IndexReader reader = IndexReader.open(dir); IndexReader reader = IndexReader.open(dir);
assertTrue(reader.isOptimized()); assertEquals(1, reader.getSequentialSubReaders().length);
ReaderContext topReaderContext = reader.getTopReaderContext(); ReaderContext topReaderContext = reader.getTopReaderContext();
ReaderContext[] children = topReaderContext.children(); ReaderContext[] children = topReaderContext.children();
IndexDocValues docValues = children[0].reader.docValues("promote"); IndexDocValues docValues = children[0].reader.docValues("promote");
Some files were not shown because too many files have changed in this diff.