LUCENE-3577: rename IW.expungeDeletes -> IW.forceMergeDeletes

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1203756 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2011-11-18 17:27:04 +00:00
parent 74076d2ce8
commit 5267d23d46
18 changed files with 81 additions and 86 deletions

@@ -749,6 +749,12 @@ API Changes
 * LUCENE-3571: Deprecate IndexSearcher(Directory). Use the constructors
   that take IndexReader instead.  (Robert Muir)

+* LUCENE-3577: Rename IndexWriter.expungeDeletes to forceMergeDeletes,
+  and revamped the javadocs to discourage use of this method, since it
+  is horribly costly and rarely justified.
+  MergePolicy.findMergesToExpungeDeletes was renamed to
+  findForcedDeletesMerges.  (Robert Muir, Mike McCandless)
+
 New Features

 * LUCENE-3448: Added FixedBitSet.and(other/DISI), andNot(other/DISI).
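For callers the rename is mechanical. A minimal migration sketch (the ReclaimDeletes helper and its writer argument are illustrative, not part of this commit):

    import java.io.IOException;
    import org.apache.lucene.index.IndexWriter;

    // Hypothetical helper, purely to show the one-line migration.
    class ReclaimDeletes {
      static void reclaimDeletes(IndexWriter writer) throws IOException {
        // Before this commit: writer.expungeDeletes();
        // After this commit (blocks until the requested merges complete):
        writer.forceMergeDeletes();
      }
    }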

@@ -243,7 +243,7 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos infos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos infos)
       throws CorruptIndexException, IOException {
     final int numSegs = infos.size();
     final int numLargeSegs = (numSegs < _numLargeSegments ? numSegs : _numLargeSegments);
@@ -254,7 +254,7 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
       // it does not clone all metadata, but LogMerge does not need it
       final SegmentInfos smallSegments = new SegmentInfos();
       smallSegments.rollbackSegmentInfos(infos.asList().subList(numLargeSegs, numSegs));
-      spec = super.findMergesToExpungeDeletes(smallSegments);
+      spec = super.findForcedDeletesMerges(smallSegments);
     }

     if(spec == null) spec = new MergeSpecification();

@@ -1732,7 +1732,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
     return false;
   }

-  /** Just like {@link #expungeDeletes()}, except you can
+  /** Just like {@link #forceMergeDeletes()}, except you can
    *  specify whether the call should block until the
    *  operation completes.  This is only meaningful with a
    *  {@link MergeScheduler} that is able to run merges in
@@ -1747,19 +1747,19 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    *  then any thread still running this method might hit a
    *  {@link MergePolicy.MergeAbortedException}.
    */
-  public void expungeDeletes(boolean doWait)
+  public void forceMergeDeletes(boolean doWait)
     throws CorruptIndexException, IOException {
     ensureOpen();
     flush(true, true);
     if (infoStream != null)
-      infoStream.message("IW", "expungeDeletes: index now " + segString());
+      infoStream.message("IW", "forceMergeDeletes: index now " + segString());

     MergePolicy.MergeSpecification spec;

     synchronized(this) {
-      spec = mergePolicy.findMergesToExpungeDeletes(segmentInfos);
+      spec = mergePolicy.findForcedDeletesMerges(segmentInfos);
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++)
@@ -1776,7 +1776,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
       while(running) {

         if (hitOOM) {
-          throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete expungeDeletes");
+          throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete forceMergeDeletes");
         }

         // Check each merge that MergePolicy asked us to
@@ -1808,29 +1808,20 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   }

-  /** Requests an expungeDeletes operation, by invoking
-   *  {@link MergePolicy#findMergesToExpungeDeletes}.
-   *  The MergePolicy determines what merges should be done.
-   *  For example, the default {@link TieredMergePolicy}
-   *  will only expunge deletes from a segment if the
-   *  percentage of deleted docs is over 10%.
+  /**
+   * Forces merging of all segments that have deleted
+   * documents.  The actual merges to be executed are
+   * determined by the {@link MergePolicy}.  For example,
+   * the default {@link TieredMergePolicy} will only
+   * pick a segment if the percentage of
+   * deleted docs is over 10%.
    *
-   * <p>When an index
-   * has many document deletions (or updates to existing
-   * documents), it's best to either call forceMerge or
-   * expungeDeletes to remove all unused data in the index
-   * associated with the deleted documents.  To see how
+   * <p>This is often a horribly costly operation; rarely
+   * is it warranted.</p>
+   *
+   * <p>To see how
    * many deletions you have pending in your index, call
-   * {@link IndexReader#numDeletedDocs}
-   * This saves disk space and memory usage while
-   * searching.  expungeDeletes should be somewhat faster
-   * than forceMerge since it does not insist on reducing the
-   * index to a single segment (though, this depends on the
-   * {@link MergePolicy}; see {@link
-   * MergePolicy#findMergesToExpungeDeletes}.).  Note that
-   * this call does not first commit any buffered
-   * documents, so you must do so yourself if necessary.
-   * See also {@link #expungeDeletes(boolean)}
+   * {@link IndexReader#numDeletedDocs}.</p>
    *
    * <p><b>NOTE</b>: this method first flushes a new
    * segment (if there are indexed documents), and applies
@@ -1840,8 +1831,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    * you should immediately close the writer.  See <a
    * href="#OOME">above</a> for details.</p>
    */
-  public void expungeDeletes() throws CorruptIndexException, IOException {
-    expungeDeletes(true);
+  public void forceMergeDeletes() throws CorruptIndexException, IOException {
+    forceMergeDeletes(true);
   }

   /**
@@ -2042,7 +2033,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    * <p>NOTE: this method will forcefully abort all merges
    *  in progress.  If other threads are running {@link
    *  #forceMerge}, {@link #addIndexes(IndexReader[])} or
-   *  {@link #expungeDeletes} methods, they may receive
+   *  {@link #forceMergeDeletes} methods, they may receive
    *  {@link MergePolicy.MergeAbortedException}s.
    */
   public synchronized void deleteAll() throws IOException {
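The doWait flag is only meaningful with a MergeScheduler that runs merges in background threads, such as the default ConcurrentMergeScheduler. A hedged sketch of a non-blocking call against the 3.x-era API (the directory and analyzer choices are illustrative, not prescribed by this commit):

    import java.io.IOException;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class ForceMergeDeletesDemo {
      public static void main(String[] args) throws IOException {
        IndexWriterConfig conf = new IndexWriterConfig(
            Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35));
        IndexWriter writer = new IndexWriter(new RAMDirectory(), conf);

        // ... index documents, then delete some of them ...

        // doWait=false: kick off the deletes-reclaiming merges and return
        // immediately; the background merge threads finish the work.
        writer.forceMergeDeletes(false);

        // close() waits for still-running merges by default.
        writer.close();
      }
    }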

@@ -417,18 +417,18 @@ public abstract class LogMergePolicy extends MergePolicy {
   }

   /**
-   * Finds merges necessary to expunge all deletes from the
+   * Finds merges necessary to force-merge all deletes from the
    * index.  We simply merge adjacent segments that have
    * deletes, up to mergeFactor at a time.
    */
   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
       throws CorruptIndexException, IOException {
     final List<SegmentInfo> segments = segmentInfos.asList();
     final int numSegments = segments.size();

     if (verbose())
-      message("findMergesToExpungeDeletes: " + numSegments + " segments");
+      message("findForcedDeletesMerges: " + numSegments + " segments");

     MergeSpecification spec = new MergeSpecification();
     int firstSegmentWithDeletions = -1;

@@ -319,7 +319,7 @@ public abstract class MergePolicy implements java.io.Closeable {
    * @param segmentInfos
    *          the total set of segments in the index
    */
-  public abstract MergeSpecification findMergesToExpungeDeletes(
+  public abstract MergeSpecification findForcedDeletesMerges(
       SegmentInfos segmentInfos) throws CorruptIndexException, IOException;

   /**
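Every MergePolicy subclass must implement the renamed hook. As a minimal hedged sketch (a hypothetical policy, not part of this commit), returning null tells IndexWriter.forceMergeDeletes there is nothing to do, exactly as NoMergePolicy does below:

    import java.io.IOException;
    import org.apache.lucene.index.CorruptIndexException;
    import org.apache.lucene.index.LogByteSizeMergePolicy;
    import org.apache.lucene.index.SegmentInfos;

    // Hypothetical: inherits normal merging behavior, but makes
    // IndexWriter.forceMergeDeletes() a no-op for this policy.
    public class NoForcedDeletesMergePolicy extends LogByteSizeMergePolicy {
      @Override
      public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
          throws CorruptIndexException, IOException {
        return null;  // no merge specification: nothing will be merged
      }
    }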

@@ -63,7 +63,7 @@ public final class NoMergePolicy extends MergePolicy {
       throws CorruptIndexException, IOException { return null; }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
       throws CorruptIndexException, IOException { return null; }

   @Override

@@ -81,14 +81,14 @@ public class TieredMergePolicy extends MergePolicy {
   private long floorSegmentBytes = 2*1024*1024L;
   private double segsPerTier = 10.0;
-  private double expungeDeletesPctAllowed = 10.0;
+  private double forceMergeDeletesPctAllowed = 10.0;
   private boolean useCompoundFile = true;
   private double noCFSRatio = 0.1;
   private double reclaimDeletesWeight = 2.0;

   /** Maximum number of segments to be merged at a time
    *  during "normal" merging.  For explicit merging (eg,
-   *  forceMerge or expungeDeletes was called), see {@link
+   *  forceMerge or forceMergeDeletes was called), see {@link
    *  #setMaxMergeAtOnceExplicit}.  Default is 10. */
   public TieredMergePolicy setMaxMergeAtOnce(int v) {
     if (v < 2) {
@@ -107,7 +107,7 @@ public class TieredMergePolicy extends MergePolicy {
   // if user calls IW.maybeMerge "explicitly"

   /** Maximum number of segments to be merged at a time,
-   *  during forceMerge or expungeDeletes.  Default is 30. */
+   *  during forceMerge or forceMergeDeletes.  Default is 30. */
   public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) {
     if (v < 2) {
       throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")");
@@ -171,20 +171,20 @@ public class TieredMergePolicy extends MergePolicy {
     return floorSegmentBytes/(1024*1024.);
   }

-  /** When expungeDeletes is called, we only merge away a
+  /** When forceMergeDeletes is called, we only merge away a
    *  segment if its delete percentage is over this
    *  threshold.  Default is 10%. */
-  public TieredMergePolicy setExpungeDeletesPctAllowed(double v) {
+  public TieredMergePolicy setForceMergeDeletesPctAllowed(double v) {
     if (v < 0.0 || v > 100.0) {
-      throw new IllegalArgumentException("expungeDeletesPctAllowed must be between 0.0 and 100.0 inclusive (got " + v + ")");
+      throw new IllegalArgumentException("forceMergeDeletesPctAllowed must be between 0.0 and 100.0 inclusive (got " + v + ")");
     }
-    expungeDeletesPctAllowed = v;
+    forceMergeDeletesPctAllowed = v;
     return this;
   }

-  /** @see #setExpungeDeletesPctAllowed */
-  public double getExpungeDeletesPctAllowed() {
-    return expungeDeletesPctAllowed;
+  /** @see #setForceMergeDeletesPctAllowed */
+  public double getForceMergeDeletesPctAllowed() {
+    return forceMergeDeletesPctAllowed;
   }

   /** Sets the allowed number of segments per tier.  Smaller
@@ -550,16 +550,16 @@ public class TieredMergePolicy extends MergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos infos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos infos)
       throws CorruptIndexException, IOException {
     if (verbose()) {
-      message("findMergesToExpungeDeletes infos=" + writer.get().segString(infos) + " expungeDeletesPctAllowed=" + expungeDeletesPctAllowed);
+      message("findForcedDeletesMerges infos=" + writer.get().segString(infos) + " forceMergeDeletesPctAllowed=" + forceMergeDeletesPctAllowed);
     }
     final List<SegmentInfo> eligible = new ArrayList<SegmentInfo>();
     final Collection<SegmentInfo> merging = writer.get().getMergingSegments();
     for(SegmentInfo info : infos) {
       double pctDeletes = 100.*((double) writer.get().numDeletedDocs(info))/info.docCount;
-      if (pctDeletes > expungeDeletesPctAllowed && !merging.contains(info)) {
+      if (pctDeletes > forceMergeDeletesPctAllowed && !merging.contains(info)) {
         eligible.add(info);
       }
     }
@@ -579,7 +579,7 @@ public class TieredMergePolicy extends MergePolicy {
     while(start < eligible.size()) {
       // Don't enforce max merged size here: app is explicitly
-      // calling expungeDeletes, and knows this may take a
+      // calling forceMergeDeletes, and knows this may take a
       // long time / produce big segments (like forceMerge):
       final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size());
       if (spec == null) {
@@ -664,10 +664,10 @@ public class TieredMergePolicy extends MergePolicy {
     sb.append("maxMergeAtOnceExplicit=").append(maxMergeAtOnceExplicit).append(", ");
     sb.append("maxMergedSegmentMB=").append(maxMergedSegmentBytes/1024/1024.).append(", ");
     sb.append("floorSegmentMB=").append(floorSegmentBytes/1024/1024.).append(", ");
-    sb.append("expungeDeletesPctAllowed=").append(expungeDeletesPctAllowed).append(", ");
+    sb.append("forceMergeDeletesPctAllowed=").append(forceMergeDeletesPctAllowed).append(", ");
     sb.append("segmentsPerTier=").append(segsPerTier).append(", ");
     sb.append("useCompoundFile=").append(useCompoundFile).append(", ");
     sb.append("noCFSRatio=").append(noCFSRatio);
     return sb.toString();
   }
 }
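The renamed setter keeps its old semantics: during forceMergeDeletes, a segment is picked only if its percentage of deleted documents exceeds the threshold. A hedged configuration sketch (the 20% threshold and class name are illustrative, not a recommendation):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;
    import org.apache.lucene.util.Version;

    public class TieredConfigSketch {
      static IndexWriterConfig newConfig() {
        TieredMergePolicy tmp = new TieredMergePolicy();
        // Only reclaim segments with >20% deleted docs (default is 10%);
        // the setters chain, each returning this.
        tmp.setForceMergeDeletesPctAllowed(20.0)
           .setMaxMergeAtOnceExplicit(30);  // width cap for explicit merges
        return new IndexWriterConfig(Version.LUCENE_35,
                new StandardAnalyzer(Version.LUCENE_35))
            .setMergePolicy(tmp);
      }
    }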

@@ -127,8 +127,8 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
-    return base.findMergesToExpungeDeletes(segmentInfos);
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
+    return base.findForcedDeletesMerges(segmentInfos);
   }

   @Override

@@ -93,7 +93,7 @@ public class MockRandomMergePolicy extends MergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(
+  public MergeSpecification findForcedDeletesMerges(
       SegmentInfos segmentInfos)
       throws CorruptIndexException, IOException {
     return findMerges(segmentInfos);

@@ -330,12 +330,12 @@ public class RandomIndexWriter implements Closeable {
   private boolean doRandomForceMerge = true;
   private boolean doRandomForceMergeAssert = true;

-  public void expungeDeletes(boolean doWait) throws IOException {
-    w.expungeDeletes(doWait);
+  public void forceMergeDeletes(boolean doWait) throws IOException {
+    w.forceMergeDeletes(doWait);
   }

-  public void expungeDeletes() throws IOException {
-    w.expungeDeletes();
+  public void forceMergeDeletes() throws IOException {
+    w.forceMergeDeletes();
   }

   public void setDoRandomForceMerge(boolean v) {

@@ -157,8 +157,6 @@ public abstract class LuceneTestCase extends Assert {
   /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */
   public static final Throttling TEST_THROTTLING = TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER;

-  private static final Pattern codecWithParam = Pattern.compile("(.*)\\(\\s*(\\d+)\\s*\\)");
-
   /**
    * A random multiplier which you should use when writing random tests:
    * multiply it by the number of iterations
@@ -955,7 +953,7 @@ public abstract class LuceneTestCase extends Assert {
     }
     tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
     tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
-    tmp.setExpungeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
+    tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
     tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 2, 20));
     tmp.setUseCompoundFile(r.nextBoolean());
     tmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);

@@ -245,7 +245,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
     writer.deleteDocuments(new Term("f1", "d1"));
     // nuke the first segment entirely so that the segment with gaps is
     // loaded first!
-    writer.expungeDeletes();
+    writer.forceMergeDeletes();
     writer.close();
   }

@@ -116,9 +116,9 @@ public class TestIndexWriterMerging extends LuceneTestCase
     writer.close();
   }

-  // LUCENE-325: test expungeDeletes, when 2 singular merges
+  // LUCENE-325: test forceMergeDeletes, when 2 singular merges
   // are required
-  public void testExpungeDeletes() throws IOException {
+  public void testForceMergeDeletes() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random))
@@ -156,7 +156,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
     writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     assertEquals(8, writer.numDocs());
     assertEquals(10, writer.maxDoc());
-    writer.expungeDeletes();
+    writer.forceMergeDeletes();
     assertEquals(8, writer.numDocs());
     writer.close();
     ir = IndexReader.open(dir, true);
@@ -166,8 +166,8 @@ public class TestIndexWriterMerging extends LuceneTestCase
     dir.close();
   }

-  // LUCENE-325: test expungeDeletes, when many adjacent merges are required
-  public void testExpungeDeletes2() throws IOException {
+  // LUCENE-325: test forceMergeDeletes, when many adjacent merges are required
+  public void testForceMergeDeletes2() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(
         dir,
@@ -210,7 +210,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
         setMergePolicy(newLogMergePolicy(3))
     );
     assertEquals(49, writer.numDocs());
-    writer.expungeDeletes();
+    writer.forceMergeDeletes();
     writer.close();
     ir = IndexReader.open(dir, true);
     assertEquals(49, ir.maxDoc());
@@ -219,9 +219,9 @@ public class TestIndexWriterMerging extends LuceneTestCase
     dir.close();
   }

-  // LUCENE-325: test expungeDeletes without waiting, when
+  // LUCENE-325: test forceMergeDeletes without waiting, when
   // many adjacent merges are required
-  public void testExpungeDeletes3() throws IOException {
+  public void testForceMergeDeletes3() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(
         dir,
@@ -262,7 +262,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
         newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
             setMergePolicy(newLogMergePolicy(3))
     );
-    writer.expungeDeletes(false);
+    writer.forceMergeDeletes(false);
     writer.close();
     ir = IndexReader.open(dir, true);
     assertEquals(49, ir.maxDoc());

@@ -873,7 +873,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     dir1.close();
   }

-  public void testExpungeDeletes() throws Throwable {
+  public void testForceMergeDeletes() throws Throwable {
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     Document doc = new Document();
@@ -887,7 +887,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     w.deleteDocuments(new Term("id", "0"));

     IndexReader r = w.getReader();
-    w.expungeDeletes();
+    w.forceMergeDeletes();
     w.close();
     r.close();
     r = IndexReader.open(dir, true);

@@ -32,7 +32,7 @@ public class TestNoMergePolicy extends LuceneTestCase {
     MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES;
     assertNull(mp.findMerges(null));
     assertNull(mp.findForcedMerges(null, 0, null));
-    assertNull(mp.findMergesToExpungeDeletes(null));
+    assertNull(mp.findForcedDeletesMerges(null));
     assertFalse(mp.useCompoundFile(null, null));
     mp.close();
   }

@@ -274,7 +274,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(
+  public MergeSpecification findForcedDeletesMerges(
       SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
     return null;
   }

@@ -27,7 +27,7 @@ import org.apache.lucene.util._TestUtil;

 public class TestTieredMergePolicy extends LuceneTestCase {

-  public void testExpungeDeletes() throws Exception {
+  public void testForceMergeDeletes() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
     TieredMergePolicy tmp = newTieredMergePolicy();
@@ -35,7 +35,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
     conf.setMaxBufferedDocs(4);
     tmp.setMaxMergeAtOnce(100);
     tmp.setSegmentsPerTier(100);
-    tmp.setExpungeDeletesPctAllowed(30.0);
+    tmp.setForceMergeDeletesPctAllowed(30.0);
     IndexWriter w = new IndexWriter(dir, conf);
     for(int i=0;i<80;i++) {
       Document doc = new Document();
@@ -49,16 +49,16 @@ public class TestTieredMergePolicy extends LuceneTestCase {
       System.out.println("\nTEST: delete docs");
     }
     w.deleteDocuments(new Term("content", "0"));
-    w.expungeDeletes();
+    w.forceMergeDeletes();

     assertEquals(80, w.maxDoc());
     assertEquals(60, w.numDocs());

     if (VERBOSE) {
-      System.out.println("\nTEST: expunge2");
+      System.out.println("\nTEST: forceMergeDeletes2");
     }
-    tmp.setExpungeDeletesPctAllowed(10.0);
-    w.expungeDeletes();
+    tmp.setForceMergeDeletesPctAllowed(10.0);
+    w.forceMergeDeletes();
     assertEquals(60, w.maxDoc());
     assertEquals(60, w.numDocs());
     w.close();
@@ -107,12 +107,12 @@ public class TestTieredMergePolicy extends LuceneTestCase {
     }
   }

-  public void testExpungeMaxSegSize() throws Exception {
+  public void testForceMergeDeletesMaxSegSize() throws Exception {
     final Directory dir = newDirectory();
     final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
     final TieredMergePolicy tmp = new TieredMergePolicy();
     tmp.setMaxMergedSegmentMB(0.01);
-    tmp.setExpungeDeletesPctAllowed(0.0);
+    tmp.setForceMergeDeletesPctAllowed(0.0);
     conf.setMergePolicy(tmp);

     final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf);
@@ -139,7 +139,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
     assertEquals(numDocs-1, r.numDocs());
     r.close();

-    w.expungeDeletes();
+    w.forceMergeDeletes();

     r = w.getReader();
     assertEquals(numDocs-1, r.maxDoc());

@@ -313,7 +313,7 @@ public class DirectUpdateHandler2 extends UpdateHandler {
       if (cmd.optimize) {
         writer.forceMerge(cmd.maxOptimizeSegments);
       } else if (cmd.expungeDeletes) {
-        writer.expungeDeletes();
+        writer.forceMergeDeletes();
       }

       if (!cmd.softCommit) {