mirror of https://github.com/apache/lucene.git
LUCENE-3577: rename IW.expungeDeletes -> IW.forceMergeDeletes
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1203756 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 74076d2ce8
commit 5267d23d46
@@ -749,6 +749,12 @@ API Changes
 * LUCENE-3571: Deprecate IndexSearcher(Directory). Use the constructors
   that take IndexReader instead.  (Robert Muir)

+* LUCENE-3577: Rename IndexWriter.expungeDeletes to forceMergeDeletes,
+  and revamped the javadocs, to discourage
+  use of this method since it is horribly costly and rarely
+  justified.  MergePolicy.findMergesToExpungeDeletes was renamed to
+  findForcedDeletesMerges.  (Robert Muir, Mike McCandless)
+
 New Features

 * LUCENE-3448: Added FixedBitSet.and(other/DISI), andNot(other/DISI).

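For callers migrating across this rename, a minimal before/after sketch (a hypothetical setup, not from this commit; the import paths follow the 3.x line and may differ on trunk):

import java.io.IOException;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class ForceMergeDeletesMigration {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();  // hypothetical in-memory index
    IndexWriter writer = new IndexWriter(dir,
        new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));
    writer.deleteDocuments(new Term("id", "42"));  // queue a delete to reclaim

    // Before this commit:
    //   writer.expungeDeletes();
    // After this commit (same behavior, new name):
    writer.forceMergeDeletes();

    writer.close();
    dir.close();
  }
}
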
@@ -243,7 +243,7 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos infos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos infos)
     throws CorruptIndexException, IOException {
     final int numSegs = infos.size();
     final int numLargeSegs = (numSegs < _numLargeSegments ? numSegs : _numLargeSegments);
@@ -254,7 +254,7 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
       // it does not clone all metadata, but LogMerge does not need it
       final SegmentInfos smallSegments = new SegmentInfos();
       smallSegments.rollbackSegmentInfos(infos.asList().subList(numLargeSegs, numSegs));
-      spec = super.findMergesToExpungeDeletes(smallSegments);
+      spec = super.findForcedDeletesMerges(smallSegments);
     }

     if(spec == null) spec = new MergeSpecification();

@@ -1732,7 +1732,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
     return false;
   }

-  /** Just like {@link #expungeDeletes()}, except you can
+  /** Just like {@link #forceMergeDeletes()}, except you can
    *  specify whether the call should block until the
    *  operation completes.  This is only meaningful with a
    *  {@link MergeScheduler} that is able to run merges in
@@ -1747,19 +1747,19 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    *  then any thread still running this method might hit a
    *  {@link MergePolicy.MergeAbortedException}.
    */
-  public void expungeDeletes(boolean doWait)
+  public void forceMergeDeletes(boolean doWait)
     throws CorruptIndexException, IOException {
     ensureOpen();

     flush(true, true);

     if (infoStream != null)
-      infoStream.message("IW", "expungeDeletes: index now " + segString());
+      infoStream.message("IW", "forceMergeDeletes: index now " + segString());

     MergePolicy.MergeSpecification spec;

     synchronized(this) {
-      spec = mergePolicy.findMergesToExpungeDeletes(segmentInfos);
+      spec = mergePolicy.findForcedDeletesMerges(segmentInfos);
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++)
@@ -1776,7 +1776,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
       while(running) {

         if (hitOOM) {
-          throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete expungeDeletes");
+          throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete forceMergeDeletes");
         }

         // Check each merge that MergePolicy asked us to
@@ -1808,29 +1808,20 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   }


-  /** Requests an expungeDeletes operation, by invoking
-   *  {@link MergePolicy#findMergesToExpungeDeletes}.
-   *  The MergePolicy determines what merges should be done.
-   *  For example, the default {@link TieredMergePolicy}
-   *  will only expunge deletes from a segment if the
-   *  percentage of deleted docs is over 10%.
+  /**
+   * Forces merging of all segments that have deleted
+   * documents.  The actual merges to be executed are
+   * determined by the {@link MergePolicy}.  For example,
+   * the default {@link TieredMergePolicy} will only
+   * pick a segment if the percentage of
+   * deleted docs is over 10%.
    *
-   * <p>When an index
-   * has many document deletions (or updates to existing
-   * documents), it's best to either call forceMerge or
-   * expungeDeletes to remove all unused data in the index
-   * associated with the deleted documents.  To see how
+   * <p>This is often a horribly costly operation; rarely
+   * is it warranted.</p>
+   *
+   * <p>To see how
    * many deletions you have pending in your index, call
-   * {@link IndexReader#numDeletedDocs}
-   * This saves disk space and memory usage while
-   * searching.  expungeDeletes should be somewhat faster
-   * than forceMerge since it does not insist on reducing the
-   * index to a single segment (though, this depends on the
-   * {@link MergePolicy}; see {@link
-   * MergePolicy#findMergesToExpungeDeletes}.). Note that
-   * this call does not first commit any buffered
-   * documents, so you must do so yourself if necessary.
-   * See also {@link #expungeDeletes(boolean)}
+   * {@link IndexReader#numDeletedDocs}.</p>
    *
    * <p><b>NOTE</b>: this method first flushes a new
    * segment (if there are indexed documents), and applies
@@ -1840,8 +1831,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    *  you should immediately close the writer.  See <a
    *  href="#OOME">above</a> for details.</p>
    */
-  public void expungeDeletes() throws CorruptIndexException, IOException {
-    expungeDeletes(true);
+  public void forceMergeDeletes() throws CorruptIndexException, IOException {
+    forceMergeDeletes(true);
   }

   /**

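The doWait flag only matters when merges run in background threads; a hedged usage sketch (ConcurrentMergeScheduler, the default, is assumed; with a serial scheduler the flag has no practical effect):

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;

// Sketch: blocking vs. non-blocking variants of the renamed method.
class ReclaimDeletes {
  static void reclaim(IndexWriter writer) throws IOException {
    writer.forceMergeDeletes(false); // request the merges, return immediately
    // ... the caller may keep indexing while deletes-only merges run ...
    writer.forceMergeDeletes(true);  // block until the selected merges finish;
                                     // same as writer.forceMergeDeletes()
  }
}
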
@@ -2042,7 +2033,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    * <p>NOTE: this method will forcefully abort all merges
    *    in progress.  If other threads are running {@link
    *    #forceMerge}, {@link #addIndexes(IndexReader[])} or
-   *    {@link #expungeDeletes} methods, they may receive
+   *    {@link #forceMergeDeletes} methods, they may receive
    *    {@link MergePolicy.MergeAbortedException}s.
    */
   public synchronized void deleteAll() throws IOException {

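Per the note above, a thread calling the renamed method concurrently with deleteAll() must tolerate aborted merges; a minimal sketch ('writer' is an assumed open IndexWriter):

// Sketch: tolerate merge aborts triggered by a concurrent deleteAll()
// (or rollback/close). MergePolicy.MergeAbortedException extends
// IOException, so catch it before any broader IOException handler.
try {
  writer.forceMergeDeletes();
} catch (MergePolicy.MergeAbortedException e) {
  // expected when another thread forcefully aborted our merges
}
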
@@ -417,18 +417,18 @@ public abstract class LogMergePolicy extends MergePolicy {
   }

   /**
-   * Finds merges necessary to expunge all deletes from the
+   * Finds merges necessary to force-merge all deletes from the
    * index.  We simply merge adjacent segments that have
    * deletes, up to mergeFactor at a time.
    */
   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
       throws CorruptIndexException, IOException {
     final List<SegmentInfo> segments = segmentInfos.asList();
     final int numSegments = segments.size();

     if (verbose())
-      message("findMergesToExpungeDeletes: " + numSegments + " segments");
+      message("findForcedDeleteMerges: " + numSegments + " segments");

     MergeSpecification spec = new MergeSpecification();
     int firstSegmentWithDeletions = -1;

@@ -319,7 +319,7 @@ public abstract class MergePolicy implements java.io.Closeable {
    * @param segmentInfos
    *          the total set of segments in the index
    */
-  public abstract MergeSpecification findMergesToExpungeDeletes(
+  public abstract MergeSpecification findForcedDeletesMerges(
       SegmentInfos segmentInfos) throws CorruptIndexException, IOException;

   /**

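For subclass authors, a hypothetical minimal override of the renamed hook (mirroring NoMergePolicy's no-op behavior shown further down in this diff; the class name is illustrative):

import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.SegmentInfos;

// Hypothetical policy that never schedules deletes-only merges;
// returning null tells IndexWriter there is nothing to do.
public class KeepDeletesMergePolicy extends LogByteSizeMergePolicy {
  @Override
  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
      throws CorruptIndexException, IOException {
    return null;
  }
}
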
@@ -63,7 +63,7 @@ public final class NoMergePolicy extends MergePolicy {
       throws CorruptIndexException, IOException { return null; }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
       throws CorruptIndexException, IOException { return null; }

   @Override

@@ -81,14 +81,14 @@ public class TieredMergePolicy extends MergePolicy {

   private long floorSegmentBytes = 2*1024*1024L;
   private double segsPerTier = 10.0;
-  private double expungeDeletesPctAllowed = 10.0;
+  private double forceMergeDeletesPctAllowed = 10.0;
   private boolean useCompoundFile = true;
   private double noCFSRatio = 0.1;
   private double reclaimDeletesWeight = 2.0;

   /** Maximum number of segments to be merged at a time
    *  during "normal" merging.  For explicit merging (eg,
-   *  forceMerge or expungeDeletes was called), see {@link
+   *  forceMerge or forceMergeDeletes was called), see {@link
    *  #setMaxMergeAtOnceExplicit}.  Default is 10. */
   public TieredMergePolicy setMaxMergeAtOnce(int v) {
     if (v < 2) {
@@ -107,7 +107,7 @@ public class TieredMergePolicy extends MergePolicy {
   // if user calls IW.maybeMerge "explicitly"

   /** Maximum number of segments to be merged at a time,
-   *  during forceMerge or expungeDeletes. Default is 30. */
+   *  during forceMerge or forceMergeDeletes. Default is 30. */
   public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) {
     if (v < 2) {
       throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")");
@@ -171,20 +171,20 @@ public class TieredMergePolicy extends MergePolicy {
     return floorSegmentBytes/1024*1024.;
   }

-  /** When expungeDeletes is called, we only merge away a
+  /** When forceMergeDeletes is called, we only merge away a
    *  segment if its delete percentage is over this
    *  threshold.  Default is 10%. */
-  public TieredMergePolicy setExpungeDeletesPctAllowed(double v) {
+  public TieredMergePolicy setForceMergeDeletesPctAllowed(double v) {
     if (v < 0.0 || v > 100.0) {
-      throw new IllegalArgumentException("expungeDeletesPctAllowed must be between 0.0 and 100.0 inclusive (got " + v + ")");
+      throw new IllegalArgumentException("forceMergeDeletesPctAllowed must be between 0.0 and 100.0 inclusive (got " + v + ")");
     }
-    expungeDeletesPctAllowed = v;
+    forceMergeDeletesPctAllowed = v;
     return this;
   }

-  /** @see #setExpungeDeletesPctAllowed */
-  public double getExpungeDeletesPctAllowed() {
-    return expungeDeletesPctAllowed;
+  /** @see #setForceMergeDeletesPctAllowed */
+  public double getForceMergeDeletesPctAllowed() {
+    return forceMergeDeletesPctAllowed;
   }

   /** Sets the allowed number of segments per tier.  Smaller

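A hedged configuration sketch using the renamed setter (the 20.0 threshold is an arbitrary illustration, not a recommendation from the commit; 'iwc' is an assumed IndexWriterConfig):

// Only segments whose delete percentage exceeds 20% become candidates
// when forceMergeDeletes() runs; the setters chain fluently because
// each returns 'this' (see the diff above).
TieredMergePolicy tmp = new TieredMergePolicy();
tmp.setForceMergeDeletesPctAllowed(20.0)
   .setMaxMergeAtOnceExplicit(30);
iwc.setMergePolicy(tmp);
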
@@ -550,16 +550,16 @@ public class TieredMergePolicy extends MergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos infos)
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos infos)
       throws CorruptIndexException, IOException {
     if (verbose()) {
-      message("findMergesToExpungeDeletes infos=" + writer.get().segString(infos) + " expungeDeletesPctAllowed=" + expungeDeletesPctAllowed);
+      message("findForcedDeletesMerges infos=" + writer.get().segString(infos) + " forceMergeDeletesPctAllowed=" + forceMergeDeletesPctAllowed);
     }
     final List<SegmentInfo> eligible = new ArrayList<SegmentInfo>();
     final Collection<SegmentInfo> merging = writer.get().getMergingSegments();
     for(SegmentInfo info : infos) {
       double pctDeletes = 100.*((double) writer.get().numDeletedDocs(info))/info.docCount;
-      if (pctDeletes > expungeDeletesPctAllowed && !merging.contains(info)) {
+      if (pctDeletes > forceMergeDeletesPctAllowed && !merging.contains(info)) {
         eligible.add(info);
       }
     }
@@ -579,7 +579,7 @@ public class TieredMergePolicy extends MergePolicy {

     while(start < eligible.size()) {
       // Don't enforce max merged size here: app is explicitly
-      // calling expungeDeletes, and knows this may take a
+      // calling forceMergeDeletes, and knows this may take a
       // long time / produce big segments (like forceMerge):
       final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size());
       if (spec == null) {
@@ -664,10 +664,10 @@ public class TieredMergePolicy extends MergePolicy {
     sb.append("maxMergeAtOnceExplicit=").append(maxMergeAtOnceExplicit).append(", ");
     sb.append("maxMergedSegmentMB=").append(maxMergedSegmentBytes/1024/1024.).append(", ");
     sb.append("floorSegmentMB=").append(floorSegmentBytes/1024/1024.).append(", ");
-    sb.append("expungeDeletesPctAllowed=").append(expungeDeletesPctAllowed).append(", ");
+    sb.append("forceMergeDeletesPctAllowed=").append(forceMergeDeletesPctAllowed).append(", ");
     sb.append("segmentsPerTier=").append(segsPerTier).append(", ");
     sb.append("useCompoundFile=").append(useCompoundFile).append(", ");
     sb.append("noCFSRatio=").append(noCFSRatio);
     return sb.toString();
   }
 }

@@ -127,8 +127,8 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
-    return base.findMergesToExpungeDeletes(segmentInfos);
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
+    return base.findForcedDeletesMerges(segmentInfos);
   }

   @Override

@@ -93,7 +93,7 @@ public class MockRandomMergePolicy extends MergePolicy {
   }

   @Override
-  public MergeSpecification findMergesToExpungeDeletes(
+  public MergeSpecification findForcedDeletesMerges(
       SegmentInfos segmentInfos)
       throws CorruptIndexException, IOException {
     return findMerges(segmentInfos);

@@ -330,12 +330,12 @@ public class RandomIndexWriter implements Closeable {
   private boolean doRandomForceMerge = true;
   private boolean doRandomForceMergeAssert = true;

-  public void expungeDeletes(boolean doWait) throws IOException {
-    w.expungeDeletes(doWait);
+  public void forceMergeDeletes(boolean doWait) throws IOException {
+    w.forceMergeDeletes(doWait);
   }

-  public void expungeDeletes() throws IOException {
-    w.expungeDeletes();
+  public void forceMergeDeletes() throws IOException {
+    w.forceMergeDeletes();
   }

   public void setDoRandomForceMerge(boolean v) {

@@ -157,8 +157,6 @@ public abstract class LuceneTestCase extends Assert {
   /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */
   public static final Throttling TEST_THROTTLING = TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER;

-  private static final Pattern codecWithParam = Pattern.compile("(.*)\\(\\s*(\\d+)\\s*\\)");
-
   /**
    * A random multiplier which you should use when writing random tests:
    * multiply it by the number of iterations
@@ -955,7 +953,7 @@ public abstract class LuceneTestCase extends Assert {
     }
     tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
     tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
-    tmp.setExpungeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
+    tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
     tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 2, 20));
     tmp.setUseCompoundFile(r.nextBoolean());
     tmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);

@@ -245,7 +245,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
       writer.deleteDocuments(new Term("f1", "d1"));
       // nuke the first segment entirely so that the segment with gaps is
       // loaded first!
-      writer.expungeDeletes();
+      writer.forceMergeDeletes();
       writer.close();
     }

@@ -116,9 +116,9 @@ public class TestIndexWriterMerging extends LuceneTestCase
     writer.close();
   }

-  // LUCENE-325: test expungeDeletes, when 2 singular merges
+  // LUCENE-325: test forceMergeDeletes, when 2 singular merges
   // are required
-  public void testExpungeDeletes() throws IOException {
+  public void testForceMergeDeletes() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random))
@@ -156,7 +156,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
     writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     assertEquals(8, writer.numDocs());
     assertEquals(10, writer.maxDoc());
-    writer.expungeDeletes();
+    writer.forceMergeDeletes();
     assertEquals(8, writer.numDocs());
     writer.close();
     ir = IndexReader.open(dir, true);
@@ -166,8 +166,8 @@ public class TestIndexWriterMerging extends LuceneTestCase
     dir.close();
   }

-  // LUCENE-325: test expungeDeletes, when many adjacent merges are required
-  public void testExpungeDeletes2() throws IOException {
+  // LUCENE-325: test forceMergeDeletes, when many adjacent merges are required
+  public void testForceMergeDeletes2() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(
         dir,
@@ -210,7 +210,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
         setMergePolicy(newLogMergePolicy(3))
     );
     assertEquals(49, writer.numDocs());
-    writer.expungeDeletes();
+    writer.forceMergeDeletes();
     writer.close();
     ir = IndexReader.open(dir, true);
     assertEquals(49, ir.maxDoc());
@@ -219,9 +219,9 @@ public class TestIndexWriterMerging extends LuceneTestCase
     dir.close();
   }

-  // LUCENE-325: test expungeDeletes without waiting, when
+  // LUCENE-325: test forceMergeDeletes without waiting, when
   // many adjacent merges are required
-  public void testExpungeDeletes3() throws IOException {
+  public void testForceMergeDeletes3() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(
         dir,
@@ -262,7 +262,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
         newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
             setMergePolicy(newLogMergePolicy(3))
     );
-    writer.expungeDeletes(false);
+    writer.forceMergeDeletes(false);
     writer.close();
     ir = IndexReader.open(dir, true);
     assertEquals(49, ir.maxDoc());

@@ -873,7 +873,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     dir1.close();
   }

-  public void testExpungeDeletes() throws Throwable {
+  public void testForceMergeDeletes() throws Throwable {
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     Document doc = new Document();
@@ -887,7 +887,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     w.deleteDocuments(new Term("id", "0"));

     IndexReader r = w.getReader();
-    w.expungeDeletes();
+    w.forceMergeDeletes();
     w.close();
     r.close();
     r = IndexReader.open(dir, true);

@@ -32,7 +32,7 @@ public class TestNoMergePolicy extends LuceneTestCase {
     MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES;
     assertNull(mp.findMerges(null));
     assertNull(mp.findForcedMerges(null, 0, null));
-    assertNull(mp.findMergesToExpungeDeletes(null));
+    assertNull(mp.findForcedDeletesMerges(null));
     assertFalse(mp.useCompoundFile(null, null));
     mp.close();
   }

@@ -274,7 +274,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
     }

     @Override
-    public MergeSpecification findMergesToExpungeDeletes(
+    public MergeSpecification findForcedDeletesMerges(
         SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
       return null;
     }

@@ -27,7 +27,7 @@ import org.apache.lucene.util._TestUtil;

 public class TestTieredMergePolicy extends LuceneTestCase {

-  public void testExpungeDeletes() throws Exception {
+  public void testForceMergeDeletes() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
     TieredMergePolicy tmp = newTieredMergePolicy();
@@ -35,7 +35,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
     conf.setMaxBufferedDocs(4);
     tmp.setMaxMergeAtOnce(100);
     tmp.setSegmentsPerTier(100);
-    tmp.setExpungeDeletesPctAllowed(30.0);
+    tmp.setForceMergeDeletesPctAllowed(30.0);
     IndexWriter w = new IndexWriter(dir, conf);
     for(int i=0;i<80;i++) {
       Document doc = new Document();
@@ -49,16 +49,16 @@ public class TestTieredMergePolicy extends LuceneTestCase {
       System.out.println("\nTEST: delete docs");
     }
     w.deleteDocuments(new Term("content", "0"));
-    w.expungeDeletes();
+    w.forceMergeDeletes();

     assertEquals(80, w.maxDoc());
     assertEquals(60, w.numDocs());

     if (VERBOSE) {
-      System.out.println("\nTEST: expunge2");
+      System.out.println("\nTEST: forceMergeDeletes2");
     }
-    tmp.setExpungeDeletesPctAllowed(10.0);
-    w.expungeDeletes();
+    tmp.setForceMergeDeletesPctAllowed(10.0);
+    w.forceMergeDeletes();
     assertEquals(60, w.maxDoc());
     assertEquals(60, w.numDocs());
     w.close();
@@ -107,12 +107,12 @@ public class TestTieredMergePolicy extends LuceneTestCase {
     }
   }

-  public void testExpungeMaxSegSize() throws Exception {
+  public void testForceMergeDeletesMaxSegSize() throws Exception {
     final Directory dir = newDirectory();
     final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
     final TieredMergePolicy tmp = new TieredMergePolicy();
     tmp.setMaxMergedSegmentMB(0.01);
-    tmp.setExpungeDeletesPctAllowed(0.0);
+    tmp.setForceMergeDeletesPctAllowed(0.0);
     conf.setMergePolicy(tmp);

     final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf);
@@ -139,7 +139,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
     assertEquals(numDocs-1, r.numDocs());
     r.close();

-    w.expungeDeletes();
+    w.forceMergeDeletes();

     r = w.getReader();
     assertEquals(numDocs-1, r.maxDoc());

@@ -313,7 +313,7 @@ public class DirectUpdateHandler2 extends UpdateHandler {
       if (cmd.optimize) {
         writer.forceMerge(cmd.maxOptimizeSegments);
       } else if (cmd.expungeDeletes) {
-        writer.expungeDeletes();
+        writer.forceMergeDeletes();
       }

       if (!cmd.softCommit) {