LUCENE-5711: Pass IndexWriter to MergePolicy on each operation

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1598833 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Simon Willnauer 2014-05-31 07:37:01 +00:00
parent 075778c06e
commit 963bf0a4db
15 changed files with 196 additions and 220 deletions

View File

@ -138,6 +138,10 @@ Changes in Backwards Compatibility Policy
API Changes
* LUCENE-5711: MergePolicy now takes the IndexWriter as an argument
  to each method, rather than holding a reference to a single
  IndexWriter instance. (Simon Willnauer)
* LUCENE-5582: Deprecate IndexOutput.length (just use
IndexOutput.getFilePointer instead) and IndexOutput.setLength.
(Mike McCandless)

View File

@ -705,7 +705,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
analyzer = config.getAnalyzer();
infoStream = config.getInfoStream();
mergePolicy = config.getMergePolicy();
mergePolicy.setIndexWriter(this);
mergeScheduler = config.getMergeScheduler();
codec = config.getCodec();
@ -1781,7 +1780,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
MergePolicy.MergeSpecification spec;
boolean newMergesFound = false;
synchronized(this) {
spec = mergePolicy.findForcedDeletesMerges(segmentInfos);
spec = mergePolicy.findForcedDeletesMerges(segmentInfos, this);
newMergesFound = spec != null;
if (newMergesFound) {
final int numMerges = spec.merges.size();
@ -1901,7 +1900,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
if (maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS) {
assert trigger == MergeTrigger.EXPLICIT || trigger == MergeTrigger.MERGE_FINISHED :
"Expected EXPLICT or MERGE_FINISHED as trigger even with maxNumSegments set but was: " + trigger.name();
spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge));
spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge), this);
newMergesFound = spec != null;
if (newMergesFound) {
final int numMerges = spec.merges.size();
@ -1911,7 +1910,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
}
}
} else {
spec = mergePolicy.findMerges(trigger, segmentInfos);
spec = mergePolicy.findMerges(trigger, segmentInfos, this);
}
newMergesFound = spec != null;
if (newMergesFound) {
@ -2598,7 +2597,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
return;
}
ensureOpen();
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, infoPerCommit);
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, infoPerCommit, this);
}
// Now create the compound file if needed
@ -4019,7 +4018,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
//System.out.println("merger set hasProx=" + merger.hasProx() + " seg=" + merge.info.name);
boolean useCompoundFile;
synchronized (this) { // Guard segmentInfos
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info);
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info, this);
}
if (useCompoundFile) {

View File

@ -43,8 +43,8 @@ public class LogByteSizeMergePolicy extends LogMergePolicy {
}
@Override
protected long size(SegmentCommitInfo info) throws IOException {
return sizeBytes(info);
protected long size(SegmentCommitInfo info, IndexWriter writer) throws IOException {
return sizeBytes(info, writer);
}
/** <p>Determines the largest segment (measured by total

View File

@ -40,8 +40,8 @@ public class LogDocMergePolicy extends LogMergePolicy {
}
@Override
protected long size(SegmentCommitInfo info) throws IOException {
return sizeDocs(info);
protected long size(SegmentCommitInfo info, IndexWriter writer) throws IOException {
return sizeDocs(info, writer);
}
/** Sets the minimum size for the lowest level segments.

View File

@ -99,16 +99,15 @@ public abstract class LogMergePolicy extends MergePolicy {
/** Returns true if {@code LMP} is enabled in {@link
* IndexWriter}'s {@code infoStream}. */
protected boolean verbose() {
final IndexWriter w = writer.get();
return w != null && w.infoStream.isEnabled("LMP");
protected boolean verbose(IndexWriter writer) {
return writer != null && writer.infoStream.isEnabled("LMP");
}
/** Print a debug message to {@link IndexWriter}'s {@code
* infoStream}. */
protected void message(String message) {
if (verbose()) {
writer.get().infoStream.message("LMP", message);
protected void message(String message, IndexWriter writer) {
if (verbose(writer)) {
writer.infoStream.message("LMP", message);
}
}
@ -154,9 +153,9 @@ public abstract class LogMergePolicy extends MergePolicy {
* SegmentCommitInfo}, pro-rated by percentage of
* non-deleted documents if {@link
* #setCalibrateSizeByDeletes} is set. */
protected long sizeDocs(SegmentCommitInfo info) throws IOException {
protected long sizeDocs(SegmentCommitInfo info, IndexWriter writer) throws IOException {
if (calibrateSizeByDeletes) {
int delCount = writer.get().numDeletedDocs(info);
int delCount = writer.numDeletedDocs(info);
assert delCount <= info.info.getDocCount();
return (info.info.getDocCount() - (long)delCount);
} else {
@ -168,9 +167,9 @@ public abstract class LogMergePolicy extends MergePolicy {
* SegmentCommitInfo}, pro-rated by percentage of
* non-deleted documents if {@link
* #setCalibrateSizeByDeletes} is set. */
protected long sizeBytes(SegmentCommitInfo info) throws IOException {
protected long sizeBytes(SegmentCommitInfo info, IndexWriter writer) throws IOException {
if (calibrateSizeByDeletes) {
return super.size(info);
return super.size(info, writer);
}
return info.sizeInBytes();
}
@ -178,7 +177,7 @@ public abstract class LogMergePolicy extends MergePolicy {
/** Returns true if the number of segments eligible for
* merging is less than or equal to the specified {@code
* maxNumSegments}. */
protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge) throws IOException {
protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
final int numSegments = infos.size();
int numToMerge = 0;
SegmentCommitInfo mergeInfo = null;
@ -194,7 +193,7 @@ public abstract class LogMergePolicy extends MergePolicy {
}
return numToMerge <= maxNumSegments &&
(numToMerge != 1 || !segmentIsOriginal || isMerged(infos, mergeInfo));
(numToMerge != 1 || !segmentIsOriginal || isMerged(infos, mergeInfo, writer));
}
/**
@ -206,20 +205,20 @@ public abstract class LogMergePolicy extends MergePolicy {
* maxNumSegments} will remain, but &lt;= that number.
*/
private MergeSpecification findForcedMergesSizeLimit(
SegmentInfos infos, int maxNumSegments, int last) throws IOException {
SegmentInfos infos, int maxNumSegments, int last, IndexWriter writer) throws IOException {
MergeSpecification spec = new MergeSpecification();
final List<SegmentCommitInfo> segments = infos.asList();
int start = last - 1;
while (start >= 0) {
SegmentCommitInfo info = infos.info(start);
if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
if (verbose()) {
message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
if (size(info, writer) > maxMergeSizeForForcedMerge || sizeDocs(info, writer) > maxMergeDocs) {
if (verbose(writer)) {
message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")", writer);
}
// need to skip that segment + add a merge for the 'right' segments,
// unless there is only 1 which is merged.
if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos, infos.info(start + 1)))) {
if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos, infos.info(start + 1), writer))) {
// there is more than 1 segment to the right of
// this one, or a mergeable single segment.
spec.add(new OneMerge(segments.subList(start + 1, last)));
@ -235,7 +234,7 @@ public abstract class LogMergePolicy extends MergePolicy {
// Add any left-over segments, unless there is just 1
// already fully merged
if (last > 0 && (++start + 1 < last || !isMerged(infos, infos.info(start)))) {
if (last > 0 && (++start + 1 < last || !isMerged(infos, infos.info(start), writer))) {
spec.add(new OneMerge(segments.subList(start, last)));
}
@ -247,7 +246,7 @@ public abstract class LogMergePolicy extends MergePolicy {
* the returned merges only by the {@code maxNumSegments} parameter, and
* guaranteed that exactly that number of segments will remain in the index.
*/
private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException {
private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last, IndexWriter writer) throws IOException {
MergeSpecification spec = new MergeSpecification();
final List<SegmentCommitInfo> segments = infos.asList();
@ -265,7 +264,7 @@ public abstract class LogMergePolicy extends MergePolicy {
// Since we must merge down to 1 segment, the
// choice is simple:
if (last > 1 || !isMerged(infos, infos.info(0))) {
if (last > 1 || !isMerged(infos, infos.info(0), writer)) {
spec.add(new OneMerge(segments.subList(0, last)));
}
} else if (last > maxNumSegments) {
@ -288,9 +287,9 @@ public abstract class LogMergePolicy extends MergePolicy {
for(int i=0;i<last-finalMergeSize+1;i++) {
long sumSize = 0;
for(int j=0;j<finalMergeSize;j++) {
sumSize += size(infos.info(j+i));
sumSize += size(infos.info(j+i), writer);
}
if (i == 0 || (sumSize < 2*size(infos.info(i-1)) && sumSize < bestSize)) {
if (i == 0 || (sumSize < 2*size(infos.info(i-1), writer) && sumSize < bestSize)) {
bestStart = i;
bestSize = sumSize;
}
@ -314,18 +313,18 @@ public abstract class LogMergePolicy extends MergePolicy {
* in use may make use of concurrency. */
@Override
public MergeSpecification findForcedMerges(SegmentInfos infos,
int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge) throws IOException {
int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
assert maxNumSegments > 0;
if (verbose()) {
message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge);
if (verbose(writer)) {
message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge, writer);
}
// If the segments are already merged (e.g. there's only 1 segment), or
// there are <maxNumSegments:.
if (isMerged(infos, maxNumSegments, segmentsToMerge)) {
if (verbose()) {
message("already merged; skip");
if (isMerged(infos, maxNumSegments, segmentsToMerge, writer)) {
if (verbose(writer)) {
message("already merged; skip", writer);
}
return null;
}
@ -343,16 +342,16 @@ public abstract class LogMergePolicy extends MergePolicy {
}
if (last == 0) {
if (verbose()) {
message("last == 0; skip");
if (verbose(writer)) {
message("last == 0; skip", writer);
}
return null;
}
// There is only one segment already, and it is merged
if (maxNumSegments == 1 && last == 1 && isMerged(infos, infos.info(0))) {
if (verbose()) {
message("already 1 seg; skip");
if (maxNumSegments == 1 && last == 1 && isMerged(infos, infos.info(0), writer)) {
if (verbose(writer)) {
message("already 1 seg; skip", writer);
}
return null;
}
@ -361,16 +360,16 @@ public abstract class LogMergePolicy extends MergePolicy {
boolean anyTooLarge = false;
for (int i = 0; i < last; i++) {
SegmentCommitInfo info = infos.info(i);
if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
if (size(info, writer) > maxMergeSizeForForcedMerge || sizeDocs(info, writer) > maxMergeDocs) {
anyTooLarge = true;
break;
}
}
if (anyTooLarge) {
return findForcedMergesSizeLimit(infos, maxNumSegments, last);
return findForcedMergesSizeLimit(infos, maxNumSegments, last, writer);
} else {
return findForcedMergesMaxNumSegments(infos, maxNumSegments, last);
return findForcedMergesMaxNumSegments(infos, maxNumSegments, last, writer);
}
}
@ -380,33 +379,32 @@ public abstract class LogMergePolicy extends MergePolicy {
* deletes, up to mergeFactor at a time.
*/
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer)
throws IOException {
final List<SegmentCommitInfo> segments = segmentInfos.asList();
final int numSegments = segments.size();
if (verbose()) {
message("findForcedDeleteMerges: " + numSegments + " segments");
if (verbose(writer)) {
message("findForcedDeleteMerges: " + numSegments + " segments", writer);
}
MergeSpecification spec = new MergeSpecification();
int firstSegmentWithDeletions = -1;
IndexWriter w = writer.get();
assert w != null;
assert writer != null;
for(int i=0;i<numSegments;i++) {
final SegmentCommitInfo info = segmentInfos.info(i);
int delCount = w.numDeletedDocs(info);
int delCount = writer.numDeletedDocs(info);
if (delCount > 0) {
if (verbose()) {
message(" segment " + info.info.name + " has deletions");
if (verbose(writer)) {
message(" segment " + info.info.name + " has deletions", writer);
}
if (firstSegmentWithDeletions == -1)
firstSegmentWithDeletions = i;
else if (i - firstSegmentWithDeletions == mergeFactor) {
// We've seen mergeFactor segments in a row with
// deletions, so force a merge now:
if (verbose()) {
message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
if (verbose(writer)) {
message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive", writer);
}
spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i)));
firstSegmentWithDeletions = i;
@ -415,8 +413,8 @@ public abstract class LogMergePolicy extends MergePolicy {
// End of a sequence of segments with deletions, so,
// merge those past segments even if it's fewer than
// mergeFactor segments
if (verbose()) {
message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
if (verbose(writer)) {
message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive", writer);
}
spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i)));
firstSegmentWithDeletions = -1;
@ -424,8 +422,8 @@ public abstract class LogMergePolicy extends MergePolicy {
}
if (firstSegmentWithDeletions != -1) {
if (verbose()) {
message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
if (verbose(writer)) {
message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive", writer);
}
spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, numSegments)));
}
@ -459,11 +457,11 @@ public abstract class LogMergePolicy extends MergePolicy {
* will return multiple merges, allowing the {@link
* MergeScheduler} to use concurrency. */
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos) throws IOException {
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos, IndexWriter writer) throws IOException {
final int numSegments = infos.size();
if (verbose()) {
message("findMerges: " + numSegments + " segments");
if (verbose(writer)) {
message("findMerges: " + numSegments + " segments", writer);
}
// Compute levels, which is just log (base mergeFactor)
@ -471,11 +469,11 @@ public abstract class LogMergePolicy extends MergePolicy {
final List<SegmentInfoAndLevel> levels = new ArrayList<>();
final float norm = (float) Math.log(mergeFactor);
final Collection<SegmentCommitInfo> mergingSegments = writer.get().getMergingSegments();
final Collection<SegmentCommitInfo> mergingSegments = writer.getMergingSegments();
for(int i=0;i<numSegments;i++) {
final SegmentCommitInfo info = infos.info(i);
long size = size(info);
long size = size(info, writer);
// Floor tiny segments
if (size < 1) {
@ -485,13 +483,13 @@ public abstract class LogMergePolicy extends MergePolicy {
final SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float) Math.log(size)/norm, i);
levels.add(infoLevel);
if (verbose()) {
final long segBytes = sizeBytes(info);
if (verbose(writer)) {
final long segBytes = sizeBytes(info, writer);
String extra = mergingSegments.contains(info) ? " [merging]" : "";
if (size >= maxMergeSize) {
extra += " [skip: too large]";
}
message("seg=" + writer.get().segString(info) + " level=" + infoLevel.level + " size=" + String.format(Locale.ROOT, "%.3f MB", segBytes/1024/1024.) + extra);
message("seg=" + writer.segString(info) + " level=" + infoLevel.level + " size=" + String.format(Locale.ROOT, "%.3f MB", segBytes/1024/1024.) + extra, writer);
}
}
@ -547,8 +545,8 @@ public abstract class LogMergePolicy extends MergePolicy {
}
upto--;
}
if (verbose()) {
message(" level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments");
if (verbose(writer)) {
message(" level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments", writer);
}
// Finally, record all merges that are viable at this level:
@ -558,7 +556,7 @@ public abstract class LogMergePolicy extends MergePolicy {
boolean anyMerging = false;
for(int i=start;i<end;i++) {
final SegmentCommitInfo info = levels.get(i).info;
anyTooLarge |= (size(info) >= maxMergeSize || sizeDocs(info) >= maxMergeDocs);
anyTooLarge |= (size(info, writer) >= maxMergeSize || sizeDocs(info, writer) >= maxMergeDocs);
if (mergingSegments.contains(info)) {
anyMerging = true;
break;
@ -575,12 +573,12 @@ public abstract class LogMergePolicy extends MergePolicy {
mergeInfos.add(levels.get(i).info);
assert infos.contains(levels.get(i).info);
}
if (verbose()) {
message(" add merge=" + writer.get().segString(mergeInfos) + " start=" + start + " end=" + end);
if (verbose(writer)) {
message(" add merge=" + writer.segString(mergeInfos) + " start=" + start + " end=" + end, writer);
}
spec.add(new OneMerge(mergeInfos));
} else if (verbose()) {
message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
} else if (verbose(writer)) {
message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping", writer);
}
start = end;

View File

@ -43,7 +43,7 @@ import java.util.Map;
* {@link MergeSpecification} instance describing the set of
* merges that should be done, or null if no merges are
* necessary. When IndexWriter.forceMerge is called, it calls
* {@link #findForcedMerges(SegmentInfos,int,Map)} and the MergePolicy should
* {@link #findForcedMerges(SegmentInfos,int,Map, IndexWriter)} and the MergePolicy should
* then return the necessary merges.</p>
*
* <p>Note that the policy can return more than one merge at
@ -372,9 +372,6 @@ public abstract class MergePolicy implements java.io.Closeable {
*/
protected static final long DEFAULT_MAX_CFS_SEGMENT_SIZE = Long.MAX_VALUE;
/** {@link IndexWriter} that contains this instance. */
protected SetOnce<IndexWriter> writer;
/** If the size of the merge segment exceeds this ratio of
* the total index size then it will remain in
* non-compound format */
@ -385,9 +382,7 @@ public abstract class MergePolicy implements java.io.Closeable {
protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
/**
* Creates a new merge policy instance. Note that if you intend to use it
* without passing it to {@link IndexWriter}, you should call
* {@link #setIndexWriter(IndexWriter)}.
* Creates a new merge policy instance.
*/
public MergePolicy() {
this(DEFAULT_NO_CFS_RATIO, DEFAULT_MAX_CFS_SEGMENT_SIZE);
@ -399,22 +394,10 @@ public abstract class MergePolicy implements java.io.Closeable {
* defaults than the {@link MergePolicy}
*/
protected MergePolicy(double defaultNoCFSRatio, long defaultMaxCFSSegmentSize) {
writer = new SetOnce<>();
this.noCFSRatio = defaultNoCFSRatio;
this.maxCFSSegmentSize = defaultMaxCFSSegmentSize;
}
/**
* Sets the {@link IndexWriter} to use by this merge policy. This method is
* allowed to be called only once, and is usually set by IndexWriter. If it is
* called more than once, {@link AlreadySetException} is thrown.
*
* @see SetOnce
*/
public void setIndexWriter(IndexWriter writer) {
this.writer.set(writer);
}
/**
* Determine what set of merge operations are now necessary on the index.
* {@link IndexWriter} calls this whenever there is a change to the segments.
@ -423,8 +406,9 @@ public abstract class MergePolicy implements java.io.Closeable {
* @param mergeTrigger the event that triggered the merge
* @param segmentInfos
* the total set of segments in the index
* @param writer the IndexWriter to find the merges on
*/
public abstract MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
public abstract MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer)
throws IOException;
/**
@ -447,9 +431,10 @@ public abstract class MergePolicy implements java.io.Closeable {
* an original segment present in the
* to-be-merged index; else, it was a segment
* produced by a cascaded merge.
* @param writer the IndexWriter to find the merges on
*/
public abstract MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
throws IOException;
/**
@ -458,9 +443,10 @@ public abstract class MergePolicy implements java.io.Closeable {
*
* @param segmentInfos
* the total set of segments in the index
* @param writer the IndexWriter to find the merges on
*/
public abstract MergeSpecification findForcedDeletesMerges(
SegmentInfos segmentInfos) throws IOException;
SegmentInfos segmentInfos, IndexWriter writer) throws IOException;
/**
* Release all resources for the policy.
@ -475,11 +461,11 @@ public abstract class MergePolicy implements java.io.Closeable {
* {@link #getMaxCFSSegmentSizeMB()} and the size is less or equal to the
* TotalIndexSize * {@link #getNoCFSRatio()} otherwise <code>false</code>.
*/
public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo) throws IOException {
public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, IndexWriter writer) throws IOException {
if (getNoCFSRatio() == 0.0) {
return false;
}
long mergedInfoSize = size(mergedInfo);
long mergedInfoSize = size(mergedInfo, writer);
if (mergedInfoSize > maxCFSSegmentSize) {
return false;
}
@ -488,7 +474,7 @@ public abstract class MergePolicy implements java.io.Closeable {
}
long totalSize = 0;
for (SegmentCommitInfo info : infos) {
totalSize += size(info);
totalSize += size(info, writer);
}
return mergedInfoSize <= getNoCFSRatio() * totalSize;
}
@ -496,9 +482,9 @@ public abstract class MergePolicy implements java.io.Closeable {
/** Return the byte size of the provided {@link
* SegmentCommitInfo}, pro-rated by percentage of
* non-deleted documents is set. */
protected long size(SegmentCommitInfo info) throws IOException {
protected long size(SegmentCommitInfo info, IndexWriter writer) throws IOException {
long byteSize = info.sizeInBytes();
int delCount = writer.get().numDeletedDocs(info);
int delCount = writer.numDeletedDocs(info);
double delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((float)delCount / (float)info.info.getDocCount()));
assert delRatio <= 1.0;
return (info.info.getDocCount() <= 0 ? byteSize : (long)(byteSize * (1.0 - delRatio)));
@ -507,13 +493,12 @@ public abstract class MergePolicy implements java.io.Closeable {
/** Returns true if this single info is already fully merged (has no
* pending deletes, is in the same dir as the
* writer, and matches the current compound file setting */
protected final boolean isMerged(SegmentInfos infos, SegmentCommitInfo info) throws IOException {
IndexWriter w = writer.get();
assert w != null;
boolean hasDeletions = w.numDeletedDocs(info) > 0;
protected final boolean isMerged(SegmentInfos infos, SegmentCommitInfo info, IndexWriter writer) throws IOException {
assert writer != null;
boolean hasDeletions = writer.numDeletedDocs(info) > 0;
return !hasDeletions &&
info.info.dir == w.getDirectory() &&
useCompoundFile(infos, info) == info.info.getUseCompoundFile();
info.info.dir == writer.getDirectory() &&
useCompoundFile(infos, info, writer) == info.info.getUseCompoundFile();
}
/** Returns current {@code noCFSRatio}.

View File

@ -19,7 +19,7 @@ package org.apache.lucene.index;
/**
* MergeTrigger is passed to
* {@link org.apache.lucene.index.MergePolicy#findMerges(MergeTrigger, org.apache.lucene.index.SegmentInfos)} to indicate the
* {@link org.apache.lucene.index.MergePolicy#findMerges(MergeTrigger, org.apache.lucene.index.SegmentInfos, IndexWriter)} to indicate the
* event that triggered the merge.
*/
public enum MergeTrigger {

View File

@ -38,25 +38,22 @@ public final class NoMergePolicy extends MergePolicy {
public void close() {}
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos) { return null; }
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer) { return null; }
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge) { return null; }
int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) { return null; }
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) { return null; }
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) { return null; }
@Override
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment) {
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) {
return newSegment.info.getUseCompoundFile();
}
@Override
public void setIndexWriter(IndexWriter writer) {}
@Override
protected long size(SegmentCommitInfo info) throws IOException {
protected long size(SegmentCommitInfo info, IndexWriter writer) throws IOException {
return Long.MAX_VALUE;
}

View File

@ -236,11 +236,17 @@ public class TieredMergePolicy extends MergePolicy {
}
private class SegmentByteSizeDescending implements Comparator<SegmentCommitInfo> {
private final IndexWriter writer;
SegmentByteSizeDescending(IndexWriter writer) {
this.writer = writer;
}
@Override
public int compare(SegmentCommitInfo o1, SegmentCommitInfo o2) {
try {
final long sz1 = size(o1);
final long sz2 = size(o2);
final long sz1 = size(o1, writer);
final long sz2 = size(o2, writer);
if (sz1 > sz2) {
return -1;
} else if (sz2 > sz1) {
@ -272,32 +278,32 @@ public class TieredMergePolicy extends MergePolicy {
}
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos) throws IOException {
if (verbose()) {
message("findMerges: " + infos.size() + " segments");
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos, IndexWriter writer) throws IOException {
if (verbose(writer)) {
message("findMerges: " + infos.size() + " segments", writer);
}
if (infos.size() == 0) {
return null;
}
final Collection<SegmentCommitInfo> merging = writer.get().getMergingSegments();
final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
final Collection<SegmentCommitInfo> toBeMerged = new HashSet<>();
final List<SegmentCommitInfo> infosSorted = new ArrayList<>(infos.asList());
Collections.sort(infosSorted, new SegmentByteSizeDescending());
Collections.sort(infosSorted, new SegmentByteSizeDescending(writer));
// Compute total index bytes & print details about the index
long totIndexBytes = 0;
long minSegmentBytes = Long.MAX_VALUE;
for(SegmentCommitInfo info : infosSorted) {
final long segBytes = size(info);
if (verbose()) {
final long segBytes = size(info, writer);
if (verbose(writer)) {
String extra = merging.contains(info) ? " [merging]" : "";
if (segBytes >= maxMergedSegmentBytes/2.0) {
extra += " [skip: too large]";
} else if (segBytes < floorSegmentBytes) {
extra += " [floored]";
}
message(" seg=" + writer.get().segString(info) + " size=" + String.format(Locale.ROOT, "%.3f", segBytes/1024/1024.) + " MB" + extra);
message(" seg=" + writer.segString(info) + " size=" + String.format(Locale.ROOT, "%.3f", segBytes/1024/1024.) + " MB" + extra, writer);
}
minSegmentBytes = Math.min(segBytes, minSegmentBytes);
@ -308,8 +314,8 @@ public class TieredMergePolicy extends MergePolicy {
// If we have too-large segments, grace them out
// of the maxSegmentCount:
int tooBigCount = 0;
while (tooBigCount < infosSorted.size() && size(infosSorted.get(tooBigCount)) >= maxMergedSegmentBytes/2.0) {
totIndexBytes -= size(infosSorted.get(tooBigCount));
while (tooBigCount < infosSorted.size() && size(infosSorted.get(tooBigCount), writer) >= maxMergedSegmentBytes/2.0) {
totIndexBytes -= size(infosSorted.get(tooBigCount), writer);
tooBigCount++;
}
@ -353,8 +359,8 @@ public class TieredMergePolicy extends MergePolicy {
final boolean maxMergeIsRunning = mergingBytes >= maxMergedSegmentBytes;
if (verbose()) {
message(" allowedSegmentCount=" + allowedSegCountInt + " vs count=" + infosSorted.size() + " (eligible count=" + eligible.size() + ") tooBigCount=" + tooBigCount);
if (verbose(writer)) {
message(" allowedSegmentCount=" + allowedSegCountInt + " vs count=" + infosSorted.size() + " (eligible count=" + eligible.size() + ") tooBigCount=" + tooBigCount, writer);
}
if (eligible.size() == 0) {
@ -378,7 +384,7 @@ public class TieredMergePolicy extends MergePolicy {
boolean hitTooLarge = false;
for(int idx = startIdx;idx<eligible.size() && candidate.size() < maxMergeAtOnce;idx++) {
final SegmentCommitInfo info = eligible.get(idx);
final long segBytes = size(info);
final long segBytes = size(info, writer);
if (totAfterMergeBytes + segBytes > maxMergedSegmentBytes) {
hitTooLarge = true;
@ -394,9 +400,9 @@ public class TieredMergePolicy extends MergePolicy {
totAfterMergeBytes += segBytes;
}
final MergeScore score = score(candidate, hitTooLarge, mergingBytes);
if (verbose()) {
message(" maybe=" + writer.get().segString(candidate) + " score=" + score.getScore() + " " + score.getExplanation() + " tooLarge=" + hitTooLarge + " size=" + String.format(Locale.ROOT, "%.3f MB", totAfterMergeBytes/1024./1024.));
final MergeScore score = score(candidate, hitTooLarge, mergingBytes, writer);
if (verbose(writer)) {
message(" maybe=" + writer.segString(candidate) + " score=" + score.getScore() + " " + score.getExplanation() + " tooLarge=" + hitTooLarge + " size=" + String.format(Locale.ROOT, "%.3f MB", totAfterMergeBytes/1024./1024.), writer);
}
// If we are already running a max sized merge
@ -420,8 +426,8 @@ public class TieredMergePolicy extends MergePolicy {
toBeMerged.add(info);
}
if (verbose()) {
message(" add merge=" + writer.get().segString(merge.segments) + " size=" + String.format(Locale.ROOT, "%.3f MB", bestMergeBytes/1024./1024.) + " score=" + String.format(Locale.ROOT, "%.3f", bestScore.getScore()) + " " + bestScore.getExplanation() + (bestTooLarge ? " [max merge]" : ""));
if (verbose(writer)) {
message(" add merge=" + writer.segString(merge.segments) + " size=" + String.format(Locale.ROOT, "%.3f MB", bestMergeBytes/1024./1024.) + " score=" + String.format(Locale.ROOT, "%.3f", bestScore.getScore()) + " " + bestScore.getExplanation() + (bestTooLarge ? " [max merge]" : ""), writer);
}
} else {
return spec;
@ -433,12 +439,12 @@ public class TieredMergePolicy extends MergePolicy {
}
/** Expert: scores one merge; subclasses can override. */
protected MergeScore score(List<SegmentCommitInfo> candidate, boolean hitTooLarge, long mergingBytes) throws IOException {
protected MergeScore score(List<SegmentCommitInfo> candidate, boolean hitTooLarge, long mergingBytes, IndexWriter writer) throws IOException {
long totBeforeMergeBytes = 0;
long totAfterMergeBytes = 0;
long totAfterMergeBytesFloored = 0;
for(SegmentCommitInfo info : candidate) {
final long segBytes = size(info);
final long segBytes = size(info, writer);
totAfterMergeBytes += segBytes;
totAfterMergeBytesFloored += floorSize(segBytes);
totBeforeMergeBytes += info.sizeInBytes();
@ -458,7 +464,7 @@ public class TieredMergePolicy extends MergePolicy {
// over time:
skew = 1.0/maxMergeAtOnce;
} else {
skew = ((double) floorSize(size(candidate.get(0))))/totAfterMergeBytesFloored;
skew = ((double) floorSize(size(candidate.get(0), writer)))/totAfterMergeBytesFloored;
}
// Strongly favor merges with less skew (smaller
@ -492,14 +498,14 @@ public class TieredMergePolicy extends MergePolicy {
}
@Override
public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge) throws IOException {
if (verbose()) {
message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToMerge=" + segmentsToMerge);
public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
if (verbose(writer)) {
message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + writer.segString(infos) + " segmentsToMerge=" + segmentsToMerge, writer);
}
List<SegmentCommitInfo> eligible = new ArrayList<>();
boolean forceMergeRunning = false;
final Collection<SegmentCommitInfo> merging = writer.get().getMergingSegments();
final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
boolean segmentIsOriginal = false;
for(SegmentCommitInfo info : infos) {
final Boolean isOriginal = segmentsToMerge.get(info);
@ -518,18 +524,18 @@ public class TieredMergePolicy extends MergePolicy {
}
if ((maxSegmentCount > 1 && eligible.size() <= maxSegmentCount) ||
(maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isMerged(infos, eligible.get(0))))) {
if (verbose()) {
message("already merged");
(maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isMerged(infos, eligible.get(0), writer)))) {
if (verbose(writer)) {
message("already merged", writer);
}
return null;
}
Collections.sort(eligible, new SegmentByteSizeDescending());
Collections.sort(eligible, new SegmentByteSizeDescending(writer));
if (verbose()) {
message("eligible=" + eligible);
message("forceMergeRunning=" + forceMergeRunning);
if (verbose(writer)) {
message("eligible=" + eligible, writer);
message("forceMergeRunning=" + forceMergeRunning, writer);
}
int end = eligible.size();
@ -542,8 +548,8 @@ public class TieredMergePolicy extends MergePolicy {
spec = new MergeSpecification();
}
final OneMerge merge = new OneMerge(eligible.subList(end-maxMergeAtOnceExplicit, end));
if (verbose()) {
message("add merge=" + writer.get().segString(merge.segments));
if (verbose(writer)) {
message("add merge=" + writer.segString(merge.segments), writer);
}
spec.add(merge);
end -= maxMergeAtOnceExplicit;
@ -553,8 +559,8 @@ public class TieredMergePolicy extends MergePolicy {
// Do final merge
final int numToMerge = end - maxSegmentCount + 1;
final OneMerge merge = new OneMerge(eligible.subList(end-numToMerge, end));
if (verbose()) {
message("add final merge=" + merge.segString(writer.get().getDirectory()));
if (verbose(writer)) {
message("add final merge=" + merge.segString(writer.getDirectory()), writer);
}
spec = new MergeSpecification();
spec.add(merge);
@ -564,14 +570,14 @@ public class TieredMergePolicy extends MergePolicy {
}
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos infos) throws IOException {
if (verbose()) {
message("findForcedDeletesMerges infos=" + writer.get().segString(infos) + " forceMergeDeletesPctAllowed=" + forceMergeDeletesPctAllowed);
public MergeSpecification findForcedDeletesMerges(SegmentInfos infos, IndexWriter writer) throws IOException {
if (verbose(writer)) {
message("findForcedDeletesMerges infos=" + writer.segString(infos) + " forceMergeDeletesPctAllowed=" + forceMergeDeletesPctAllowed, writer);
}
final List<SegmentCommitInfo> eligible = new ArrayList<>();
final Collection<SegmentCommitInfo> merging = writer.get().getMergingSegments();
final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
for(SegmentCommitInfo info : infos) {
double pctDeletes = 100.*((double) writer.get().numDeletedDocs(info))/info.info.getDocCount();
double pctDeletes = 100.*((double) writer.numDeletedDocs(info))/info.info.getDocCount();
if (pctDeletes > forceMergeDeletesPctAllowed && !merging.contains(info)) {
eligible.add(info);
}
@ -581,10 +587,10 @@ public class TieredMergePolicy extends MergePolicy {
return null;
}
Collections.sort(eligible, new SegmentByteSizeDescending());
Collections.sort(eligible, new SegmentByteSizeDescending(writer));
if (verbose()) {
message("eligible=" + eligible);
if (verbose(writer)) {
message("eligible=" + eligible, writer);
}
int start = 0;
@ -600,8 +606,8 @@ public class TieredMergePolicy extends MergePolicy {
}
final OneMerge merge = new OneMerge(eligible.subList(start, end));
if (verbose()) {
message("add merge=" + writer.get().segString(merge.segments));
if (verbose(writer)) {
message("add merge=" + writer.segString(merge.segments), writer);
}
spec.add(merge);
start = end;
@ -618,13 +624,12 @@ public class TieredMergePolicy extends MergePolicy {
return Math.max(floorSegmentBytes, bytes);
}
private boolean verbose() {
final IndexWriter w = writer.get();
return w != null && w.infoStream.isEnabled("TMP");
private boolean verbose(IndexWriter writer) {
return writer != null && writer.infoStream.isEnabled("TMP");
}
private void message(String message) {
writer.get().infoStream.message("TMP", message);
private void message(String message, IndexWriter writer) {
writer.infoStream.message("TMP", message);
}
@Override

View File

@ -69,18 +69,12 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
}
@Override
public void setIndexWriter(IndexWriter writer) {
super.setIndexWriter(writer);
base.setIndexWriter(writer);
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
return base.findMerges(null, segmentInfos, writer);
}
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos) throws IOException {
return base.findMerges(null, segmentInfos);
}
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge) throws IOException {
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
// first find all old segments
final Map<SegmentCommitInfo,Boolean> oldSegments = new HashMap<>();
for (final SegmentCommitInfo si : segmentInfos) {
@ -90,14 +84,14 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
}
}
if (verbose()) {
message("findForcedMerges: segmentsToUpgrade=" + oldSegments);
if (verbose(writer)) {
message("findForcedMerges: segmentsToUpgrade=" + oldSegments, writer);
}
if (oldSegments.isEmpty())
return null;
MergeSpecification spec = base.findForcedMerges(segmentInfos, maxSegmentCount, oldSegments);
MergeSpecification spec = base.findForcedMerges(segmentInfos, maxSegmentCount, oldSegments, writer);
if (spec != null) {
// remove all segments that are in merge specification from oldSegments,
@ -109,9 +103,9 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
}
if (!oldSegments.isEmpty()) {
if (verbose()) {
if (verbose(writer)) {
message("findForcedMerges: " + base.getClass().getSimpleName() +
" does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments);
" does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments, writer);
}
final List<SegmentCommitInfo> newInfos = new ArrayList<>();
for (final SegmentCommitInfo si : segmentInfos) {
@ -130,13 +124,13 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
}
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws IOException {
return base.findForcedDeletesMerges(segmentInfos);
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
return base.findForcedDeletesMerges(segmentInfos, writer);
}
@Override
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment) throws IOException {
return base.useCompoundFile(segments, newSegment);
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
return base.useCompoundFile(segments, newSegment, writer);
}
@Override
@ -149,12 +143,11 @@ public class UpgradeIndexMergePolicy extends MergePolicy {
return "[" + getClass().getSimpleName() + "->" + base + "]";
}
private boolean verbose() {
final IndexWriter w = writer.get();
return w != null && w.infoStream.isEnabled("UPGMP");
private boolean verbose(IndexWriter writer) {
return writer != null && writer.infoStream.isEnabled("UPGMP");
}
private void message(String message) {
writer.get().infoStream.message("UPGMP", message);
private void message(String message, IndexWriter writer) {
writer.infoStream.message("UPGMP", message);
}
}

View File

@ -30,9 +30,9 @@ public class TestNoMergePolicy extends LuceneTestCase {
@Test
public void testNoMergePolicy() throws Exception {
MergePolicy mp = NoMergePolicy.INSTANCE;
assertNull(mp.findMerges(null, (SegmentInfos)null));
assertNull(mp.findForcedMerges(null, 0, null));
assertNull(mp.findForcedDeletesMerges(null));
assertNull(mp.findMerges(null, (SegmentInfos)null, null));
assertNull(mp.findForcedMerges(null, 0, null, null));
assertNull(mp.findForcedDeletesMerges(null, null));
mp.close();
}

View File

@ -257,7 +257,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
public void close() {}
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer)
throws IOException {
MergeSpecification ms = new MergeSpecification();
if (doMerge) {
@ -271,19 +271,19 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
throws IOException {
return null;
}
@Override
public MergeSpecification findForcedDeletesMerges(
SegmentInfos segmentInfos) throws IOException {
SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
return null;
}
@Override
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment) {
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) {
return useCompoundFile;
}
}

View File

@ -186,21 +186,21 @@ public final class SortingMergePolicy extends MergePolicy {
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger,
SegmentInfos segmentInfos) throws IOException {
return sortedMergeSpecification(in.findMerges(mergeTrigger, segmentInfos));
SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
return sortedMergeSpecification(in.findMerges(mergeTrigger, segmentInfos, writer));
}
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
throws IOException {
return sortedMergeSpecification(in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge));
return sortedMergeSpecification(in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer));
}
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer)
throws IOException {
return sortedMergeSpecification(in.findForcedDeletesMerges(segmentInfos));
return sortedMergeSpecification(in.findForcedDeletesMerges(segmentInfos, writer));
}
@Override
@ -210,13 +210,8 @@ public final class SortingMergePolicy extends MergePolicy {
@Override
public boolean useCompoundFile(SegmentInfos segments,
SegmentCommitInfo newSegment) throws IOException {
return in.useCompoundFile(segments, newSegment);
}
@Override
public void setIndexWriter(IndexWriter writer) {
in.setIndexWriter(writer);
SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
return in.useCompoundFile(segments, newSegment, writer);
}
@Override

View File

@ -54,7 +54,7 @@ public class AlcoholicMergePolicy extends LogMergePolicy {
@Override
//@BlackMagic(level=Voodoo);
protected long size(SegmentCommitInfo info) throws IOException {
protected long size(SegmentCommitInfo info, IndexWriter writer) throws IOException {
int hourOfDay = calendar.get(Calendar.HOUR_OF_DAY);
if (hourOfDay < 6 ||
hourOfDay > 20 ||

View File

@ -49,14 +49,14 @@ public class MockRandomMergePolicy extends MergePolicy {
}
@Override
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos) {
public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer) {
MergeSpecification mergeSpec = null;
//System.out.println("MRMP: findMerges sis=" + segmentInfos);
int numSegments = segmentInfos.size();
List<SegmentCommitInfo> segments = new ArrayList<>();
final Collection<SegmentCommitInfo> merging = writer.get().getMergingSegments();
final Collection<SegmentCommitInfo> merging = writer.getMergingSegments();
for(SegmentCommitInfo sipc : segmentInfos) {
if (!merging.contains(sipc)) {
@ -85,7 +85,7 @@ public class MockRandomMergePolicy extends MergePolicy {
@Override
public MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
throws IOException {
final List<SegmentCommitInfo> eligibleSegments = new ArrayList<>();
@ -126,8 +126,8 @@ public class MockRandomMergePolicy extends MergePolicy {
}
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws IOException {
return findMerges(null, segmentInfos);
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
return findMerges(null, segmentInfos, writer);
}
@Override
@ -135,7 +135,7 @@ public class MockRandomMergePolicy extends MergePolicy {
}
@Override
public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo) throws IOException {
public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, IndexWriter writer) throws IOException {
// 80% of the time we create CFS:
return random.nextInt(5) != 1;
}