remove confusing dead code

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1147671 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2011-07-17 16:09:02 +00:00
parent 521af09c38
commit 50be8c0489
4 changed files with 8 additions and 136 deletions

org/apache/lucene/index/MergeDocIDRemapper.java

@@ -1,110 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Remaps docIDs after a merge has completed, where the
- *  merged segments had at least one deletion. This is used
- *  to renumber the buffered deletes in IndexWriter when a
- *  merge of segments with deletions commits. */
-
-final class MergeDocIDRemapper {
-  int[] starts;       // used for binary search of mapped docID
-  int[] newStarts;    // starts, minus the deletes
-  int[][] docMaps;    // maps docIDs in the merged set
-  int minDocID;       // minimum docID that needs renumbering
-  int maxDocID;       // 1+ the max docID that needs renumbering
-  int docShift;       // total # deleted docs that were compacted by this merge
-
-  public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount) {
-    this.docMaps = docMaps;
-    SegmentInfo firstSegment = merge.segments.get(0);
-    int i = 0;
-    while(true) {
-      SegmentInfo info = infos.info(i);
-      if (info.equals(firstSegment))
-        break;
-      minDocID += info.docCount;
-      i++;
-    }
-
-    int numDocs = 0;
-    for(int j=0;j<docMaps.length;i++,j++) {
-      numDocs += infos.info(i).docCount;
-      assert infos.info(i).equals(merge.segments.get(j));
-    }
-    maxDocID = minDocID + numDocs;
-
-    starts = new int[docMaps.length];
-    newStarts = new int[docMaps.length];
-    starts[0] = minDocID;
-    newStarts[0] = minDocID;
-    for(i=1;i<docMaps.length;i++) {
-      final int lastDocCount = merge.segments.get(i-1).docCount;
-      starts[i] = starts[i-1] + lastDocCount;
-      newStarts[i] = newStarts[i-1] + lastDocCount - delCounts[i-1];
-    }
-    docShift = numDocs - mergedDocCount;
-
-    // There are rare cases when docShift is 0. It happens
-    // if you try to delete a docID that's out of bounds,
-    // because the SegmentReader still allocates deletedDocs
-    // and pretends it has deletions ... so we can't make
-    // this assert here
-    // assert docShift > 0;
-
-    // Make sure it all adds up:
-    assert docShift == maxDocID - (newStarts[docMaps.length-1] + merge.segments.get(docMaps.length-1).docCount - delCounts[docMaps.length-1]);
-  }
-
-  public int remap(int oldDocID) {
-    if (oldDocID < minDocID)
-      // Unaffected by merge
-      return oldDocID;
-    else if (oldDocID >= maxDocID)
-      // This doc was "after" the merge, so simple shift
-      return oldDocID - docShift;
-    else {
-      // Binary search to locate this document & find its new docID
-      int lo = 0;                      // search starts array
-      int hi = docMaps.length - 1;     // for first element less
-
-      while (hi >= lo) {
-        int mid = (lo + hi) >>> 1;
-        int midValue = starts[mid];
-        if (oldDocID < midValue)
-          hi = mid - 1;
-        else if (oldDocID > midValue)
-          lo = mid + 1;
-        else {                         // found a match
-          while (mid+1 < docMaps.length && starts[mid+1] == midValue) {
-            mid++;                     // scan to last match
-          }
-          if (docMaps[mid] != null)
-            return newStarts[mid] + docMaps[mid][oldDocID-starts[mid]];
-          else
-            return newStarts[mid] + oldDocID-starts[mid];
-        }
-      }
-      if (docMaps[hi] != null)
-        return newStarts[hi] + docMaps[hi][oldDocID-starts[hi]];
-      else
-        return newStarts[hi] + oldDocID-starts[hi];
-    }
-  }
-}
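
To see what remap was computing, here is a minimal, self-contained sketch of the same starts/newStarts/docShift arithmetic. The segment sizes are made up for illustration, and a linear scan stands in for the binary search above:

// Hypothetical example (invented segment sizes): one merge covering two
// segments that began at global docID 5; segment A has 10 docs with 2
// deletions (A-local docs 3 and 7), segment B has 10 docs with 1 deletion
// (B-local doc 0).
public class RemapSketch {
  static final int MIN_DOC_ID = 5;          // first docID touched by the merge
  static final int MAX_DOC_ID = 25;         // 1 + last docID touched by the merge
  static final int DOC_SHIFT  = 3;          // deleted docs compacted away (2 + 1)
  static final int[] STARTS     = {5, 15};  // old global docID base of A and B
  static final int[] NEW_STARTS = {5, 13};  // same bases once A's 2 deletes are gone

  // Per-reader maps of the kind SegmentMerger builds ("docMap[j] = newDocID++"):
  // DOC_MAPS[i][j] is the compacted position of segment i's j-th doc, -1 if deleted.
  static final int[][] DOC_MAPS = {
    {0, 1, 2, -1, 3, 4, 5, -1, 6, 7},
    {-1, 0, 1, 2, 3, 4, 5, 6, 7, 8},
  };

  static int remap(int oldDocID) {
    if (oldDocID < MIN_DOC_ID) {
      return oldDocID;                      // before the merged segments: unaffected
    } else if (oldDocID >= MAX_DOC_ID) {
      return oldDocID - DOC_SHIFT;          // after them: a simple shift
    } else {
      int seg = oldDocID < STARTS[1] ? 0 : 1;  // the real class binary-searched STARTS
      return NEW_STARTS[seg] + DOC_MAPS[seg][oldDocID - STARTS[seg]];
    }
  }

  public static void main(String[] args) {
    System.out.println(remap(2));   // 2  (before the merge, unchanged)
    System.out.println(remap(9));   // 8  (A-local live doc 4 -> 5 + 3)
    System.out.println(remap(16));  // 13 (B-local live doc 1 -> 13 + 0)
    System.out.println(remap(30));  // 27 (after the merge, shifted down by 3)
  }
}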

SegmentMerger.java

@@ -39,7 +39,6 @@ import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.MultiBits;
 import org.apache.lucene.util.ReaderUtil;
 
 /**
@@ -513,7 +512,6 @@ final class SegmentMerger {
     mergeState.mergedDocCount = mergedDocs;
 
     // Remap docIDs
-    mergeState.delCounts = new int[mergeState.readerCount];
     mergeState.docMaps = new int[mergeState.readerCount][];
     mergeState.docBase = new int[mergeState.readerCount];
     mergeState.hasPayloadProcessorProvider = payloadProcessorProvider != null;
@@ -528,11 +526,10 @@
       final IndexReader reader = readers.get(i);
-      mergeState.delCounts[i] = reader.numDeletedDocs();
 
       mergeState.docBase[i] = docBase;
       docBase += reader.numDocs();
       inputDocBase += reader.maxDoc();
-      if (mergeState.delCounts[i] != 0) {
+      if (reader.hasDeletions()) {
         int delCount = 0;
         final Bits liveDocs = reader.getLiveDocs();
         assert liveDocs != null;
@@ -547,7 +544,7 @@
             docMap[j] = newDocID++;
           }
         }
-        assert delCount == mergeState.delCounts[i]: "reader delCount=" + mergeState.delCounts[i] + " vs recomputed delCount=" + delCount;
+        assert delCount == reader.numDeletedDocs(): "reader delCount=" + reader.numDeletedDocs() + " vs recomputed delCount=" + delCount;
       }
 
       if (payloadProcessorProvider != null) {
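
The loop excerpted above is what fills mergeState.docMaps[i]: each live doc gets the next consecutive new ID via docMap[j] = newDocID++. As a sketch of that shape, assuming deleted slots are marked with -1 (that branch falls outside the excerpt) and simplifying the liveDocs Bits to a boolean[]:

  // Hypothetical standalone version of the per-reader docMap construction:
  static int[] buildDocMap(boolean[] liveDocs) {
    final int[] docMap = new int[liveDocs.length];
    int newDocID = 0;
    for (int j = 0; j < liveDocs.length; j++) {
      // live docs get consecutive compacted IDs; deleted docs are marked -1
      docMap[j] = liveDocs[j] ? newDocID++ : -1;
    }
    return docMap;
  }
  // e.g. buildDocMap(new boolean[] {true, true, false, true}) == {0, 1, -1, 2}
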
@@ -557,12 +554,6 @@
     codec = segmentWriteState.segmentCodecs.codec();
     final FieldsConsumer consumer = codec.fieldsConsumer(segmentWriteState);
     try {
-      // NOTE: this is silly, yet, necessary -- we create a
-      // MultiBits as our skip docs only to have it broken
-      // apart when we step through the docs enums in
-      // MultiDocsEnum.
-      mergeState.multiLiveDocs = new MultiBits(bits, bitsStarts, true);
-
       consumer.merge(mergeState,
                      new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
                                      slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY)));
@@ -591,8 +582,6 @@
       }
       perDocBitsStarts.add(docBase);
     if (!perDocSlices.isEmpty()) {
-      mergeState.multiLiveDocs = new MultiBits(perDocBits, perDocBitsStarts,
-                                               true);
       final PerDocConsumer docsConsumer = codec
           .docsConsumer(new PerDocWriteState(segmentWriteState));
       boolean success = false;
@@ -612,14 +601,6 @@
   private MergeState mergeState;
 
-  int[][] getDocMaps() {
-    return mergeState.docMaps;
-  }
-
-  int[] getDelCounts() {
-    return mergeState.delCounts;
-  }
 
   public boolean getAnyNonBulkMerges() {
     assert matchedCount <= readers.size();
     return matchedCount != readers.size();

MergeState.java

@@ -26,7 +26,6 @@ import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.PayloadProcessorProvider.DirPayloadProcessor;
 import org.apache.lucene.index.PayloadProcessorProvider.PayloadProcessor;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
 
 /** Holds common state used during segment merging
  *
@@ -36,10 +35,8 @@ public class MergeState {
   public List<IndexReader> readers;    // Readers being merged
   public int readerCount;              // Number of readers being merged
   public int[][] docMaps;              // Maps docIDs around deletions
-  public int[] delCounts;              // Deletion count per reader
   public int[] docBase;                // New docID base per reader
   public int mergedDocCount;           // Total # merged docs
-  public Bits multiLiveDocs;
   public CheckAbort checkAbort;
 
   // Updated per field;

TermsConsumer.java

@@ -69,7 +69,9 @@ public abstract class TermsConsumer {
     MultiDocsEnum docsEnumIn = null;
 
     while((term = termsEnum.next()) != null) {
-      docsEnumIn = (MultiDocsEnum) termsEnum.docs(mergeState.multiLiveDocs, docsEnumIn);
+      // We can pass null for liveDocs, because the
+      // mapping enum will skip the non-live docs:
+      docsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsEnumIn);
       if (docsEnumIn != null) {
         docsEnum.reset(docsEnumIn);
         final PostingsConsumer postingsConsumer = startTerm(term);
@@ -93,7 +95,9 @@
     postingsEnum.setMergeState(mergeState);
     MultiDocsAndPositionsEnum postingsEnumIn = null;
     while((term = termsEnum.next()) != null) {
-      postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(mergeState.multiLiveDocs, postingsEnumIn);
+      // We can pass null for liveDocs, because the
+      // mapping enum will skip the non-live docs:
+      postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(null, postingsEnumIn);
       if (postingsEnumIn != null) {
         postingsEnum.reset(postingsEnumIn);
         // set PayloadProcessor
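
The comments added above assert that passing null for liveDocs is safe because the mapping enum skips non-live docs. A hypothetical sketch of that idea (not Lucene's actual mapping-enum code): since deleted docs map to -1 in the per-reader docMap, they never reach the consumer, so no separate liveDocs filter is needed:

import java.util.Arrays;
import java.util.PrimitiveIterator;

public class MappingEnumSketch {
  // docMap: -1 for deleted docs, otherwise the compacted per-reader docID
  static int nextLiveDoc(PrimitiveIterator.OfInt rawDocs, int[] docMap, int newDocBase) {
    while (rawDocs.hasNext()) {
      int doc = rawDocs.nextInt();
      int mapped = docMap[doc];
      if (mapped != -1) {          // live doc: remap into the merged segment
        return newDocBase + mapped;
      }
      // deleted doc: silently skipped, so no upstream liveDocs filter is needed
    }
    return Integer.MAX_VALUE;      // stand-in for NO_MORE_DOCS
  }

  public static void main(String[] args) {
    int[] docMap = {0, -1, 1, 2};  // doc 1 is deleted
    PrimitiveIterator.OfInt raw = Arrays.stream(new int[] {0, 1, 3}).iterator();
    System.out.println(nextLiveDoc(raw, docMap, 100)); // 100 (doc 0 -> base + 0)
    System.out.println(nextLiveDoc(raw, docMap, 100)); // 102 (doc 1 skipped; doc 3 -> base + 2)
  }
}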