LUCENE-4055: remove redundant numDocs

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4055@1341997 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-05-23 19:44:26 +00:00
parent b0d40e65fc
commit 7508958f63
15 changed files with 29 additions and 35 deletions
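
Every hunk below is the same mechanical change: SegmentWriteState stops carrying its own numDocs field, consumers read the per-segment document count through state.segmentInfo.getDocCount(), and the code that actually knows the count (DocumentsWriterPerThread at flush time, SegmentMerger for merges) records it on the SegmentInfo with setDocCount(...) before the write state is built. As a minimal self-contained sketch of that pattern -- the class names are invented stand-ins for illustration, not the real Lucene types, and the set-once guard is only this sketch's own assumption:

    // Toy model of the refactor: the segment info becomes the single owner of the
    // doc count; the write state no longer duplicates it in a numDocs field.
    final class SimpleSegmentInfo {
      private int docCount = -1; // -1 = not set yet (toy convention)

      void setDocCount(int docCount) {
        // set-once guard is an assumption of this sketch, not shown in the diff
        if (this.docCount != -1) {
          throw new IllegalStateException("docCount already set");
        }
        this.docCount = docCount;
      }

      int getDocCount() {
        if (docCount == -1) {
          throw new IllegalStateException("docCount not set yet");
        }
        return docCount;
      }
    }

    final class SimpleWriteState {
      // Before: the state also had "public final int numDocs", duplicating the count.
      // After: consumers reach it through segmentInfo.getDocCount().
      final SimpleSegmentInfo segmentInfo;

      SimpleWriteState(SimpleSegmentInfo segmentInfo) {
        this.segmentInfo = segmentInfo;
      }
    }

    public class RedundantNumDocsSketch {
      public static void main(String[] args) {
        SimpleSegmentInfo info = new SimpleSegmentInfo();
        info.setDocCount(42);                           // producer (flush/merge) records it first
        SimpleWriteState state = new SimpleWriteState(info);
        int maxDoc = state.segmentInfo.getDocCount();   // consumers read it back this way
        System.out.println("maxDoc=" + maxDoc);
      }
    }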


@@ -123,11 +123,11 @@ public final class Lucene40PostingsWriter extends PostingsWriterBase {
}
}
-totalNumDocs = state.numDocs;
+totalNumDocs = state.segmentInfo.getDocCount();
skipListWriter = new Lucene40SkipListWriter(skipInterval,
maxSkipLevels,
-state.numDocs,
+totalNumDocs,
freqOut,
proxOut);
}


@@ -138,11 +138,11 @@ public final class SepPostingsWriter extends PostingsWriterBase {
final String skipFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SKIP_EXTENSION);
skipOut = state.directory.createOutput(skipFileName, state.context);
-totalNumDocs = state.numDocs;
+totalNumDocs = state.segmentInfo.getDocCount();
skipListWriter = new SepSkipListWriter(skipInterval,
maxSkipLevels,
-state.numDocs,
+totalNumDocs,
freqOut, docOut,
posOut, payloadOut);


@@ -81,7 +81,7 @@ final class DocFieldProcessor extends DocConsumer {
consumer.flush(childFields, state);
for (DocValuesConsumerAndDocID consumer : docValues.values()) {
-consumer.docValuesConsumer.finish(state.numDocs);
+consumer.docValuesConsumer.finish(state.segmentInfo.getDocCount());
}
// close perDocConsumer during flush to ensure all files are flushed due to PerCodec CFS


@@ -447,8 +447,9 @@ class DocumentsWriterPerThread {
FlushedSegment flush() throws IOException {
assert numDocsInRAM > 0;
assert deleteSlice == null : "all deletes must be applied in prepareFlush";
+segmentInfo.setDocCount(numDocsInRAM);
flushState = new SegmentWriteState(infoStream, directory, segmentInfo, fieldInfos.finish(),
-numDocsInRAM, writer.getConfig().getTermIndexInterval(),
+writer.getConfig().getTermIndexInterval(),
pendingDeletes, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())));
final double startMBUsed = parent.flushControl.netBytes() / 1024. / 1024.;
@@ -481,13 +482,11 @@
try {
consumer.flush(flushState);
pendingDeletes.terms.clear();
-// nocommit use setter and make this a SetOnce:
-segmentInfo.setDocCount(flushState.numDocs);
segmentInfo.setFiles(new HashSet<String>(directory.getCreatedFiles()));
final SegmentInfoPerCommit segmentInfoPerCommit = new SegmentInfoPerCommit(segmentInfo, 0, -1L);
if (infoStream.isEnabled("DWPT")) {
-infoStream.message("DWPT", "new segment has " + (flushState.liveDocs == null ? 0 : (flushState.numDocs - flushState.delCountOnFlush)) + " deleted docs");
+infoStream.message("DWPT", "new segment has " + (flushState.liveDocs == null ? 0 : (flushState.segmentInfo.getDocCount() - flushState.delCountOnFlush)) + " deleted docs");
infoStream.message("DWPT", "new segment has " +
(flushState.fieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
(flushState.fieldInfos.hasNorms() ? "norms" : "no norms") + "; " +
@@ -498,7 +497,7 @@
infoStream.message("DWPT", "flushed codec=" + codec);
}
-flushedDocCount += flushState.numDocs;
+flushedDocCount += flushState.segmentInfo.getDocCount();
final BufferedDeletes segmentDeletes;
if (pendingDeletes.queries.isEmpty()) {


@@ -367,7 +367,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
final ByteSliceReader freq = new ByteSliceReader();
final ByteSliceReader prox = new ByteSliceReader();
-FixedBitSet visitedDocs = new FixedBitSet(state.numDocs);
+FixedBitSet visitedDocs = new FixedBitSet(state.segmentInfo.getDocCount());
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
@@ -445,7 +445,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
}
numDocs++;
-assert docID < state.numDocs: "doc=" + docID + " maxDoc=" + state.numDocs;
+assert docID < state.segmentInfo.getDocCount(): "doc=" + docID + " maxDoc=" + state.segmentInfo.getDocCount();
// NOTE: we could check here if the docID was
// deleted, and skip it. However, this is somewhat
@@ -467,7 +467,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
// TODO: can we do this reach-around in a cleaner way????
if (state.liveDocs == null) {
-state.liveDocs = docState.docWriter.codec.liveDocsFormat().newLiveDocs(state.numDocs);
+state.liveDocs = docState.docWriter.codec.liveDocsFormat().newLiveDocs(state.segmentInfo.getDocCount());
}
if (state.liveDocs.get(docID)) {
state.delCountOnFlush++;


@@ -2299,8 +2299,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
MergeState mergeState = merger.merge(); // merge 'em
-info.setDocCount(mergeState.mergedDocCount);
SegmentInfoPerCommit infoPerCommit = new SegmentInfoPerCommit(info, 0, -1L);
info.setFiles(new HashSet<String>(trackingDir.getCreatedFiles()));
@@ -3498,7 +3496,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// This is where all the work happens:
MergeState mergeState = merger.merge();
-merge.info.info.setDocCount(mergeState.mergedDocCount);
+assert mergeState.segmentInfo == merge.info.info;
merge.info.info.setFiles(new HashSet<String>(dirWrapper.getCreatedFiles()));
// Record which codec was used to write the segment


@@ -64,7 +64,7 @@ final class NormsConsumer extends InvertedDocEndConsumer {
if (!fi.omitsNorms()) {
if (toWrite != null && toWrite.initialized()) {
anythingFlushed = true;
-final Type type = toWrite.flush(state.numDocs);
+final Type type = toWrite.flush(state.segmentInfo.getDocCount());
assert fi.getNormType() == type;
} else if (fi.isIndexed()) {
anythingFlushed = true;


@@ -107,16 +107,16 @@ final class SegmentMerger {
// IndexWriter.close(false) takes to actually stop the
// threads.
// nocommit: can we nuke this count too?
mergeState.mergedDocCount = setDocMaps();
+mergeState.segmentInfo.setDocCount(mergeState.mergedDocCount);
mergeDocValuesAndNormsFieldInfos();
setMatchingSegmentReaders();
int numMerged = mergeFields();
assert numMerged == mergeState.mergedDocCount;
final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
-mergeState.fieldInfos, mergeState.mergedDocCount,
-termIndexInterval, null, context);
+mergeState.fieldInfos, termIndexInterval, null, context);
mergeTerms(segmentWriteState);
mergePerDoc(segmentWriteState);


@@ -31,7 +31,6 @@ public class SegmentWriteState {
public final Directory directory;
public final SegmentInfo segmentInfo;
public final FieldInfos fieldInfos;
-public final int numDocs;
public int delCountOnFlush;
// Deletes to apply while we are flushing the segment. A
@@ -56,15 +55,12 @@
public final IOContext context;
public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos,
-int numDocs, int termIndexInterval, BufferedDeletes segDeletes, IOContext context) {
+int termIndexInterval, BufferedDeletes segDeletes, IOContext context) {
this.infoStream = infoStream;
this.segDeletes = segDeletes;
this.directory = directory;
-// nocommit a lot of this is redundant w/ SI! BUT not
-// the Directory!!!! one is tracking one is not!!!
this.segmentInfo = segmentInfo;
this.fieldInfos = fieldInfos;
-this.numDocs = numDocs;
this.termIndexInterval = termIndexInterval;
segmentSuffix = "";
this.context = context;
@@ -78,7 +74,6 @@
directory = state.directory;
segmentInfo = state.segmentInfo;
fieldInfos = state.fieldInfos;
-numDocs = state.numDocs;
termIndexInterval = state.termIndexInterval;
context = state.context;
this.segmentSuffix = segmentSuffix;
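
Net effect of the SegmentWriteState hunks above: the numDocs field and constructor argument are gone, and the attached SegmentInfo is the only remaining source of the count. For reference, the call order after this commit, pieced together from the DocumentsWriterPerThread hunks earlier in the diff; this is an illustrative fragment of the 4.0-era internals, not a standalone compilable example:

    // producer side (flush): fix the count on the SegmentInfo first...
    segmentInfo.setDocCount(numDocsInRAM);
    // ...then build the write state without a separate numDocs argument
    flushState = new SegmentWriteState(infoStream, directory, segmentInfo, fieldInfos.finish(),
        writer.getConfig().getTermIndexInterval(),
        pendingDeletes, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())));
    // consumer side: everything downstream reads the count back through the info
    int maxDoc = flushState.segmentInfo.getDocCount();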


@@ -58,18 +58,19 @@ final class StoredFieldsConsumer {
}
public void flush(SegmentWriteState state) throws IOException {
+int numDocs = state.segmentInfo.getDocCount();
-if (state.numDocs > 0) {
+if (numDocs > 0) {
// It's possible that all documents seen in this segment
// hit non-aborting exceptions, in which case we will
// not have yet init'd the FieldsWriter:
initFieldsWriter(state.context);
-fill(state.numDocs);
+fill(numDocs);
}
if (fieldsWriter != null) {
try {
-fieldsWriter.finish(state.fieldInfos, state.numDocs);
+fieldsWriter.finish(state.fieldInfos, numDocs);
} finally {
fieldsWriter.close();
fieldsWriter = null;


@@ -51,11 +51,12 @@ final class TermVectorsConsumer extends TermsHashConsumer {
@Override
void flush(Map<String, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
if (writer != null) {
+int numDocs = state.segmentInfo.getDocCount();
// At least one doc in this run had term vectors enabled
try {
-fill(state.numDocs);
+fill(numDocs);
assert state.segmentInfo != null;
-writer.finish(state.fieldInfos, state.numDocs);
+writer.finish(state.fieldInfos, numDocs);
} finally {
IOUtils.close(writer);
writer = null;


@@ -620,7 +620,7 @@ public class TestCodecs extends LuceneTestCase {
final int termIndexInterval = _TestUtil.nextInt(random(), 13, 27);
final Codec codec = Codec.getDefault();
final SegmentInfo si = new SegmentInfo(dir, Constants.LUCENE_MAIN_VERSION, SEGMENT, 10000, -1, SEGMENT, false, null, false, codec, null, null);
-final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, si, fieldInfos, 10000, termIndexInterval, null, newIOContext(random()));
+final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, si, fieldInfos, termIndexInterval, null, newIOContext(random()));
final FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(state);
Arrays.sort(fields);


@@ -197,7 +197,7 @@ public class TestDoc extends LuceneTestCase {
final Codec codec = Codec.getDefault();
TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
-final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, 10000, -1, merged, false, null, false, codec, null, null);
+final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, -1, -1, merged, false, null, false, codec, null, null);
SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
MergeState.CheckAbort.NONE, null, new FieldInfos.FieldNumbers(), context);


@@ -77,7 +77,7 @@ public class TestSegmentMerger extends LuceneTestCase {
public void testMerge() throws IOException {
final Codec codec = Codec.getDefault();
-final SegmentInfo si = new SegmentInfo(mergedDir, Constants.LUCENE_MAIN_VERSION, mergedSegment, 10000, -1, mergedSegment, false, null, false, codec, null, null);
+final SegmentInfo si = new SegmentInfo(mergedDir, Constants.LUCENE_MAIN_VERSION, mergedSegment, -1, -1, mergedSegment, false, null, false, codec, null, null);
SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
MergeState.CheckAbort.NONE, null, new FieldInfos.FieldNumbers(), newIOContext(random()));


@@ -51,7 +51,7 @@ class PreFlexRWFieldsWriter extends FieldsConsumer {
try {
final String freqFile = IndexFileNames.segmentFileName(state.segmentInfo.name, "", Lucene3xPostingsFormat.FREQ_EXTENSION);
freqOut = state.directory.createOutput(freqFile, state.context);
-totalNumDocs = state.numDocs;
+totalNumDocs = state.segmentInfo.getDocCount();
success = true;
} finally {
if (!success) {