LUCENE-4055: make SI.docCount private

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4055@1341971 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2012-05-23 18:35:11 +00:00
parent 63efab259c
commit 97b082b3d8
27 changed files with 113 additions and 99 deletions
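
The heart of the change: SegmentInfo.docCount becomes a private field behind a fail-fast getter and a package-private, set-once setter, so a segment's doc count can no longer be read before flush or merge has established it, nor set twice. A minimal, self-contained sketch of that accessor pattern (DocCountSketch and its main harness are illustrative stand-ins, not part of the patch; the real code is in the SegmentInfo hunk further down this page):

// Sketch of the set-once docCount accessors this commit introduces.
// DocCountSketch is a made-up name; the real class is
// org.apache.lucene.index.SegmentInfo.
public class DocCountSketch {
  private int docCount = -1; // -1 is the "not set yet" sentinel, as in the patch

  public int getDocCount() {
    if (docCount == -1) {
      throw new IllegalStateException("docCount isn't set yet");
    }
    return docCount;
  }

  // package private in the real patch: only the index machinery may set it
  void setDocCount(int docCount) {
    if (this.docCount != -1) {
      throw new IllegalStateException("docCount was already set");
    }
    this.docCount = docCount;
  }

  public static void main(String[] args) {
    DocCountSketch si = new DocCountSketch();
    si.setDocCount(42);                   // first set succeeds
    System.out.println(si.getDocCount()); // prints 42
    // si.setDocCount(7);                 // a second set would throw IllegalStateException
  }
}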

View File

@ -69,7 +69,7 @@ class Lucene3xNormsProducer extends PerDocProducer {
// but we just don't do any seeks or reading yet.
public Lucene3xNormsProducer(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context) throws IOException {
Directory separateNormsDir = info.dir; // separate norms are never inside CFS
-maxdoc = info.docCount;
+maxdoc = info.getDocCount();
String segmentName = info.name;
Map<Integer,Long> normGen = info.getNormGen();
boolean success = false;

View File

@ -141,7 +141,7 @@ final class Lucene3xStoredFieldsReader extends StoredFieldsReader implements Clo
public Lucene3xStoredFieldsReader(Directory d, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException {
final String segment = si.getDocStoreSegment();
final int docStoreOffset = si.getDocStoreOffset();
-final int size = si.docCount;
+final int size = si.getDocCount();
boolean success = false;
fieldInfos = fn;
try {
@ -176,8 +176,8 @@ final class Lucene3xStoredFieldsReader extends StoredFieldsReader implements Clo
this.docStoreOffset = 0;
this.size = (int) (indexSize >> 3);
// Verify two sources of "maxDoc" agree:
-if (this.size != si.docCount) {
-throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.size + " but segmentInfo shows " + si.docCount);
+if (this.size != si.getDocCount()) {
+throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.size + " but segmentInfo shows " + si.getDocCount());
}
}
numTotalDocs = (int) (indexSize >> 3);

View File

@ -116,7 +116,7 @@ class Lucene3xTermVectorsReader extends TermVectorsReader {
throws CorruptIndexException, IOException {
final String segment = si.getDocStoreSegment();
final int docStoreOffset = si.getDocStoreOffset();
-final int size = si.docCount;
+final int size = si.getDocCount();
boolean success = false;

View File

@ -56,7 +56,7 @@ public class Lucene40DocValuesProducer extends PerDocProducerBase {
IndexFileNames.segmentFileName(state.segmentInfo.name,
segmentSuffix, IndexFileNames.COMPOUND_FILE_EXTENSION),
state.context, false);
-docValues = load(state.fieldInfos, state.segmentInfo.name, state.segmentInfo.docCount, cfs, state.context);
+docValues = load(state.fieldInfos, state.segmentInfo.name, state.segmentInfo.getDocCount(), cfs, state.context);
} else {
cfs = null;
docValues = new TreeMap<String,DocValues>();

View File

@ -86,9 +86,9 @@ public class Lucene40LiveDocsFormat extends LiveDocsFormat {
public Bits readLiveDocs(Directory dir, SegmentInfoPerCommit info, IOContext context) throws IOException {
String filename = IndexFileNames.fileNameFromGeneration(info.info.name, DELETES_EXTENSION, info.getDelGen());
final BitVector liveDocs = new BitVector(dir, filename, context);
-assert liveDocs.count() == info.info.docCount - info.getDelCount():
-"liveDocs.count()=" + liveDocs.count() + " info.docCount=" + info.info.docCount + " info.getDelCount()=" + info.getDelCount();
-assert liveDocs.length() == info.info.docCount;
+assert liveDocs.count() == info.info.getDocCount() - info.getDelCount():
+"liveDocs.count()=" + liveDocs.count() + " info.docCount=" + info.info.getDocCount() + " info.getDelCount()=" + info.getDelCount();
+assert liveDocs.length() == info.info.getDocCount();
return liveDocs;
}
@ -96,8 +96,8 @@ public class Lucene40LiveDocsFormat extends LiveDocsFormat {
public void writeLiveDocs(MutableBits bits, Directory dir, SegmentInfoPerCommit info, int newDelCount, IOContext context) throws IOException {
String filename = IndexFileNames.fileNameFromGeneration(info.info.name, DELETES_EXTENSION, info.getNextDelGen());
final BitVector liveDocs = (BitVector) bits;
-assert liveDocs.count() == info.info.docCount - info.getDelCount() - newDelCount;
-assert liveDocs.length() == info.info.docCount;
+assert liveDocs.count() == info.info.getDocCount() - info.getDelCount() - newDelCount;
+assert liveDocs.length() == info.info.getDocCount();
liveDocs.write(dir, filename, context);
}

View File

@ -48,7 +48,7 @@ public class Lucene40SegmentInfoWriter extends SegmentInfoWriter {
try {
// Write the Lucene version that created this segment, since 3.1
output.writeString(si.getVersion());
-output.writeInt(si.docCount);
+output.writeInt(si.getDocCount());
assert si.getDocStoreOffset() == -1;
assert si.getNormGen() == null;

View File

@ -90,8 +90,8 @@ public final class Lucene40StoredFieldsReader extends StoredFieldsReader impleme
final long indexSize = indexStream.length() - HEADER_LENGTH_IDX;
this.size = (int) (indexSize >> 3);
// Verify two sources of "maxDoc" agree:
-if (this.size != si.docCount) {
-throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.size + " but segmentInfo shows " + si.docCount);
+if (this.size != si.getDocCount()) {
+throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.size + " but segmentInfo shows " + si.getDocCount());
}
numTotalDocs = (int) (indexSize >> 3);
success = true;

View File

@ -100,7 +100,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
public Lucene40TermVectorsReader(Directory d, SegmentInfo si, FieldInfos fieldInfos, IOContext context)
throws CorruptIndexException, IOException {
final String segment = si.name;
-final int size = si.docCount;
+final int size = si.getDocCount();
boolean success = false;

View File

@ -45,7 +45,7 @@ public class SepDocValuesProducer extends PerDocProducerBase {
* {@link DocValues} instances for this segment and codec.
*/
public SepDocValuesProducer(SegmentReadState state) throws IOException {
-docValues = load(state.fieldInfos, state.segmentInfo.name, state.segmentInfo.docCount, state.dir, state.context);
+docValues = load(state.fieldInfos, state.segmentInfo.name, state.segmentInfo.getDocCount(), state.dir, state.context);
}
@Override

View File

@ -68,7 +68,7 @@ public class SimpleTextPerDocProducer extends PerDocProducerBase {
this.segmentSuffix = segmentSuffix;
if (anyDocValuesFields(state.fieldInfos)) {
docValues = load(state.fieldInfos, state.segmentInfo.name,
-state.segmentInfo.docCount, state.dir, state.context);
+state.segmentInfo.getDocCount(), state.dir, state.context);
} else {
docValues = new TreeMap<String, DocValues>();
}

View File

@ -72,7 +72,7 @@ public class SimpleTextSegmentInfoWriter extends SegmentInfoWriter {
SimpleTextUtil.writeNewline(output);
SimpleTextUtil.write(output, SI_DOCCOUNT);
-SimpleTextUtil.write(output, Integer.toString(si.docCount), scratch);
+SimpleTextUtil.write(output, Integer.toString(si.getDocCount()), scratch);
SimpleTextUtil.writeNewline(output);
SimpleTextUtil.write(output, SI_USECOMPOUND);

View File

@ -225,8 +225,8 @@ class BufferedDeletesStream {
// already did that on flush:
delCount += applyQueryDeletes(packet.queriesIterable(), rld, reader);
final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
-assert fullDelCount <= rld.info.info.docCount;
-segAllDeletes = fullDelCount == rld.info.info.docCount;
+assert fullDelCount <= rld.info.info.getDocCount();
+segAllDeletes = fullDelCount == rld.info.info.getDocCount();
} finally {
rld.release(reader);
readerPool.release(rld);
@ -271,8 +271,8 @@ class BufferedDeletesStream {
delCount += applyTermDeletes(coalescedDeletes.termsIterable(), rld, reader);
delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), rld, reader);
final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
-assert fullDelCount <= rld.info.info.docCount;
-segAllDeletes = fullDelCount == rld.info.info.docCount;
+assert fullDelCount <= rld.info.info.getDocCount();
+segAllDeletes = fullDelCount == rld.info.info.getDocCount();
} finally {
rld.release(reader);
readerPool.release(rld);

View File

@ -465,11 +465,11 @@ public class CheckIndex {
}
Status.SegmentInfoStatus segInfoStat = new Status.SegmentInfoStatus();
result.segmentInfos.add(segInfoStat);
msg(" " + (1+i) + " of " + numSegments + ": name=" + info.info.name + " docCount=" + info.info.docCount);
msg(" " + (1+i) + " of " + numSegments + ": name=" + info.info.name + " docCount=" + info.info.getDocCount());
segInfoStat.name = info.info.name;
-segInfoStat.docCount = info.info.docCount;
+segInfoStat.docCount = info.info.getDocCount();
-int toLoseDocCount = info.info.docCount;
+int toLoseDocCount = info.info.getDocCount();
SegmentReader reader = null;
@ -517,14 +517,14 @@ public class CheckIndex {
final int numDocs = reader.numDocs();
toLoseDocCount = numDocs;
if (reader.hasDeletions()) {
-if (reader.numDocs() != info.info.docCount - info.getDelCount()) {
-throw new RuntimeException("delete count mismatch: info=" + (info.info.docCount - info.getDelCount()) + " vs reader=" + reader.numDocs());
+if (reader.numDocs() != info.info.getDocCount() - info.getDelCount()) {
+throw new RuntimeException("delete count mismatch: info=" + (info.info.getDocCount() - info.getDelCount()) + " vs reader=" + reader.numDocs());
}
-if ((info.info.docCount-reader.numDocs()) > reader.maxDoc()) {
-throw new RuntimeException("too many deleted docs: maxDoc()=" + reader.maxDoc() + " vs del count=" + (info.info.docCount-reader.numDocs()));
+if ((info.info.getDocCount()-reader.numDocs()) > reader.maxDoc()) {
+throw new RuntimeException("too many deleted docs: maxDoc()=" + reader.maxDoc() + " vs del count=" + (info.info.getDocCount()-reader.numDocs()));
}
-if (info.info.docCount - numDocs != info.getDelCount()) {
-throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.info.docCount - numDocs));
+if (info.info.getDocCount() - numDocs != info.getDelCount()) {
+throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.info.getDocCount() - numDocs));
}
Bits liveDocs = reader.getLiveDocs();
if (liveDocs == null) {
@ -541,11 +541,11 @@ public class CheckIndex {
}
}
-segInfoStat.numDeleted = info.info.docCount - numDocs;
+segInfoStat.numDeleted = info.info.getDocCount() - numDocs;
msg("OK [" + (segInfoStat.numDeleted) + " deleted docs]");
} else {
if (info.getDelCount() != 0) {
throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.info.docCount - numDocs));
throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.info.getDocCount() - numDocs));
}
Bits liveDocs = reader.getLiveDocs();
if (liveDocs != null) {
@ -558,8 +558,8 @@ public class CheckIndex {
}
msg("OK");
}
-if (reader.maxDoc() != info.info.docCount) {
-throw new RuntimeException("SegmentReader.maxDoc() " + reader.maxDoc() + " != SegmentInfos.docCount " + info.info.docCount);
+if (reader.maxDoc() != info.info.getDocCount()) {
+throw new RuntimeException("SegmentReader.maxDoc() " + reader.maxDoc() + " != SegmentInfos.docCount " + info.info.getDocCount());
}
// Test getFieldInfos()
@ -1170,7 +1170,7 @@ public class CheckIndex {
// Scan stored fields for all documents
final Bits liveDocs = reader.getLiveDocs();
-for (int j = 0; j < info.info.docCount; ++j) {
+for (int j = 0; j < info.info.getDocCount(); ++j) {
// Intentionally pull even deleted documents to
// make sure they too are not corrupt:
Document doc = reader.document(j);
@ -1349,7 +1349,7 @@ public class CheckIndex {
TermsEnum termsEnum = null;
TermsEnum postingsTermsEnum = null;
-for (int j = 0; j < info.info.docCount; ++j) {
+for (int j = 0; j < info.info.getDocCount(); ++j) {
// Intentionally pull/visit (but don't count in
// stats) deleted documents to make sure they too
// are not corrupt:

View File

@ -270,7 +270,7 @@ class DocumentsWriterPerThread {
private void initSegmentInfo() {
String segment = writer.newSegmentName();
-segmentInfo = new SegmentInfo(directoryOrig, Constants.LUCENE_MAIN_VERSION, segment, 0,
+segmentInfo = new SegmentInfo(directoryOrig, Constants.LUCENE_MAIN_VERSION, segment, -1,
-1, segment, false, null, false,
codec,
null, null);
@ -486,7 +486,7 @@ class DocumentsWriterPerThread {
consumer.flush(flushState);
pendingDeletes.terms.clear();
// nocommit use setter and make this a SetOnce:
-segmentInfo.docCount = flushState.numDocs;
+segmentInfo.setDocCount(flushState.numDocs);
segmentInfo.setFiles(new HashSet<String>(directory.getCreatedFiles()));
final SegmentInfoPerCommit segmentInfoPerCommit = new SegmentInfoPerCommit(segmentInfo, 0, -1L);

View File

@ -735,7 +735,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
SegmentInfoPerCommit biggest = null;
for(SegmentInfoPerCommit info : segmentInfos) {
-if (biggest == null || (info.info.docCount-info.getDelCount()) > (biggest.info.docCount-biggest.getDelCount())) {
+if (biggest == null || (info.info.getDocCount()-info.getDelCount()) > (biggest.info.getDocCount()-biggest.getDelCount())) {
biggest = info;
}
}
@ -999,7 +999,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
count = 0;
for (final SegmentInfoPerCommit info : segmentInfos) {
-count += info.info.docCount - numDeletedDocs(info);
+count += info.info.getDocCount() - numDeletedDocs(info);
}
return count;
}
@ -1367,7 +1367,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// for test purpose
final synchronized int getDocCount(int i) {
if (i >= 0 && i < segmentInfos.size()) {
-return segmentInfos.info(i).info.docCount;
+return segmentInfos.info(i).info.getDocCount();
} else {
return -1;
}
@ -2021,7 +2021,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
setDiagnostics(newSegment.info, "flush");
-IOContext context = new IOContext(new FlushInfo(newSegment.info.docCount, newSegment.info.sizeInBytes()));
+IOContext context = new IOContext(new FlushInfo(newSegment.info.getDocCount(), newSegment.info.sizeInBytes()));
boolean success = false;
try {
@ -2225,7 +2225,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
infoStream.message("IW", "addIndexes: process segment origName=" + info.info.name + " newName=" + newSegName + " dsName=" + dsName + " info=" + info);
}
-IOContext context = new IOContext(new MergeInfo(info.info.docCount, info.info.sizeInBytes(), true, -1));
+IOContext context = new IOContext(new MergeInfo(info.info.getDocCount(), info.info.sizeInBytes(), true, -1));
infos.add(copySegmentAsIs(info, newSegName, dsNames, dsFilesCopied, context, copiedFiles));
}
@ -2285,7 +2285,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// abortable so that IW.close(false) is able to stop it
TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
-SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, 0,
+SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, -1,
-1, mergedName, false, null, false,
codec, null, null);
@ -2299,8 +2299,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
MergeState mergeState = merger.merge(); // merge 'em
-// nocommit use setter and make this a SetOnce:
-info.docCount = mergeState.mergedDocCount;
+info.setDocCount(mergeState.mergedDocCount);
SegmentInfoPerCommit infoPerCommit = new SegmentInfoPerCommit(info, 0, -1L);
@ -2399,7 +2398,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
//System.out.println("copy seg=" + info.info.name + " version=" + info.info.getVersion());
// Same SI as before but we change directory, name and docStoreSegment:
-SegmentInfo newInfo = new SegmentInfo(directory, info.info.getVersion(), segName, info.info.docCount, info.info.getDocStoreOffset(),
+SegmentInfo newInfo = new SegmentInfo(directory, info.info.getVersion(), segName, info.info.getDocCount(), info.info.getDocStoreOffset(),
newDsName, info.info.getDocStoreIsCompoundFile(), info.info.getNormGen(), info.info.getUseCompoundFile(),
info.info.getCodec(), info.info.getDiagnostics(), info.info.attributes());
SegmentInfoPerCommit newInfoPerCommit = new SegmentInfoPerCommit(newInfo, info.getDelCount(), info.getDelGen());
@ -2896,7 +2895,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
for(int i=0; i < sourceSegments.size(); i++) {
SegmentInfoPerCommit info = sourceSegments.get(i);
minGen = Math.min(info.getBufferedDeletesGen(), minGen);
-final int docCount = info.info.docCount;
+final int docCount = info.info.getDocCount();
final Bits prevLiveDocs = merge.readerLiveDocs.get(i);
final Bits currentLiveDocs;
final ReadersAndLiveDocs rld = readerPool.get(info, false);
@ -2944,7 +2943,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
} else {
-docUpto += info.info.docCount - info.getDelCount() - rld.getPendingDeleteCount();
+docUpto += info.info.getDocCount() - info.getDelCount() - rld.getPendingDeleteCount();
}
} else if (currentLiveDocs != null) {
assert currentLiveDocs.length() == docCount;
@ -2962,11 +2961,11 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
} else {
// No deletes before or after
-docUpto += info.info.docCount;
+docUpto += info.info.getDocCount();
}
}
-assert docUpto == merge.info.info.docCount;
+assert docUpto == merge.info.info.getDocCount();
if (infoStream.isEnabled("IW")) {
if (mergedDeletes == null) {
@ -3015,7 +3014,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
return false;
}
-final ReadersAndLiveDocs mergedDeletes = merge.info.info.docCount == 0 ? null : commitMergedDeletes(merge);
+final ReadersAndLiveDocs mergedDeletes = merge.info.info.getDocCount() == 0 ? null : commitMergedDeletes(merge);
assert mergedDeletes == null || mergedDeletes.getPendingDeleteCount() != 0;
@ -3027,9 +3026,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
assert !segmentInfos.contains(merge.info);
final boolean allDeleted = merge.segments.size() == 0 ||
-merge.info.info.docCount == 0 ||
+merge.info.info.getDocCount() == 0 ||
(mergedDeletes != null &&
-mergedDeletes.getPendingDeleteCount() == merge.info.info.docCount);
+mergedDeletes.getPendingDeleteCount() == merge.info.info.getDocCount());
if (infoStream.isEnabled("IW")) {
if (allDeleted) {
@ -3043,7 +3042,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// the new segment:
assert merge.segments.size() > 0 || dropSegment;
-assert merge.info.info.docCount != 0 || keepFullyDeletedSegments || dropSegment;
+assert merge.info.info.getDocCount() != 0 || keepFullyDeletedSegments || dropSegment;
segmentInfos.applyMergeChanges(merge, dropSegment);
@ -3164,9 +3163,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
} catch (OutOfMemoryError oom) {
handleOOM(oom, "merge");
}
-if (merge.info != null) {
+if (merge.info != null && !merge.isAborted()) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "merge time " + (System.currentTimeMillis()-t0) + " msec for " + merge.info.info.docCount + " docs");
infoStream.message("IW", "merge time " + (System.currentTimeMillis()-t0) + " msec for " + merge.info.info.getDocCount() + " docs");
}
}
}
@ -3321,7 +3320,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// ConcurrentMergePolicy we keep deterministic segment
// names.
final String mergeSegmentName = newSegmentName();
-SegmentInfo si = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergeSegmentName, 0, -1, mergeSegmentName, false, null, false, codec, details, null);
+SegmentInfo si = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergeSegmentName, -1, -1, mergeSegmentName, false, null, false, codec, details, null);
merge.info = new SegmentInfoPerCommit(si, 0, -1L);
// Lock order: IW -> BD
@ -3333,10 +3332,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
assert merge.estimatedMergeBytes == 0;
for(SegmentInfoPerCommit info : merge.segments) {
-if (info.info.docCount > 0) {
+if (info.info.getDocCount() > 0) {
final int delCount = numDeletedDocs(info);
-assert delCount <= info.info.docCount;
-final double delRatio = ((double) delCount)/info.info.docCount;
+assert delCount <= info.info.getDocCount();
+final double delRatio = ((double) delCount)/info.info.getDocCount();
merge.estimatedMergeBytes += info.info.sizeInBytes() * (1.0 - delRatio);
}
}
@ -3488,8 +3487,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
merge.readerLiveDocs.add(liveDocs);
merge.readers.add(reader);
-assert delCount <= info.info.docCount: "delCount=" + delCount + " info.docCount=" + info.info.docCount + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
-if (delCount < info.info.docCount) {
+assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
+if (delCount < info.info.getDocCount()) {
merger.add(reader, liveDocs);
}
segUpto++;
@ -3499,13 +3498,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// This is where all the work happens:
MergeState mergeState = merger.merge();
-merge.info.info.docCount = mergeState.mergedDocCount;
+merge.info.info.setDocCount(mergeState.mergedDocCount);
merge.info.info.setFiles(new HashSet<String>(dirWrapper.getCreatedFiles()));
// Record which codec was used to write the segment
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "merge codec=" + codec + " docCount=" + merge.info.info.docCount + "; merged segment has " +
infoStream.message("IW", "merge codec=" + codec + " docCount=" + merge.info.info.getDocCount() + "; merged segment has " +
(mergeState.fieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
(mergeState.fieldInfos.hasNorms() ? "norms" : "no norms") + "; " +
(mergeState.fieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " +
@ -3634,7 +3633,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
-return merge.info.info.docCount;
+return merge.info.info.getDocCount();
}
synchronized void addMergeException(MergePolicy.OneMerge merge) {

View File

@ -185,10 +185,10 @@ public abstract class LogMergePolicy extends MergePolicy {
protected long sizeDocs(SegmentInfoPerCommit info) throws IOException {
if (calibrateSizeByDeletes) {
int delCount = writer.get().numDeletedDocs(info);
-assert delCount <= info.info.docCount;
-return (info.info.docCount - (long)delCount);
+assert delCount <= info.info.getDocCount();
+return (info.info.getDocCount() - (long)delCount);
} else {
-return info.info.docCount;
+return info.info.getDocCount();
}
}
@ -196,9 +196,9 @@ public abstract class LogMergePolicy extends MergePolicy {
long byteSize = info.sizeInBytes();
if (calibrateSizeByDeletes) {
int delCount = writer.get().numDeletedDocs(info);
-double delRatio = (info.info.docCount <= 0 ? 0.0f : ((float)delCount / (float)info.info.docCount));
+double delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((float)delCount / (float)info.info.getDocCount()));
assert delRatio <= 1.0;
-return (info.info.docCount <= 0 ? byteSize : (long)(byteSize * (1.0 - delRatio)));
+return (info.info.getDocCount() <= 0 ? byteSize : (long)(byteSize * (1.0 - delRatio)));
} else {
return byteSize;
}

View File

@ -88,7 +88,7 @@ public abstract class MergePolicy implements java.io.Closeable {
this.segments = new ArrayList<SegmentInfoPerCommit>(segments);
int count = 0;
for(SegmentInfoPerCommit info : segments) {
-count += info.info.docCount;
+count += info.info.getDocCount();
}
totalDocCount = count;
}
@ -186,7 +186,7 @@ public abstract class MergePolicy implements java.io.Closeable {
public int totalNumDocs() throws IOException {
int total = 0;
for (SegmentInfoPerCommit info : segments) {
-total += info.info.docCount;
+total += info.info.getDocCount();
}
return total;
}

View File

@ -98,16 +98,16 @@ class ReadersAndLiveDocs {
int count;
if (liveDocs != null) {
count = 0;
-for(int docID=0;docID<info.info.docCount;docID++) {
+for(int docID=0;docID<info.info.getDocCount();docID++) {
if (liveDocs.get(docID)) {
count++;
}
}
} else {
-count = info.info.docCount;
+count = info.info.getDocCount();
}
-assert info.info.docCount - info.getDelCount() - pendingDeleteCount == count: "info.docCount=" + info.info.docCount + " info.getDelCount()=" + info.getDelCount() + " pendingDeleteCount=" + pendingDeleteCount + " count=" + count;
+assert info.info.getDocCount() - info.getDelCount() - pendingDeleteCount == count: "info.docCount=" + info.info.getDocCount() + " info.getDelCount()=" + info.getDelCount() + " pendingDeleteCount=" + pendingDeleteCount + " count=" + count;
return true;
}
@ -169,7 +169,7 @@ class ReadersAndLiveDocs {
public synchronized boolean delete(int docID) {
assert liveDocs != null;
assert Thread.holdsLock(writer);
-assert docID >= 0 && docID < liveDocs.length() : "out of bounds: docid=" + docID + " liveDocsLength=" + liveDocs.length() + " seg=" + info.info.name + " docCount=" + info.info.docCount;
+assert docID >= 0 && docID < liveDocs.length() : "out of bounds: docid=" + docID + " liveDocsLength=" + liveDocs.length() + " seg=" + info.info.name + " docCount=" + info.info.getDocCount();
assert !shared;
final boolean didDelete = liveDocs.get(docID);
if (didDelete) {
@ -207,7 +207,7 @@ class ReadersAndLiveDocs {
}
shared = true;
if (liveDocs != null) {
-return new SegmentReader(reader.getSegmentInfo(), reader.core, liveDocs, info.info.docCount - info.getDelCount() - pendingDeleteCount);
+return new SegmentReader(reader.getSegmentInfo(), reader.core, liveDocs, info.info.getDocCount() - info.getDelCount() - pendingDeleteCount);
} else {
assert reader.getLiveDocs() == liveDocs;
reader.incRef();
@ -217,7 +217,7 @@ class ReadersAndLiveDocs {
public synchronized void initWritableLiveDocs() throws IOException {
assert Thread.holdsLock(writer);
-assert info.info.docCount > 0;
+assert info.info.getDocCount() > 0;
//System.out.println("initWritableLivedocs seg=" + info + " liveDocs=" + liveDocs + " shared=" + shared);
if (shared) {
// Copy on write: this means we've cloned a
@ -227,7 +227,7 @@ class ReadersAndLiveDocs {
LiveDocsFormat liveDocsFormat = info.info.getCodec().liveDocsFormat();
if (liveDocs == null) {
//System.out.println("create BV seg=" + info);
-liveDocs = liveDocsFormat.newLiveDocs(info.info.docCount);
+liveDocs = liveDocsFormat.newLiveDocs(info.info.getDocCount());
} else {
liveDocs = liveDocsFormat.newLiveDocs(liveDocs);
}
@ -270,7 +270,7 @@ class ReadersAndLiveDocs {
//System.out.println("rld.writeLiveDocs seg=" + info + " pendingDelCount=" + pendingDeleteCount);
if (pendingDeleteCount != 0) {
// We have new deletes
-assert liveDocs.length() == info.info.docCount;
+assert liveDocs.length() == info.info.getDocCount();
// We can write directly to the actual name (vs to a
// .tmp & renaming it) because the file is not live

View File

@ -42,7 +42,7 @@ public final class SegmentInfo {
public static final int YES = 1; // e.g. have norms; have deletes;
public final String name; // unique name in dir
-public int docCount; // number of docs in seg
+private int docCount; // number of docs in seg
public final Directory dir; // where segment resides
/*
@ -203,6 +203,21 @@ public final class SegmentInfo {
return codec;
}
+public int getDocCount() {
+if (this.docCount == -1) {
+throw new IllegalStateException("docCount isn't set yet");
+}
+return docCount;
+}
+// NOTE: leave package private
+void setDocCount(int docCount) {
+if (this.docCount != -1) {
+throw new IllegalStateException("docCount was already set");
+}
+this.docCount = docCount;
+}
/*
* Return all files referenced by this SegmentInfo. The
* returns List is a locally cached List so you should not
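
Design note on the hunk above: the SegmentInfo constructor call sites in DocumentsWriterPerThread and IndexWriter now pass -1 rather than 0 for a brand-new segment's docCount, seeding the sentinel that getDocCount() checks; the real count is then published exactly once via setDocCount(int) after the flush or merge has run. That is presumably also why the merge-time log statement gains the !merge.isAborted() guard: an aborted merge may never have had its docCount set, so calling getDocCount() there would throw.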

View File

@ -126,7 +126,7 @@ public class SegmentInfoPerCommit {
void setDelCount(int delCount) {
this.delCount = delCount;
-assert delCount <= info.docCount;
+assert delCount <= info.getDocCount();
}
public String toString(Directory dir, int pendingDelCount) {

View File

@ -288,7 +288,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
info.setCodec(codec);
long delGen = input.readLong();
int delCount = input.readInt();
-assert delCount <= info.docCount;
+assert delCount <= info.getDocCount();
add(new SegmentInfoPerCommit(info, delCount, delGen));
}
userData = input.readStringStringMap();
@ -366,7 +366,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
segnOutput.writeInt(siPerCommit.getDelCount());
assert si.dir == directory;
-assert siPerCommit.getDelCount() <= si.docCount;
+assert siPerCommit.getDelCount() <= si.getDocCount();
// If this segment is pre-4.x, perform a one-time
// "ugprade" to write the .si file for it:
@ -423,7 +423,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
// Write the Lucene version that created this segment, since 3.1
output.writeString(si.getVersion());
output.writeString(si.name);
-output.writeInt(si.docCount);
+output.writeInt(si.getDocCount());
// NOTE: a lie
output.writeLong(0L);
@ -987,7 +987,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
public int totalDocCount() {
int count = 0;
for(SegmentInfoPerCommit info : this) {
-count += info.info.docCount;
+count += info.info.getDocCount();
}
return count;
}

View File

@ -62,7 +62,7 @@ public final class SegmentReader extends AtomicReader {
assert si.getDelCount() == 0;
liveDocs = null;
}
-numDocs = si.info.docCount - si.getDelCount();
+numDocs = si.info.getDocCount() - si.getDelCount();
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
@ -82,7 +82,7 @@ public final class SegmentReader extends AtomicReader {
SegmentReader(SegmentInfoPerCommit si, SegmentCoreReaders core, IOContext context) throws IOException {
this(si, core,
si.info.getCodec().liveDocsFormat().readLiveDocs(si.info.dir, si, context),
-si.info.docCount - si.getDelCount());
+si.info.getDocCount() - si.getDelCount());
}
// Create new SegmentReader sharing core from a previous
@ -153,7 +153,7 @@ public final class SegmentReader extends AtomicReader {
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
-return si.info.docCount;
+return si.info.getDocCount();
}
/** @lucene.internal */
@ -181,7 +181,7 @@ public final class SegmentReader extends AtomicReader {
public String toString() {
// SegmentInfo.toString takes dir and number of
// *pending* deletions; so we reverse compute that here:
-return si.toString(si.info.dir, si.info.docCount - numDocs - si.getDelCount());
+return si.toString(si.info.dir, si.info.getDocCount() - numDocs - si.getDelCount());
}
/**

View File

@ -563,7 +563,7 @@ public class TieredMergePolicy extends MergePolicy {
final List<SegmentInfoPerCommit> eligible = new ArrayList<SegmentInfoPerCommit>();
final Collection<SegmentInfoPerCommit> merging = writer.get().getMergingSegments();
for(SegmentInfoPerCommit info : infos) {
-double pctDeletes = 100.*((double) writer.get().numDeletedDocs(info))/info.info.docCount;
+double pctDeletes = 100.*((double) writer.get().numDeletedDocs(info))/info.info.getDocCount();
if (pctDeletes > forceMergeDeletesPctAllowed && !merging.contains(info)) {
eligible.add(info);
}
@ -640,7 +640,7 @@ public class TieredMergePolicy extends MergePolicy {
private long size(SegmentInfoPerCommit info) throws IOException {
final long byteSize = info.info.sizeInBytes();
final int delCount = writer.get().numDeletedDocs(info);
-final double delRatio = (info.info.docCount <= 0 ? 0.0f : ((double)delCount / (double)info.info.docCount));
+final double delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((double)delCount / (double)info.info.getDocCount()));
assert delRatio <= 1.0;
return (long) (byteSize * (1.0-delRatio));
}

View File

@ -208,7 +208,7 @@ public class TestDoc extends LuceneTestCase {
r1.close();
r2.close();
final SegmentInfo info = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged,
-si1.info.docCount + si2.info.docCount, -1, merged,
+si1.info.getDocCount() + si2.info.getDocCount(), -1, merged,
false, null, false, codec, null, null);
info.setFiles(new HashSet<String>(trackingDir.getCreatedFiles()));

View File

@ -321,7 +321,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
break;
}
for(int i=0;i<merge.segments.size();i++) {
-assert merge.segments.get(i).info.docCount < 20;
+assert merge.segments.get(i).info.getDocCount() < 20;
}
writer.merge(merge);
}

View File

@ -59,8 +59,8 @@ public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
@Override
protected long size(SegmentInfoPerCommit info) throws IOException {
long byteSize = info.sizeInBytes();
-float delRatio = (info.info.docCount <= 0 ? 0.0f : ((float)info.getDelCount() / (float)info.info.docCount));
-return (info.info.docCount <= 0 ? byteSize : (long)((1.0f - delRatio) * byteSize));
+float delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((float)info.getDelCount() / (float)info.info.getDocCount()));
+return (info.info.getDocCount() <= 0 ? byteSize : (long)((1.0f - delRatio) * byteSize));
}
public void setPartialExpunge(boolean doPartialExpunge) {

View File

@ -145,7 +145,7 @@ public class IndexSplitter {
SegmentInfoPerCommit infoPerCommit = getInfo(n);
SegmentInfo info = infoPerCommit.info;
// Same info just changing the dir:
-SegmentInfo newInfo = new SegmentInfo(destFSDir, info.getVersion(), info.name, info.docCount, info.getDocStoreOffset(),
+SegmentInfo newInfo = new SegmentInfo(destFSDir, info.getVersion(), info.name, info.getDocCount(), info.getDocStoreOffset(),
info.getDocStoreSegment(), info.getDocStoreIsCompoundFile(), info.getNormGen(), info.getUseCompoundFile(),
info.getCodec(), info.getDiagnostics(), info.attributes());
destInfos.add(new SegmentInfoPerCommit(newInfo, infoPerCommit.getDelCount(), infoPerCommit.getDelGen()));