LUCENE-3728: stop invading the codecs files() for this assert

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3661@1237627 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-01-30 12:52:29 +00:00
parent 438ec3ce0b
commit d9a73590a8
3 changed files with 7 additions and 101 deletions

org/apache/lucene/index/DocumentsWriterPerThread.java

@@ -494,14 +494,13 @@ public class DocumentsWriterPerThread {
       }
       if (infoStream.isEnabled("DWPT")) {
-        final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
-        final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
+        final double newSegmentSize = newSegment.sizeInBytes()/1024./1024.;
+        // nocommit: some of this is confusing since it includes docstores
         infoStream.message("DWPT", "flushed: segment=" + newSegment +
                 " ramUsed=" + nf.format(startMBUsed) + " MB" +
                 " newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
-                " (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
                 " docs/MB=" + nf.format(flushedDocCount / newSegmentSize) +
-                " new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startMBUsed) + "%");
+                " new/old=" + nf.format(100.0 * newSegmentSize / startMBUsed) + "%");
       }
       doAfterFlush();
       success = true;
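As a reader's aid, a small worked example (values are illustrative, not from this commit) of how the surviving log metrics relate:

    // Illustrative only: suppose a flush used 10 MB of RAM buffer and
    // wrote a 2 MB segment containing 1,000 documents.
    double startMBUsed = 10.0;                                 // RAM used before the flush, in MB
    double newSegmentSize = 2.0;                               // newSegment.sizeInBytes()/1024./1024.
    int flushedDocCount = 1000;
    double docsPerMB = flushedDocCount / newSegmentSize;       // 500 docs/MB
    double newOverOld = 100.0 * newSegmentSize / startMBUsed;  // 20% new/old

Note that newFlushedSize (and with it docs/MB and new/old) now includes the doc stores, which is exactly what the new nocommit comment flags.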

org/apache/lucene/index/SegmentInfo.java

@@ -73,8 +73,7 @@ public final class SegmentInfo implements Cloneable {
   private volatile List<String> files;             // cached list of files that this segment uses
                                                    // in the Directory
-  private volatile long sizeInBytesNoStore = -1;   // total byte size of all but the store files (computed on demand)
-  private volatile long sizeInBytesWithStore = -1; // total byte size of all of our files (computed on demand)
+  private volatile long sizeInBytes = -1;          // total byte size of all files (computed on demand)

   //TODO: LUCENE-2555: remove once we don't need to support shared doc stores (pre 4.0)
   private int docStoreOffset;                      // if this segment shares stored fields & vectors, this
@@ -212,50 +211,12 @@ public final class SegmentInfo implements Cloneable {
    * Returns total size in bytes of all of files used by this segment
    */
   public long sizeInBytes() throws IOException {
-    return sizeInBytes(true);
-  }
-
-  /**
-   * Returns total size in bytes of all of files used by this segment (if
-   * {@code includeDocStores} is true), or the size of all files except the
-   * store files otherwise.
-   * <p>
-   * NOTE: includeDocStores=false should only be used for debugging.
-   * Theoretically a codec could combine its files however it wants (after-
-   * the-fact or something), and this calculation is not particularly
-   * efficient.
-   */
-  long sizeInBytes(boolean includeDocStores) throws IOException {
-    // TODO: based on how this is used, can't we just forget about all this docstore crap?
-    // its really an abstraction violation into the codec
-    if (includeDocStores) {
-      if (sizeInBytesWithStore != -1) {
-        return sizeInBytesWithStore;
-      }
     long sum = 0;
     for (final String fileName : files()) {
-      // We don't count bytes used by a shared doc store
-      // against this segment
-      if (docStoreOffset == -1 || !isDocStoreFile(fileName)) {
       sum += dir.fileLength(fileName);
-        }
     }
-      sizeInBytesWithStore = sum;
-      return sizeInBytesWithStore;
-    } else {
-      if (sizeInBytesNoStore != -1) {
-        return sizeInBytesNoStore;
-      }
-      long sum = 0;
-      for (final String fileName : files()) {
-        if (isDocStoreFile(fileName)) {
-          continue;
-        }
-        sum += dir.fileLength(fileName);
-      }
-      sizeInBytesNoStore = sum;
-      return sizeInBytesNoStore;
-    }
+    sizeInBytes = sum;
+    return sizeInBytes;
   }

   // nocommit: wrong to call this if (compoundFile)
@@ -267,12 +228,6 @@ public final class SegmentInfo implements Cloneable {
     return docStoreFiles;
   }

-  // TODO: a little messy, but sizeInBytes above that uses this is the real problem.
-  private boolean isDocStoreFile(String fileName) throws IOException {
-    Set<String> docStoreFiles = codecDocStoreFiles();
-    return fileName.endsWith(IndexFileNames.COMPOUND_FILE_STORE_EXTENSION) || docStoreFiles.contains(fileName);
-  }
-
   public boolean getHasVectors() throws IOException {
     return hasVectors == CHECK_FIELDINFO ? getFieldInfos().hasVectors() : hasVectors == YES;
   }
@@ -488,8 +443,7 @@ public final class SegmentInfo implements Cloneable {
    * files this segment has. */
   private void clearFilesCache() {
     files = null;
-    sizeInBytesNoStore = -1;
-    sizeInBytesWithStore = -1;
+    sizeInBytes = -1;
   }

   /** {@inheritDoc} */
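Taken together, the SegmentInfo hunks collapse the two cached sizes into the single sizeInBytes field. Reassembled from the context and added lines above, the consolidated method reads as follows (a reconstruction; the comments are mine, not from the commit):

    /** Returns total size in bytes of all of files used by this segment */
    public long sizeInBytes() throws IOException {
      long sum = 0;
      for (final String fileName : files()) {
        sum += dir.fileLength(fileName);   // every file counts; no more doc-store special casing
      }
      sizeInBytes = sum;                   // cache the result; clearFilesCache() resets it to -1
      return sizeInBytes;
    }

As reconstructed, the method recomputes the sum on every call and only re-caches it; a memoizing early-return on sizeInBytes != -1 does not appear in the diff, so treat this as an in-progress dev-branch state. Because nothing filters files() through codecDocStoreFiles() anymore, the codec's file listing is no longer consulted just to split store from non-store bytes, which is the "invading the codecs files()" the commit message refers to.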

org/apache/lucene/index/TestSegmentInfo.java (deleted)

@@ -1,47 +0,0 @@
-package org.apache.lucene.index;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class TestSegmentInfo extends LuceneTestCase {
-
-  public void testSizeInBytesCache() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy());
-    IndexWriter writer = new IndexWriter(dir, conf);
-    Document doc = new Document();
-    doc.add(new Field("a", "value", TextField.TYPE_STORED));
-    writer.addDocument(doc);
-    writer.close();
-
-    SegmentInfos sis = new SegmentInfos();
-    sis.read(dir);
-    SegmentInfo si = sis.info(0);
-
-    long sizeInBytesNoStore = si.sizeInBytes(false);
-    long sizeInBytesWithStore = si.sizeInBytes(true);
-    assertTrue("sizeInBytesNoStore=" + sizeInBytesNoStore + " sizeInBytesWithStore=" + sizeInBytesWithStore, sizeInBytesWithStore > sizeInBytesNoStore);
-    dir.close();
-  }
-}
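The test is deleted outright because the sizeInBytes(boolean) distinction it asserts on no longer exists. A hypothetical minimal replacement against the surviving API (not part of this commit) could keep the same setup and end with:

    // Hypothetical, not from this commit: sanity-check the single cached size.
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    SegmentInfo si = sis.info(0);
    long size = si.sizeInBytes();
    assertTrue("size=" + size, size > 0);   // the flushed segment has at least one file
    assertEquals(size, si.sizeInBytes());   // a second call returns the same cached value
    dir.close();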