mirror of https://github.com/apache/lucene.git
fix test/test-framework/real bug causing OOME in TestPostingsFormat
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1380808 13f79535-47bb-0310-9956-ffa450edef68
commit ea8ecb15a4
parent 635699a563
CHANGES.txt
@@ -138,6 +138,10 @@ Bug Fixes

 * LUCENE-4333: Fixed NPE in TermGroupFacetCollector when faceting on mv fields.
   (Jesse MacVicar, Martijn van Groningen)

+* NRTCachingDirectory was always caching a newly flushed segment in
+  RAM, instead of checking the estimated size of the segment
+  to decide whether to cache it. (Mike McCandless)
+
 Optimizations

 * LUCENE-4322: Decrease lucene-core JAR size. The core JAR size had increased a
NRTCachingDirectory.java
@@ -267,9 +267,16 @@ public class NRTCachingDirectory extends Directory {
   /** Subclass can override this to customize logic; return
    * true if this file should be written to the RAMDirectory. */
   protected boolean doCacheWrite(String name, IOContext context) {
-    final MergeInfo merge = context.mergeInfo;
     //System.out.println(Thread.currentThread().getName() + ": CACHE check merge=" + merge + " size=" + (merge==null ? 0 : merge.estimatedMergeBytes));
-    return !name.equals(IndexFileNames.SEGMENTS_GEN) && (merge == null || merge.estimatedMergeBytes <= maxMergeSizeBytes) && cache.sizeInBytes() <= maxCachedBytes;
+
+    long bytes = 0;
+    if (context.mergeInfo != null) {
+      bytes = context.mergeInfo.estimatedMergeBytes;
+    } else if (context.flushInfo != null) {
+      bytes = context.flushInfo.estimatedSegmentSize;
+    }
+
+    return !name.equals(IndexFileNames.SEGMENTS_GEN) && (bytes <= maxMergeSizeBytes) && (bytes + cache.sizeInBytes()) <= maxCachedBytes;
   }

   private final Object uncacheLock = new Object();
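This is the heart of the fix: doCacheWrite now pulls a size estimate from either mergeInfo or flushInfo, so an oversized flush is written straight to the delegate directory instead of the RAM cache. Below is a minimal sketch of the resulting behavior, assuming the Lucene 4.x API at this revision; the ExposedNRTCachingDirectory subclass and the file names are hypothetical, used only to reach the protected method.

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.store.RAMDirectory;

public class CacheCheckDemo {
  // Hypothetical subclass: exists only to expose the protected doCacheWrite.
  static class ExposedNRTCachingDirectory extends NRTCachingDirectory {
    ExposedNRTCachingDirectory(Directory delegate, double maxMergeSizeMB, double maxCachedMB) {
      super(delegate, maxMergeSizeMB, maxCachedMB);
    }
    boolean wouldCache(String name, IOContext context) {
      return doCacheWrite(name, context);
    }
  }

  public static void main(String[] args) {
    // Cache segments up to 5 MB each, with at most 60 MB total in RAM:
    ExposedNRTCachingDirectory dir =
        new ExposedNRTCachingDirectory(new RAMDirectory(), 5.0, 60.0);

    // A small flush (1 MB estimated) still goes to the RAM cache:
    System.out.println(dir.wouldCache("_0.frq",
        new IOContext(new FlushInfo(1000, 1L << 20))));      // true

    // A huge flush (100 MB estimated) now bypasses the cache; before this
    // fix it was cached unconditionally, which is what caused the OOME:
    System.out.println(dir.wouldCache("_1.frq",
        new IOContext(new FlushInfo(100000, 100L << 20))));  // false
  }
}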
TestPostingsFormat.java
@@ -38,12 +38,14 @@ import org.apache.lucene.codecs.TermStats;
 import org.apache.lucene.codecs.TermsConsumer;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util._TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -379,10 +381,13 @@ public class TestPostingsFormat extends LuceneTestCase {

     FieldInfos newFieldInfos = new FieldInfos(newFieldInfoArray);

+    // Estimate that flushed segment size will be 25% of
+    // what we use in RAM:
+    long bytes = RamUsageEstimator.sizeOf(fields)/4;
+
     SegmentWriteState writeState = new SegmentWriteState(null, dir,
                                                          segmentInfo, newFieldInfos,
-                                                         32, null, IOContext.DEFAULT);
+                                                         32, null, new IOContext(new FlushInfo(maxDocID, bytes)));

     FieldsConsumer fieldsConsumer = Codec.getDefault().postingsFormat().fieldsConsumer(writeState);

     for(Map.Entry<String,Map<BytesRef,List<Posting>>> fieldEnt : fields.entrySet()) {
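The test change follows the same pattern: instead of IOContext.DEFAULT, which carries no size information, it measures the in-RAM postings with RamUsageEstimator, assumes the flushed segment will be roughly a quarter of that, and hands the estimate to the codec through a FlushInfo. A self-contained sketch of the pattern; the map contents and doc count below are placeholders, not values from the test.

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.RamUsageEstimator;

public class FlushEstimateDemo {
  public static void main(String[] args) {
    // Stand-in for the test's in-RAM postings; any object graph works:
    Map<String,long[]> fields = new HashMap<String,long[]>();
    fields.put("body", new long[128 * 1024]);

    int maxDocID = 42;  // placeholder doc count
    // Estimate that the flushed segment will be 25% of the RAM footprint:
    long bytes = RamUsageEstimator.sizeOf(fields) / 4;

    // The codec now sees a realistic size estimate instead of IOContext.DEFAULT:
    IOContext context = new IOContext(new FlushInfo(maxDocID, bytes));
    System.out.println("estimated flush size=" + context.flushInfo.estimatedSegmentSize);
  }
}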
MockDirectoryWrapper.java
@@ -443,7 +443,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
     }

     //System.out.println(Thread.currentThread().getName() + ": MDW: create " + name);
-    IndexOutput io = new MockIndexOutputWrapper(this, delegate.createOutput(name, LuceneTestCase.newIOContext(randomState)), name);
+    IndexOutput io = new MockIndexOutputWrapper(this, delegate.createOutput(name, LuceneTestCase.newIOContext(randomState, context)), name);
     addFileHandle(io, name, Handle.Output);
     openFilesForWrite.add(name);

@@ -497,7 +497,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
       throw fillOpenTrace(new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open for writing"), name, false);
     }

-    IndexInput ii = new MockIndexInputWrapper(this, name, delegate.openInput(name, LuceneTestCase.newIOContext(randomState)));
+    IndexInput ii = new MockIndexInputWrapper(this, name, delegate.openInput(name, LuceneTestCase.newIOContext(randomState, context)));
     addFileHandle(ii, name, Handle.Input);
     return ii;
   }
LuceneTestCase.java
@@ -1130,8 +1130,23 @@ public abstract class LuceneTestCase extends Assert {

   /** TODO: javadoc */
   public static IOContext newIOContext(Random random) {
+    return newIOContext(random, IOContext.DEFAULT);
+  }
+
+  /** TODO: javadoc */
+  public static IOContext newIOContext(Random random, IOContext oldContext) {
     final int randomNumDocs = random.nextInt(4192);
     final int size = random.nextInt(512) * randomNumDocs;
-    final IOContext context;
-    switch (random.nextInt(5)) {
-    case 0:
+    if (oldContext.flushInfo != null) {
+      // Always return at least the estimatedSegmentSize of
+      // the incoming IOContext:
+      return new IOContext(new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size)));
+    } else if (oldContext.mergeInfo != null) {
+      // Always return at least the estimatedMergeBytes of
+      // the incoming IOContext:
+      return new IOContext(new MergeInfo(randomNumDocs, Math.max(oldContext.mergeInfo.estimatedMergeBytes, size), random.nextBoolean(), _TestUtil.nextInt(random, 1, 100)));
+    } else {
+      // Make a totally random IOContext:
+      final IOContext context;
+      switch (random.nextInt(5)) {
+      case 0:
@@ -1154,6 +1169,7 @@ public abstract class LuceneTestCase extends Assert {
       }
       return context;
+    }
   }

   /**
    * Create a new searcher over the reader. This searcher might randomly use
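Together with the MockDirectoryWrapper change above, this closes the loop: the wrapper used to replace the caller's IOContext with a fully random one, silently discarding the flush/merge size estimates, so NRTCachingDirectory had nothing to check. The two-argument newIOContext still randomizes, but never returns an estimate smaller than the incoming one. A small sketch of that invariant, assuming the test-framework API as committed here; the concrete numbers are illustrative.

import java.util.Random;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.LuceneTestCase;

public class NewIOContextDemo {
  public static void main(String[] args) {
    Random random = new Random(0);

    // Incoming context claims a 7 MB flush:
    IOContext incoming = new IOContext(new FlushInfo(100, 7L * 1024 * 1024));

    // The randomized context may grow the estimate but never shrinks it,
    // so size checks like NRTCachingDirectory.doCacheWrite stay meaningful:
    IOContext randomized = LuceneTestCase.newIOContext(random, incoming);
    System.out.println("incoming=" + incoming.flushInfo.estimatedSegmentSize
        + " randomized=" + randomized.flushInfo.estimatedSegmentSize);
  }
}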