Don't pick crazy compression parameters for this monsterish test

Robert Muir 2016-04-18 10:11:29 -04:00
parent cc099d8fc6
commit ddb8d077ec
2 changed files with 29 additions and 8 deletions

@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.compressing.CompressingCodec;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
@@ -34,7 +35,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts;
 /**
  * This test creates an index with one segment that is a little larger than 4GB.
  */
-@SuppressCodecs({ "SimpleText" })
+@SuppressCodecs({ "SimpleText", "Compressing" })
 @TimeoutSuite(millis = 4 * TimeUnits.HOUR)
 public class Test4GBStoredFields extends LuceneTestCase {
@@ -43,13 +44,20 @@ public class Test4GBStoredFields extends LuceneTestCase {
     MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new MMapDirectory(createTempDir("4GBStoredFields")));
     dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
-    IndexWriter w = new IndexWriter(dir,
-        new IndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
-        .setRAMBufferSizeMB(256.0)
-        .setMergeScheduler(new ConcurrentMergeScheduler())
-        .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+    iwc.setRAMBufferSizeMB(256.0);
+    iwc.setMergeScheduler(new ConcurrentMergeScheduler());
+    iwc.setMergePolicy(newLogMergePolicy(false, 10));
+    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+    // TODO: we disable "Compressing" since it likes to pick very extreme values which will be too slow for this test.
+    // maybe we should factor out crazy cases to ExtremeCompressing? then annotations can handle this stuff...
+    if (random().nextBoolean()) {
+      iwc.setCodec(CompressingCodec.reasonableInstance(random()));
+    }
+    IndexWriter w = new IndexWriter(dir, iwc);
     MergePolicy mp = w.getConfig().getMergePolicy();
     if (mp instanceof LogByteSizeMergePolicy) {
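
To read the hunk above in isolation: the chained IndexWriterConfig builder calls become plain statements so that the codec override can sit behind a coin flip. A minimal self-contained sketch of that pattern follows; the class and test method are hypothetical, and only CompressingCodec.reasonableInstance is introduced by this commit:

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.codecs.compressing.CompressingCodec;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.LuceneTestCase;

    // Hypothetical test, named only for illustration.
    public class TestConditionalCodecSketch extends LuceneTestCase {
      public void testConditionalCodec() throws Exception {
        Directory dir = newDirectory();
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
        // About half the time, replace the runner's randomly chosen codec
        // with one whose compression parameters stay inside sane bounds.
        if (random().nextBoolean()) {
          iwc.setCodec(CompressingCodec.reasonableInstance(random()));
        }
        IndexWriter w = new IndexWriter(dir, iwc);
        // ... add documents here ...
        w.close();
        dir.close();
      }
    }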

@@ -61,6 +61,19 @@ public abstract class CompressingCodec extends FilterCodec {
     return randomInstance(random, chunkSize, chunkDocs, false, blockSize);
   }
 
+  /**
+   * Creates a random {@link CompressingCodec} with more reasonable parameters for big tests.
+   */
+  public static CompressingCodec reasonableInstance(Random random) {
+    // e.g. defaults use 2^14 for FAST and ~ 2^16 for HIGH
+    final int chunkSize = TestUtil.nextInt(random, 1<<13, 1<<17);
+    // e.g. defaults use 128 for FAST and 512 for HIGH
+    final int chunkDocs = TestUtil.nextInt(random, 1<<6, 1<<10);
+    // e.g. defaults use 1024 for both cases
+    final int blockSize = TestUtil.nextInt(random, 1<<9, 1<<11);
+    return randomInstance(random, chunkSize, chunkDocs, false, blockSize);
+  }
+
   /**
    * Creates a random {@link CompressingCodec} that is using a segment suffix
    */
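
For a sense of scale, the bounds drawn by reasonableInstance expand to the following concrete values. The scratch class below just evaluates the shift expressions from the hunk above; it is an illustration, not part of the commit:

    // Scratch calculation: expand reasonableInstance's shift-expression bounds.
    public class ReasonableBoundsSketch {
      public static void main(String[] args) {
        System.out.println("chunkSize: " + (1 << 13) + " .. " + (1 << 17)); // 8192 .. 131072 bytes
        System.out.println("chunkDocs: " + (1 << 6) + " .. " + (1 << 10));  // 64 .. 1024 docs per chunk
        System.out.println("blockSize: " + (1 << 9) + " .. " + (1 << 11));  // 512 .. 2048
      }
    }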