minimize randomization of IndexWriterConfig in TestDocTermOrdsUninvertLimit to reduce risk of OOM in nightly tests

Author: Chris Hostetter
Date:   2019-05-09 11:34:43 -07:00
Parent: 5b772f7c9d
Commit: 5e40000e27
1 changed file with 15 additions and 6 deletions

@@ -22,9 +22,12 @@ import java.io.IOException;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.Directory;
 import org.apache.solr.SolrTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -46,11 +49,17 @@ public class TestDocTermOrdsUninvertLimit extends SolrTestCase {
     final int DOCS = (1<<16)-1; // The number of documents within a single pass (simplified)
     final int TERMS = REF_LIMIT/DOCS; // Each document must have this many references aka terms hit limit
 
-    // disk based Directory to reduce risk of OOM
+    // disk based Directory and IWC settings to reduce risk of OOM
     Directory dir = newFSDirectory(createTempDir("TestDocTermOrdsUninvertLimit"));
-    final RandomIndexWriter w = new RandomIndexWriter(random(), dir,
-        newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    final IndexWriter w = new IndexWriter(dir,
+                                          new IndexWriterConfig(new MockAnalyzer(random()))
+                                          .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+                                          .setRAMBufferSizeMB(256.0)
+                                          .setMergeScheduler(new ConcurrentMergeScheduler())
+                                          .setMergePolicy(newLogMergePolicy(false, 10))
+                                          .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+                                          .setCodec(TestUtil.getDefaultCodec()));
     Document doc = new Document();
     Field field = newTextField("field", "", Field.Store.NO);
     doc.add(field);
@@ -65,9 +74,9 @@ public class TestDocTermOrdsUninvertLimit extends SolrTestCase {
       w.addDocument(doc);
     }
     //System.out.println("\n Finished adding " + DOCS + " documents of " + TERMS + " unique terms");
-    final IndexReader r = w.getReader();
     w.close();
+    final IndexReader r = DirectoryReader.open(dir);
     try {
      final LeafReader ar = SlowCompositeReaderWrapper.wrap(r);
      TestUtil.checkReader(ar);
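
Taken together, the patch swaps the test's RandomIndexWriter (which randomizes parts of the IndexWriterConfig and can occasionally pick memory-hungry settings) for a plain IndexWriter with a fixed, RAM-bounded configuration, and it opens the IndexReader from the Directory only after the writer has been closed. Below is a condensed, standalone sketch of that pattern, with a few stated substitutions: plain Lucene classes (FSDirectory, StandardAnalyzer, LogDocMergePolicy) stand in for the LuceneTestCase helpers used in the test (newFSDirectory, MockAnalyzer, newLogMergePolicy), and the test-framework-specific setCodec(TestUtil.getDefaultCodec()) call is omitted. It is illustrative, not the actual test code.

// Standalone sketch of the indexing pattern introduced by this patch
// (assumed class and file names; not part of the Lucene/Solr source tree).
import java.nio.file.Files;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class FixedConfigIndexingSketch {
  public static void main(String[] args) throws Exception {
    // Disk-backed directory, so buffered segments spill to disk instead of the heap.
    try (Directory dir = FSDirectory.open(Files.createTempDirectory("fixed-iwc-sketch"))) {

      // Fixed IndexWriterConfig: flush by RAM usage only, bounded RAM buffer,
      // concurrent merges, and a log-based merge policy -- no per-run randomization.
      IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer())
          .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
          .setRAMBufferSizeMB(256.0)
          .setMergeScheduler(new ConcurrentMergeScheduler())
          .setMergePolicy(new LogDocMergePolicy())
          .setOpenMode(IndexWriterConfig.OpenMode.CREATE);

      try (IndexWriter w = new IndexWriter(dir, iwc)) {
        Document doc = new Document();
        doc.add(new TextField("field", "some terms here", Field.Store.NO));
        w.addDocument(doc);
      } // writer is fully closed before any reader is opened

      // Open the reader from the directory rather than from the writer,
      // mirroring the DirectoryReader.open(dir) call in the patched test.
      try (IndexReader r = DirectoryReader.open(dir)) {
        System.out.println("maxDoc=" + r.maxDoc());
      }
    }
  }
}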