LUCENE-3184: add LuceneTestCase.rarely/LuceneTestCase.atLeast

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1133599 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2011-06-08 23:32:12 +00:00
parent 1216e243e4
commit ae2239e134
30 changed files with 183 additions and 128 deletions

View File

@ -129,7 +129,7 @@ public final class MockAnalyzer extends Analyzer {
Integer val = previousMappings.get(fieldName);
if (val == null) {
val = -1; // no payloads
if (LuceneTestCase.TEST_NIGHTLY || random.nextInt(20) == 0) {
if (LuceneTestCase.rarely(random)) {
switch(random.nextInt(3)) {
case 0: val = -1; // no payloads
break;

View File

@ -725,6 +725,47 @@ public abstract class LuceneTestCase extends Assert {
}
}
/**
 * Returns a number at least as large as <code>i</code>.
 * <p>
 * The value is scaled up when {@link #TEST_NIGHTLY} is active and by
 * {@link #RANDOM_MULTIPLIER}, with some extra random fudge on top.
 */
public static int atLeast(Random random, int i) {
  // nightly runs do five times the work; always scale by the multiplier
  final int floor = (TEST_NIGHTLY ? 5 * i : i) * RANDOM_MULTIPLIER;
  // permit up to 50% more than the scaled minimum
  final int ceiling = floor + floor / 2;
  return _TestUtil.nextInt(random, floor, ceiling);
}
/** Returns a number at least <code>i</code>, using the class-level {@link #random}. */
public static int atLeast(int i) {
return atLeast(random, i);
}
/**
 * Returns true if something should happen rarely.
 * <p>
 * The probability is influenced by whether {@link #TEST_NIGHTLY} is active
 * and by {@link #RANDOM_MULTIPLIER}.
 */
public static boolean rarely(Random random) {
  int probability = TEST_NIGHTLY ? 25 : 5; // base chance, in percent
  // grow logarithmically with the multiplier (no-op when it is 1)
  probability += (probability * Math.log(RANDOM_MULTIPLIER));
  // never let the chance exceed 90%
  final int threshold = 100 - Math.min(probability, 90);
  return random.nextInt(100) >= threshold;
}
/** Returns true if something should happen rarely, using the class-level {@link #random}. */
public static boolean rarely() {
return rarely(random);
}
/** Returns true if something should happen usually: the inverse of {@link #rarely(Random)}. */
public static boolean usually(Random random) {
return !rarely(random);
}
/** Returns true if something should happen usually, using the class-level {@link #random}. */
public static boolean usually() {
return usually(random);
}
// @deprecated (4.0) These deprecated methods should be removed soon, when all tests using no Epsilon are fixed:
@Deprecated
static public void assertEquals(double expected, double actual) {
@ -836,7 +877,7 @@ public abstract class LuceneTestCase extends Assert {
c.setMergeScheduler(new SerialMergeScheduler());
}
if (r.nextBoolean()) {
if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
if (rarely(r)) {
// crazy value
c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 7));
} else {
@ -845,7 +886,7 @@ public abstract class LuceneTestCase extends Assert {
}
}
if (r.nextBoolean()) {
if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
if (rarely(r)) {
// crazy value
c.setTermIndexInterval(random.nextBoolean() ? _TestUtil.nextInt(r, 1, 31) : _TestUtil.nextInt(r, 129, 1000));
} else {
@ -882,7 +923,7 @@ public abstract class LuceneTestCase extends Assert {
LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
logmp.setUseCompoundFile(r.nextBoolean());
logmp.setCalibrateSizeByDeletes(r.nextBoolean());
if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
if (rarely(r)) {
logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 4));
} else {
logmp.setMergeFactor(_TestUtil.nextInt(r, 5, 50));
@ -892,7 +933,7 @@ public abstract class LuceneTestCase extends Assert {
public static TieredMergePolicy newTieredMergePolicy(Random r) {
TieredMergePolicy tmp = new TieredMergePolicy();
if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
if (rarely(r)) {
tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 4));
tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 4));
} else {
@ -1060,7 +1101,7 @@ public abstract class LuceneTestCase extends Assert {
/** Returns a new field instance, using the specified random.
* See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
if (!TEST_NIGHTLY && random.nextInt(20) > 0) {
if (usually(random)) {
// most of the time, don't modify the params
return new Field(name, value, store, index, tv);
}
@ -1128,7 +1169,7 @@ public abstract class LuceneTestCase extends Assert {
};
public static String randomDirectory(Random random) {
if (random.nextInt(20) == 0) {
if (rarely(random)) {
return CORE_DIRECTORIES[random.nextInt(CORE_DIRECTORIES.length)];
} else {
return "RAMDirectory";
@ -1192,7 +1233,7 @@ public abstract class LuceneTestCase extends Assert {
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
if (random.nextBoolean()) {
if (maybeWrap && random.nextInt(20) == 0) {
if (maybeWrap && rarely()) {
return new IndexSearcher(new SlowMultiReaderWrapper(r));
} else {
return new IndexSearcher(r);

View File

@ -506,7 +506,7 @@ public class TestExternalCodecs extends LuceneTestCase {
provider.register(new RAMOnlyCodec());
provider.setDefaultFieldCodec("RamOnly");
final int NUM_DOCS = 173;
final int NUM_DOCS = atLeast(173);
MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
IndexWriter w = new IndexWriter(

View File

@ -59,7 +59,8 @@ public class TestSearchForDuplicates extends LuceneTestCase {
public void testRun() throws Exception {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw, true);
doTest(random, pw, false);
final int MAX_DOCS = atLeast(225);
doTest(random, pw, false, MAX_DOCS);
pw.close();
sw.close();
String multiFileOutput = sw.getBuffer().toString();
@ -67,7 +68,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
sw = new StringWriter();
pw = new PrintWriter(sw, true);
doTest(random, pw, true);
doTest(random, pw, true, MAX_DOCS);
pw.close();
sw.close();
String singleFileOutput = sw.getBuffer().toString();
@ -76,7 +77,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
}
private void doTest(Random random, PrintWriter out, boolean useCompoundFiles) throws Exception {
private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS) throws Exception {
Directory directory = newDirectory();
Analyzer analyzer = new MockAnalyzer(random);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
@ -90,8 +91,6 @@ public class TestSearchForDuplicates extends LuceneTestCase {
writer.setInfoStream(System.out);
}
final int MAX_DOCS = 225;
for (int j = 0; j < MAX_DOCS; j++) {
Document d = new Document();
d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));

View File

@ -114,6 +114,6 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
checkRandomData(random, new MockAnalyzer(random), 10000*RANDOM_MULTIPLIER);
checkRandomData(random, new MockAnalyzer(random), atLeast(1000));
}
}

View File

@ -23,7 +23,7 @@ public class TestByteSlices extends LuceneTestCase {
public void testBasic() throws Throwable {
ByteBlockPool pool = new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, Integer.MAX_VALUE));
final int NUM_STREAM = 100 * RANDOM_MULTIPLIER;
final int NUM_STREAM = atLeast(100);
ByteSliceWriter writer = new ByteSliceWriter(pool);
@ -40,7 +40,7 @@ public class TestByteSlices extends LuceneTestCase {
counters[stream] = 0;
}
int num = 10000 * RANDOM_MULTIPLIER;
int num = atLeast(10000);
for (int iter = 0; iter < num; iter++) {
int stream = random.nextInt(NUM_STREAM);
if (VERBOSE)

View File

@ -64,7 +64,7 @@ import org.apache.lucene.util._TestUtil;
public class TestCodecs extends LuceneTestCase {
private static String[] fieldNames = new String[] {"one", "two", "three", "four"};
private final static int NUM_TEST_ITER = 20 * RANDOM_MULTIPLIER;
private final static int NUM_TEST_ITER = atLeast(20);
private final static int NUM_TEST_THREADS = 3;
private final static int NUM_FIELDS = 4;
private final static int NUM_TERMS_RAND = 50; // must be > 16 to test skipping

View File

@ -155,7 +155,8 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
}
public void testFieldNumberGaps() throws IOException {
for (int i = 0; i < 39; i++) {
int numIters = atLeast(13);
for (int i = 0; i < numIters; i++) {
Directory dir = newDirectory();
{
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
@ -270,8 +271,8 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
@Test
public void testManyFields() throws Exception {
final int NUM_DOCS = 2000;
final int MAX_FIELDS = 50;
final int NUM_DOCS = atLeast(200);
final int MAX_FIELDS = atLeast(50);
int[][] docs = new int[NUM_DOCS][4];
for (int i = 0; i < docs.length; i++) {

View File

@ -214,7 +214,7 @@ public class TestDocTermOrds extends LuceneTestCase {
public void testRandom() throws Exception {
MockDirectoryWrapper dir = newDirectory();
final int NUM_TERMS = (TEST_NIGHTLY ? 100 : 20) * RANDOM_MULTIPLIER;
final int NUM_TERMS = atLeast(20);
final Set<BytesRef> terms = new HashSet<BytesRef>();
while(terms.size() < NUM_TERMS) {
final String s = _TestUtil.randomRealisticUnicodeString(random);
@ -226,7 +226,7 @@ public class TestDocTermOrds extends LuceneTestCase {
final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
Arrays.sort(termsArray);
final int NUM_DOCS = (TEST_NIGHTLY ? 1000 : 100) * RANDOM_MULTIPLIER;
final int NUM_DOCS = atLeast(100);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
@ -317,7 +317,7 @@ public class TestDocTermOrds extends LuceneTestCase {
}
final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]);
final int NUM_TERMS = (TEST_NIGHTLY ? 100 : 20) * RANDOM_MULTIPLIER;
final int NUM_TERMS = atLeast(20);
final Set<BytesRef> terms = new HashSet<BytesRef>();
while(terms.size() < NUM_TERMS) {
final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random);
@ -329,7 +329,7 @@ public class TestDocTermOrds extends LuceneTestCase {
final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
Arrays.sort(termsArray);
final int NUM_DOCS = (TEST_NIGHTLY ? 1000 : 100) * RANDOM_MULTIPLIER;
final int NUM_DOCS = atLeast(100);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));

View File

@ -57,7 +57,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("1");
ReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@ -112,7 +113,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
int numDocs = TEST_NIGHTLY ? 131 : 47;
int numDocs = atLeast(47);
int max = 1051;
int term = random.nextInt(max);
Integer[][] positionsInDoc = new Integer[numDocs][];
@ -120,7 +121,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
Document doc = new Document();
ArrayList<Integer> positions = new ArrayList<Integer>();
StringBuilder builder = new StringBuilder();
int num = TEST_NIGHTLY ? 3049 : 131;
int num = atLeast(131);
for (int j = 0; j < num; j++) {
int nextInt = random.nextInt(max);
builder.append(nextInt).append(" ");
@ -141,7 +142,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
ReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@ -193,7 +195,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
int numDocs = TEST_NIGHTLY ? 499 : 49;
int numDocs = atLeast(49);
int max = 15678;
int term = random.nextInt(max);
int[] freqInDoc = new int[numDocs];
@ -215,7 +217,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
ReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@ -291,7 +294,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("even");
ReaderContext topReaderContext = reader.getTopReaderContext();

View File

@ -30,19 +30,24 @@ import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Before;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class TestFlushByRamOrCountsPolicy extends LuceneTestCase {
private LineFileDocs lineDocFile;
private static LineFileDocs lineDocFile;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
@BeforeClass
public static void beforeClass() throws Exception {
lineDocFile = new LineFileDocs(random);
}
@AfterClass
public static void afterClass() throws Exception {
lineDocFile.close();
lineDocFile = null;
}
public void testFlushByRam() throws CorruptIndexException,
LockObtainFailedException, IOException, InterruptedException {
int[] numThreads = new int[] { 3 + random.nextInt(12), 1 };

View File

@ -42,7 +42,8 @@ import org.apache.lucene.util._TestUtil;
public class TestGlobalFieldNumbers extends LuceneTestCase {
public void testGlobalFieldNumberFiles() throws IOException {
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 3); i++) {
int num = atLeast(3);
for (int i = 0; i < num; i++) {
Directory dir = newDirectory();
{
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
@ -113,7 +114,8 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
}
public void testIndexReaderCommit() throws IOException {
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 3); i++) {
int num = atLeast(3);
for (int i = 0; i < num; i++) {
Directory dir = newDirectory();
{
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
@ -156,7 +158,8 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
}
public void testGlobalFieldNumberFilesAcrossCommits() throws IOException {
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 3); i++) {
int num = atLeast(3);
for (int i = 0; i < num; i++) {
Directory dir = newDirectory();
{
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
@ -207,7 +210,8 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
}
public void testGlobalFieldNumberOnOldCommit() throws IOException {
for (int i = 0; i < (TEST_NIGHTLY ? 39 : 3); i++) {
int num = atLeast(3);
for (int i = 0; i < num; i++) {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(

View File

@ -64,7 +64,7 @@ public class TestIndexWriterOnJRECrash extends TestNRTThreads {
}
} else {
// we are the fork, setup a crashing thread
final int crashTime = _TestUtil.nextInt(random, 500, 4000);
final int crashTime = TEST_NIGHTLY ? _TestUtil.nextInt(random, 500, 4000) : _TestUtil.nextInt(random, 300, 1000);
Thread t = new Thread() {
@Override
public void run() {

View File

@ -157,7 +157,7 @@ public class TestNRTThreads extends LuceneTestCase {
final int NUM_INDEX_THREADS = 2;
final int NUM_SEARCH_THREADS = 3;
final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : 5;
final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER;
final AtomicBoolean failed = new AtomicBoolean();
final AtomicInteger addCount = new AtomicInteger();

View File

@ -57,18 +57,21 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
/** epsilon for score comparisons */
static final float epsilon = 0.00001f;
static int[][] mappings = new int[][] {
new int[] { 0x40, 0x41 },
new int[] { 0x40, 0x0195 },
new int[] { 0x40, 0x0906 },
new int[] { 0x40, 0x1040F },
new int[] { 0x0194, 0x0195 },
new int[] { 0x0194, 0x0906 },
new int[] { 0x0194, 0x1040F },
new int[] { 0x0905, 0x0906 },
new int[] { 0x0905, 0x1040F },
new int[] { 0x1040E, 0x1040F }
};
public void testFromTestData() throws Exception {
// TODO: randomize!
assertFromTestData(new int[] { 0x40, 0x41 });
assertFromTestData(new int[] { 0x40, 0x0195 });
assertFromTestData(new int[] { 0x40, 0x0906 });
assertFromTestData(new int[] { 0x40, 0x1040F });
assertFromTestData(new int[] { 0x0194, 0x0195 });
assertFromTestData(new int[] { 0x0194, 0x0906 });
assertFromTestData(new int[] { 0x0194, 0x1040F });
assertFromTestData(new int[] { 0x0905, 0x0906 });
assertFromTestData(new int[] { 0x0905, 0x1040F });
assertFromTestData(new int[] { 0x1040E, 0x1040F });
assertFromTestData(mappings[random.nextInt(mappings.length)]);
}
public void assertFromTestData(int codePointTable[]) throws Exception {

View File

@ -27,6 +27,8 @@ import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.*;
import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.io.Reader;
@ -45,14 +47,13 @@ public class TestPhraseQuery extends LuceneTestCase {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
private IndexSearcher searcher;
private IndexReader reader;
private static IndexSearcher searcher;
private static IndexReader reader;
private PhraseQuery query;
private Directory directory;
private static Directory directory;
@Override
public void setUp() throws Exception {
super.setUp();
@BeforeClass
public static void beforeClass() throws Exception {
directory = newDirectory();
Analyzer analyzer = new Analyzer() {
@Override
@ -87,15 +88,22 @@ public class TestPhraseQuery extends LuceneTestCase {
writer.close();
searcher = newSearcher(reader);
query = new PhraseQuery();
}
@Override
public void tearDown() throws Exception {
public void setUp() throws Exception {
super.setUp();
query = new PhraseQuery();
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
reader.close();
reader = null;
directory.close();
super.tearDown();
directory = null;
}
public void testNotCloseEnough() throws Exception {
@ -606,10 +614,10 @@ public class TestPhraseQuery extends LuceneTestCase {
Random r = random;
int NUM_DOCS = 10 * RANDOM_MULTIPLIER;
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(r, 10000, 30000);
int termCount = atLeast(5000);
List<String> doc = new ArrayList<String>();
@ -656,7 +664,7 @@ public class TestPhraseQuery extends LuceneTestCase {
w.close();
// now search
int num = 100 * RANDOM_MULTIPLIER;
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);

View File

@ -66,7 +66,7 @@ public class TestRegexpRandom2 extends LuceneTestCase {
Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(field);
List<String> terms = new ArrayList<String>();
int num = 2000 * RANDOM_MULTIPLIER;
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setValue(s);

View File

@ -27,15 +27,14 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.AfterClass;
import org.junit.Ignore;
/**
* Setup for function tests
*/
@Ignore
public class FunctionTestSetup extends LuceneTestCase {
public abstract class FunctionTestSetup extends LuceneTestCase {
/**
* Actual score computation order is slightly different from assumptions
@ -67,32 +66,17 @@ public class FunctionTestSetup extends LuceneTestCase {
"text for the test, but oh much much safer. ",
};
protected Directory dir;
protected Analyzer anlzr;
protected static Directory dir;
protected static Analyzer anlzr;
private final boolean doMultiSegment;
public FunctionTestSetup(boolean doMultiSegment) {
this.doMultiSegment = doMultiSegment;
}
public FunctionTestSetup() {
this(false);
}
@Override
@After
public void tearDown() throws Exception {
@AfterClass
public static void afterClassFunctionTestSetup() throws Exception {
dir.close();
dir = null;
anlzr = null;
super.tearDown();
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
protected static void createIndex(boolean doMultiSegment) throws Exception {
if (VERBOSE) {
System.out.println("TEST: setUp");
}
@ -130,7 +114,7 @@ public class FunctionTestSetup extends LuceneTestCase {
}
}
private void addDoc(RandomIndexWriter iw, int i) throws Exception {
private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
Document d = new Document();
Fieldable f;
int scoreAndID = i + 1;
@ -156,7 +140,7 @@ public class FunctionTestSetup extends LuceneTestCase {
}
// 17 --> ID00017
protected String id2String(int scoreAndID) {
protected static String id2String(int scoreAndID) {
String s = "000000000" + scoreAndID;
int n = ("" + N_DOCS).length() + 3;
int k = s.length() - n;
@ -164,17 +148,17 @@ public class FunctionTestSetup extends LuceneTestCase {
}
// some text line for regular search
private String textLine(int docNum) {
private static String textLine(int docNum) {
return DOC_TEXT_LINES[docNum % DOC_TEXT_LINES.length];
}
// extract expected doc score from its ID Field: "ID7" --> 7.0
protected float expectedFieldScore(String docIDFieldVal) {
protected static float expectedFieldScore(String docIDFieldVal) {
return Float.parseFloat(docIDFieldVal.substring(2));
}
// debug messages (change DBG to true for anything to print)
protected void log(Object o) {
protected static void log(Object o) {
if (VERBOSE) {
System.out.println(o.toString());
}

View File

@ -20,6 +20,7 @@ package org.apache.lucene.search.function;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.*;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.HashMap;
@ -33,9 +34,9 @@ import org.apache.lucene.index.Term;
*/
public class TestCustomScoreQuery extends FunctionTestSetup {
/* @override constructor */
public TestCustomScoreQuery() {
super(true);
@BeforeClass
public static void beforeClass() throws Exception {
createIndex(true);
}
/**
@ -193,7 +194,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
final Query q = new CustomExternalQuery(q1);
log(q);
IndexSearcher s = new IndexSearcher(dir);
IndexSearcher s = new IndexSearcher(dir, true);
TopDocs hits = s.search(q, 1000);
assertEquals(N_DOCS, hits.totalHits);
for(int i=0;i<N_DOCS;i++) {

View File

@ -26,6 +26,7 @@ import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.ReaderUtil;
import org.junit.BeforeClass;
import org.junit.Test;
/**
@ -41,9 +42,9 @@ import org.junit.Test;
*/
public class TestFieldScoreQuery extends FunctionTestSetup {
/* @override constructor */
public TestFieldScoreQuery() {
super(true);
@BeforeClass
public static void beforeClass() throws Exception {
createIndex(true);
}
/** Test that FieldScoreQuery of Type.BYTE returns docs in expected order. */

View File

@ -21,6 +21,7 @@ import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.*;
import org.apache.lucene.util.ReaderUtil;
import org.junit.BeforeClass;
import org.junit.Test;
/**
@ -36,9 +37,9 @@ import org.junit.Test;
*/
public class TestOrdValues extends FunctionTestSetup {
/* @override constructor */
public TestOrdValues() {
super(false);
@BeforeClass
public static void beforeClass() throws Exception {
createIndex(false);
}
/**

View File

@ -32,6 +32,8 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class TestFieldMaskingSpanQuery extends LuceneTestCase {
@ -43,17 +45,16 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
return doc;
}
protected Field field(String name, String value) {
protected static Field field(String name, String value) {
return newField(name, value, Field.Store.NO, Field.Index.ANALYZED);
}
protected IndexSearcher searcher;
protected Directory directory;
protected IndexReader reader;
protected static IndexSearcher searcher;
protected static Directory directory;
protected static IndexReader reader;
@Override
public void setUp() throws Exception {
super.setUp();
@BeforeClass
public static void beforeClass() throws Exception {
directory = newDirectory();
RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
@ -115,12 +116,14 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
searcher = newSearcher(reader);
}
@Override
public void tearDown() throws Exception {
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
reader.close();
reader = null;
directory.close();
super.tearDown();
directory = null;
}
protected void check(SpanQuery q, int[] docs) throws Exception {

View File

@ -22,8 +22,8 @@ package org.apache.lucene.util;
*/
@Deprecated
public class TestIndexableBinaryStringTools extends LuceneTestCase {
private static final int NUM_RANDOM_TESTS = 2000 * RANDOM_MULTIPLIER;
private static final int MAX_RANDOM_BINARY_LENGTH = 300 * RANDOM_MULTIPLIER;
private static final int NUM_RANDOM_TESTS = atLeast(200);
private static final int MAX_RANDOM_BINARY_LENGTH = atLeast(300);
public void testSingleBinaryRoundTrip() {
byte[] binary = new byte[] { (byte) 0x23, (byte) 0x98, (byte) 0x13,

View File

@ -308,7 +308,7 @@ public class TestNumericUtils extends LuceneTestCase {
}
public void testRandomSplit() throws Exception {
long num = 100L * RANDOM_MULTIPLIER;
long num = (long) atLeast(10);
for (long i=0; i < num; i++) {
executeOneRandomSplit(random);
}

View File

@ -43,7 +43,7 @@ public class TestStringIntern extends LuceneTestCase {
// makeStrings(100); // realistic for perf testing
int nThreads = 20;
// final int iter=100000;
final int iter = 1000000 * RANDOM_MULTIPLIER;
final int iter = atLeast(100000);
// try native intern
// StringHelper.interner = new StringInterner();

View File

@ -27,14 +27,14 @@ public class TestDeterminism extends LuceneTestCase {
/** test a bunch of random regular expressions */
public void testRegexps() throws Exception {
int num = 500 * RANDOM_MULTIPLIER;
int num = atLeast(500);
for (int i = 0; i < num; i++)
assertAutomaton(new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE).toAutomaton());
}
/** test against a simple, unoptimized det */
public void testAgainstSimple() throws Exception {
int num = 2000 * RANDOM_MULTIPLIER;
int num = atLeast(200);
for (int i = 0; i < num; i++) {
Automaton a = AutomatonTestUtil.randomAutomaton(random);
Automaton b = a.clone();

View File

@ -33,7 +33,7 @@ public class TestDeterminizeLexicon extends LuceneTestCase {
private List<String> terms = new ArrayList<String>();
public void testLexicon() throws Exception {
int num = 3 * RANDOM_MULTIPLIER;
int num = atLeast(1);
for (int i = 0; i < num; i++) {
automata.clear();
terms.clear();

View File

@ -25,7 +25,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestMinimize extends LuceneTestCase {
/** the minimal and non-minimal are compared to ensure they are the same. */
public void test() {
int num = 2000 * RANDOM_MULTIPLIER;
int num = atLeast(200);
for (int i = 0; i < num; i++) {
Automaton a = AutomatonTestUtil.randomAutomaton(random);
Automaton b = a.clone();
@ -38,7 +38,7 @@ public class TestMinimize extends LuceneTestCase {
* we check not only that they are the same, but that #states/#transitions
* are the same. */
public void testAgainstBrzozowski() {
int num = 2000 * RANDOM_MULTIPLIER;
int num = atLeast(200);
for (int i = 0; i < num; i++) {
Automaton a = AutomatonTestUtil.randomAutomaton(random);
AutomatonTestUtil.minimizeSimple(a);

View File

@ -24,7 +24,7 @@ public class TestSpecialOperations extends LuceneTestCase {
* tests against the original brics implementation.
*/
public void testIsFinite() {
int num = 2000 * RANDOM_MULTIPLIER;
int num = atLeast(200);
for (int i = 0; i < num; i++) {
Automaton a = AutomatonTestUtil.randomAutomaton(random);
Automaton b = a.clone();

View File

@ -915,7 +915,7 @@ public class TestFSTs extends LuceneTestCase {
}
public void testRandomWords() throws IOException {
testRandomWords(1000, 5 * RANDOM_MULTIPLIER);
testRandomWords(1000, atLeast(2));
//testRandomWords(20, 100);
}
@ -983,13 +983,13 @@ public class TestFSTs extends LuceneTestCase {
}
final LineFileDocs docs = new LineFileDocs(random);
final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 100 : 1;
final int RUN_TIME_MSEC = atLeast(500);
final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64);
final File tempDir = _TestUtil.getTempDir("fstlines");
final MockDirectoryWrapper dir = new MockDirectoryWrapper(random, FSDirectory.open(tempDir));
final IndexWriter writer = new IndexWriter(dir, conf);
writer.setInfoStream(VERBOSE ? System.out : null);
final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC * 1000;
final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
Document doc;
int docCount = 0;
while((doc = docs.nextDoc()) != null && System.currentTimeMillis() < stopTime) {