LUCENE-9157: test speedup for slowest tests

Robert Muir 2020-01-21 19:27:19 -05:00
parent 1af171e47f
commit c754a764d4
No known key found for this signature in database
GPG Key ID: 817AE1DD322D7ECA
73 changed files with 180 additions and 155 deletions
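The pattern running through the files below: randomized-test iteration counts are scaled down at the source, or the whole test is gated behind the test framework's @Nightly/@Slow annotations. Both knobs come from LuceneTestCase: RANDOM_MULTIPLIER multiplies a base count, and atLeast(n) returns a count of at least n that grows on nightly or multiplier-boosted runs, so lowering the base constants speeds up default runs without losing coverage on beefier CI jobs. A minimal sketch of that idiom, assuming the LuceneTestCase/BaseTokenStreamTestCase APIs used in the diff (the class name and counts here are illustrative, not part of the commit):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.util.LuceneTestCase.Nightly;

public class TestSpeedupSketch extends BaseTokenStreamTestCase {

  // Runs by default: a small base count that RANDOM_MULTIPLIER
  // (set via -Dtests.multiplier) scales back up on bigger runs.
  public void testRandomStrings() throws Exception {
    Analyzer a = new MockAnalyzer(random());
    checkRandomData(random(), a, 100 * RANDOM_MULTIPLIER);
    a.close();
  }

  // Gated entirely: executed only when -Dtests.nightly=true.
  @Nightly
  public void testRandomHugeStrings() throws Exception {
    Analyzer a = new MockAnalyzer(random());
    // atLeast(n) returns at least n, scaled up on nightly/multiplier runs.
    checkRandomData(random(), a, atLeast(10), 8192);
    a.close();
  }
}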

View File

@@ -213,7 +213,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
}
};
-int numRounds = RANDOM_MULTIPLIER * 10000;
+int numRounds = RANDOM_MULTIPLIER * 1000;
checkRandomData(random(), analyzer, numRounds);
analyzer.close();
}

View File

@@ -230,7 +230,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
Analyzer analyzers[] = new Analyzer[] { new WhitespaceAnalyzer(), new SimpleAnalyzer(),
new StopAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET), new UnicodeWhitespaceAnalyzer() };
for (Analyzer analyzer : analyzers) {
-checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER);
}
IOUtils.close(analyzers);
}
@@ -240,7 +240,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
Analyzer analyzers[] = new Analyzer[] { new WhitespaceAnalyzer(), new SimpleAnalyzer(),
new StopAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET), new UnicodeWhitespaceAnalyzer() };
for (Analyzer analyzer : analyzers) {
-checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random(), analyzer, 10*RANDOM_MULTIPLIER, 8192);
}
IOUtils.close(analyzers);
}

View File

@@ -227,7 +227,7 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
}
// LUCENE-5269
-@Slow
+@Nightly
public void testUnicodeShinglesAndNgrams() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override

View File

@@ -39,6 +39,7 @@ import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.AttributeFactory;
+import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.Version;
/**
@@ -51,6 +52,7 @@ import org.apache.lucene.util.Version;
// currently the only way to do this is via eclipse etc (LUCENE-3974)
// TODO: fix this to use CustomAnalyzer instead of its own FactoryAnalyzer
+@Nightly
public class TestFactories extends BaseTokenStreamTestCase {
/** Factories that are excluded from testing it with random data */
@@ -81,7 +83,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
Analyzer a = new FactoryAnalyzer(factory, null, null);
-checkRandomData(random(), a, 20, 20, false, false);
+checkRandomData(random(), a, 3, 20, false, false);
a.close();
}
}
@@ -96,7 +98,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
Analyzer a = new FactoryAnalyzer(assertingTokenizer, factory, null);
-checkRandomData(random(), a, 20, 20, false, false);
+checkRandomData(random(), a, 3, 20, false, false);
a.close();
}
}
@@ -111,7 +113,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
Analyzer a = new FactoryAnalyzer(assertingTokenizer, null, factory);
-checkRandomData(random(), a, 20, 20, false, false);
+checkRandomData(random(), a, 3, 20, false, false);
a.close();
}
}

View File

@@ -414,7 +414,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
-int numIterations = atLeast(5);
+int numIterations = atLeast(3);
for (int i = 0; i < numIterations; i++) {
final int flags = random().nextInt(512);
final CharArraySet protectedWords;
@@ -433,14 +433,14 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
}
};
// TODO: properly support positionLengthAttribute
-checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 20, false, false);
+checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 20, false, false);
a.close();
}
}
/** blast some enormous random strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
-int numIterations = atLeast(5);
+int numIterations = atLeast(3);
for (int i = 0; i < numIterations; i++) {
final int flags = random().nextInt(512);
final CharArraySet protectedWords;
@@ -459,7 +459,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
}
};
// TODO: properly support positionLengthAttribute
-checkRandomData(random(), a, 20*RANDOM_MULTIPLIER, 8192, false, false);
+checkRandomData(random(), a, 10*RANDOM_MULTIPLIER, 8192, false, false);
a.close();
}
}

View File

@@ -475,7 +475,7 @@ public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
-int numIterations = atLeast(5);
+int numIterations = atLeast(3);
for (int i = 0; i < numIterations; i++) {
final int flags = random().nextInt(512);
final CharArraySet protectedWords;
@@ -494,14 +494,14 @@ public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
}
};
// TODO: properly support positionLengthAttribute
-checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 20, false, false);
+checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 20, false, false);
a.close();
}
}
/** blast some enormous random strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
-int numIterations = atLeast(5);
+int numIterations = atLeast(3);
for (int i = 0; i < numIterations; i++) {
final int flags = random().nextInt(512);
final CharArraySet protectedWords;
@@ -521,7 +521,7 @@ public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
}
};
// TODO: properly support positionLengthAttribute
-checkRandomData(random(), a, 20*RANDOM_MULTIPLIER, 8192, false, false);
+checkRandomData(random(), a, 10*RANDOM_MULTIPLIER, 8192, false, false);
a.close();
}
}

View File

@@ -298,7 +298,7 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
-int numPatterns = 10 + random().nextInt(20);
+int numPatterns = 5;
Random random = new Random(random().nextLong());
for (int i = 0; i < numPatterns; i++) {
final Pattern p = TestUtil.randomPattern(random());
@@ -322,7 +322,7 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase {
final int maxInputLength = 30;
/* ASCII only input?: */
final boolean asciiOnly = true;
-checkRandomData(random, a, 250 * RANDOM_MULTIPLIER, maxInputLength, asciiOnly);
+checkRandomData(random, a, 50 * RANDOM_MULTIPLIER, maxInputLength, asciiOnly);
a.close();
}
}

View File

@@ -147,6 +147,7 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase
}
// LUCENE-6814
+@Nightly
public void testHeapFreedAfterClose() throws Exception {
// TODO: can we move this to BaseTSTC to catch other "hangs onto heap"ers?

View File

@@ -23,14 +23,14 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.LuceneTestCase.Nightly;
import static org.apache.lucene.analysis.VocabularyAssert.*;
/**
* Test the snowball filters against the snowball data tests
*/
-@Slow
+@Nightly
public class TestSnowballVocab extends LuceneTestCase {
/**
* Run all languages against their snowball vocabulary tests.

View File

@@ -40,7 +40,7 @@ import java.util.Random;
public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
// LUCENE-5440: extremely slow tokenization of text matching email <local-part> (before the '@')
-@Slow
+@Nightly
public void testLongEMAILatomText() throws Exception {
// EMAILatomText = [A-Za-z0-9!#$%&'*+-/=?\^_`{|}~]
char[] emailAtomChars

View File

@@ -1406,7 +1406,7 @@ public class TestSynonymGraphFilter extends BaseTokenStreamTestCase {
a = getAnalyzer(b, true);
}
-int iters = atLeast(20);
+int iters = atLeast(10);
for(int iter=0;iter<iters;iter++) {
String doc = toTokenString(randomBinaryChars(50, 100, bias, 'a'));

View File

@@ -58,7 +58,7 @@ public class TestExtendedMode extends BaseTokenStreamTestCase {
/** random test ensuring we don't ever split supplementaries */
public void testSurrogates2() throws IOException {
-int numIterations = atLeast(1000);
+int numIterations = atLeast(500);
for (int i = 0; i < numIterations; i++) {
String s = TestUtil.randomUnicodeString(random(), 100);
try (TokenStream ts = analyzer.tokenStream("foo", s)) {
@@ -75,12 +75,12 @@ public class TestExtendedMode extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
Random random = random();
-checkRandomData(random, analyzer, 500*RANDOM_MULTIPLIER);
+checkRandomData(random, analyzer, 100*RANDOM_MULTIPLIER);
}
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
Random random = random();
-checkRandomData(random, analyzer, 30*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzer, 3*RANDOM_MULTIPLIER, 8192);
}
}

View File

@@ -38,6 +38,7 @@ import org.apache.lucene.analysis.util.ResourceLoaderAware;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.AttributeFactory;
+import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.Version;
/**
@@ -48,6 +49,7 @@ import org.apache.lucene.util.Version;
// TODO: this was copied from the analysis/common module ... find a better way to share it!
// TODO: fix this to use CustomAnalyzer instead of its own FactoryAnalyzer
+@Nightly
public class TestFactories extends BaseTokenStreamTestCase {
/** Factories that are excluded from testing it with random data */
@@ -78,7 +80,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
Analyzer a = new FactoryAnalyzer(factory, null, null);
-checkRandomData(random(), a, 20, 20, false, false);
+checkRandomData(random(), a, 3, 20, false, false);
a.close();
}
}
@@ -93,7 +95,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
Analyzer a = new FactoryAnalyzer(assertingTokenizer, factory, null);
-checkRandomData(random(), a, 20, 20, false, false);
+checkRandomData(random(), a, 3, 20, false, false);
a.close();
}
}
@@ -108,7 +110,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
// beast it just a little, it shouldnt throw exceptions:
// (it should have thrown them in initialize)
Analyzer a = new FactoryAnalyzer(assertingTokenizer, null, factory);
-checkRandomData(random(), a, 20, 20, false, false);
+checkRandomData(random(), a, 3, 20, false, false);
a.close();
}
}

View File

@@ -133,7 +133,7 @@ public class TestJapaneseAnalyzer extends BaseTokenStreamTestCase {
final Analyzer a = new JapaneseAnalyzer(null, Mode.SEARCH,
JapaneseAnalyzer.getDefaultStopSet(),
JapaneseAnalyzer.getDefaultStopTags());
-checkRandomData(random, a, atLeast(1000));
+checkRandomData(random, a, atLeast(100));
a.close();
}

View File

@@ -256,12 +256,12 @@ public class TestJapaneseNumberFilter extends BaseTokenStreamTestCase {
@Test
public void testRandomHugeStrings() throws Exception {
-checkRandomData(random(), analyzer, 50 * RANDOM_MULTIPLIER, 8192);
+checkRandomData(random(), analyzer, 5 * RANDOM_MULTIPLIER, 8192);
}
@Test
public void testRandomSmallStrings() throws Exception {
-checkRandomData(random(), analyzer, 500 * RANDOM_MULTIPLIER, 128);
+checkRandomData(random(), analyzer, 100 * RANDOM_MULTIPLIER, 128);
}
@Test

View File

@@ -314,17 +314,17 @@ public class
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
-checkRandomData(random(), analyzer, 500*RANDOM_MULTIPLIER);
-checkRandomData(random(), analyzerNoPunct, 500*RANDOM_MULTIPLIER);
-checkRandomData(random(), analyzerNormalNBest, 500*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzerNoPunct, 100*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzerNormalNBest, 100*RANDOM_MULTIPLIER);
}
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
Random random = random();
-checkRandomData(random, analyzer, 20*RANDOM_MULTIPLIER, 8192);
-checkRandomData(random, analyzerNoPunct, 20*RANDOM_MULTIPLIER, 8192);
-checkRandomData(random, analyzerNormalNBest, 20*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzer, 3*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzerNoPunct, 3*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzerNormalNBest, 3*RANDOM_MULTIPLIER, 8192);
}
public void testRandomHugeStringsMockGraphAfter() throws Exception {
@@ -338,7 +338,7 @@ public class
return new TokenStreamComponents(tokenizer, graph);
}
};
-checkRandomData(random, analyzer, 20*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzer, 3*RANDOM_MULTIPLIER, 8192);
analyzer.close();
}
@@ -362,7 +362,7 @@ public class
/** random test ensuring we don't ever split supplementaries */
public void testSurrogates2() throws IOException {
-int numIterations = atLeast(10000);
+int numIterations = atLeast(500);
for (int i = 0; i < numIterations; i++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + i);

View File

@@ -274,12 +274,12 @@ public class TestKoreanNumberFilter extends BaseTokenStreamTestCase {
@Test
public void testRandomHugeStrings() throws Exception {
-checkRandomData(random(), analyzer, 50 * RANDOM_MULTIPLIER, 8192);
+checkRandomData(random(), analyzer, 5 * RANDOM_MULTIPLIER, 8192);
}
@Test
public void testRandomSmallStrings() throws Exception {
-checkRandomData(random(), analyzer, 500 * RANDOM_MULTIPLIER, 128);
+checkRandomData(random(), analyzer, 100 * RANDOM_MULTIPLIER, 128);
}
@Test

View File

@@ -368,17 +368,17 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
/** blast some random strings through the tokenizer */
public void testRandomStrings() throws Exception {
-checkRandomData(random(), analyzer, 500*RANDOM_MULTIPLIER);
-checkRandomData(random(), analyzerUnigram, 500*RANDOM_MULTIPLIER);
-checkRandomData(random(), analyzerDecompound, 500*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzerUnigram, 100*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzerDecompound, 100*RANDOM_MULTIPLIER);
}
/** blast some random large strings through the tokenizer */
public void testRandomHugeStrings() throws Exception {
Random random = random();
-checkRandomData(random, analyzer, 20*RANDOM_MULTIPLIER, 8192);
-checkRandomData(random, analyzerUnigram, 20*RANDOM_MULTIPLIER, 8192);
-checkRandomData(random, analyzerDecompound, 20*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzer, 3*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzerUnigram, 3*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzerDecompound, 3*RANDOM_MULTIPLIER, 8192);
}
public void testRandomHugeStringsMockGraphAfter() throws Exception {
@@ -392,7 +392,7 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
return new TokenStreamComponents(tokenizer, graph);
}
};
-checkRandomData(random, analyzer, 20*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzer, 3*RANDOM_MULTIPLIER, 8192);
analyzer.close();
}

View File

@@ -439,14 +439,14 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
Analyzer analyzer = new StandardAnalyzer();
-checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+checkRandomData(random(), analyzer, 200*RANDOM_MULTIPLIER);
analyzer.close();
}
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
Analyzer analyzer = new StandardAnalyzer();
-checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random(), analyzer, 20*RANDOM_MULTIPLIER, 8192);
analyzer.close();
}
@@ -461,7 +461,7 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
return new TokenStreamComponents(tokenizer, tokenStream);
}
};
-checkRandomData(random, analyzer, 100*RANDOM_MULTIPLIER, 8192);
+checkRandomData(random, analyzer, 20*RANDOM_MULTIPLIER, 8192);
analyzer.close();
}

View File

@@ -43,6 +43,7 @@ public class TestIndexedDISI extends LuceneTestCase {
}
// EMPTY blocks are special with regard to jumps as they have size 0
+@Nightly
public void testEmptyBlocks() throws IOException {
final int B = 65536;
int maxDoc = B*11;

View File

@@ -115,7 +115,7 @@ public class TestLucene80DocValuesFormat extends BaseCompressingDocValuesFormatT
}
}
-@Slow
+@Nightly
public void testTermsEnumFixedWidth() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
@@ -123,7 +123,7 @@ public class TestLucene80DocValuesFormat extends BaseCompressingDocValuesFormatT
}
}
-@Slow
+@Nightly
public void testTermsEnumVariableWidth() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
@@ -139,6 +139,7 @@ public class TestLucene80DocValuesFormat extends BaseCompressingDocValuesFormatT
}
}
+@Nightly
public void testTermsEnumLongSharedPrefixes() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
@@ -533,12 +534,12 @@ public class TestLucene80DocValuesFormat extends BaseCompressingDocValuesFormatT
}
}
-@Slow
+@Nightly
public void testSortedNumericBlocksOfVariousBitsPerValue() throws Exception {
doTestSortedNumericBlocksOfVariousBitsPerValue(() -> TestUtil.nextInt(random(), 1, 3));
}
-@Slow
+@Nightly
public void testSparseSortedNumericBlocksOfVariousBitsPerValue() throws Exception {
doTestSortedNumericBlocksOfVariousBitsPerValue(() -> TestUtil.nextInt(random(), 0, 2));
}
@@ -548,14 +549,14 @@ public class TestLucene80DocValuesFormat extends BaseCompressingDocValuesFormatT
doTestSparseNumericBlocksOfVariousBitsPerValue(1);
}
-@Slow
+@Nightly
public void testSparseNumericBlocksOfVariousBitsPerValue() throws Exception {
doTestSparseNumericBlocksOfVariousBitsPerValue(random().nextDouble());
}
// The LUCENE-8585 jump-tables enables O(1) skipping of IndexedDISI blocks, DENSE block lookup
// and numeric multi blocks. This test focuses on testing these jumps.
-@Slow
+@Nightly
public void testNumericFieldJumpTables() throws Exception {
// IndexedDISI block skipping only activated if target >= current+2, so we need at least 5 blocks to
// trigger consecutive block skips

View File

@@ -70,7 +70,7 @@ public abstract class BaseShapeTestCase extends LuceneTestCase {
// A particularly tricky adversary for BKD tree:
public void testSameShapeManyTimes() throws Exception {
-int numShapes = atLeast(500);
+int numShapes = atLeast(50);
// Every doc has 2 points:
Object theShape = nextShape();
@@ -82,8 +82,9 @@ public abstract class BaseShapeTestCase extends LuceneTestCase {
}
// Force low cardinality leaves
+@Slow
public void testLowCardinalityShapeManyTimes() throws Exception {
-int numShapes = atLeast(500);
+int numShapes = atLeast(20);
int cardinality = TestUtil.nextInt(random(), 2, 20);
Object[] diffShapes = new Object[cardinality];
@@ -106,7 +107,7 @@ public abstract class BaseShapeTestCase extends LuceneTestCase {
@Slow
public void testRandomMedium() throws Exception {
-doTestRandom(1000);
+doTestRandom(atLeast(20));
}
@Slow

View File

@@ -195,7 +195,7 @@ public class TestGeoUtils extends LuceneTestCase {
// TODO: does not really belong here, but we test it like this for now
// we can make a fake IndexReader to send boxes directly to Point visitors instead?
public void testCircleOpto() throws Exception {
-int iters = atLeast(20);
+int iters = atLeast(3);
for (int i = 0; i < iters; i++) {
// circle
final double centerLat = -90 + 180.0 * random().nextDouble();

View File

@@ -44,7 +44,7 @@ import org.apache.lucene.util.TestUtil;
public class TestBagOfPositions extends LuceneTestCase {
public void test() throws Exception {
List<String> postingsList = new ArrayList<>();
-int numTerms = atLeast(300);
+int numTerms = atLeast(100);
final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20);
boolean isSimpleText = "SimpleText".equals(TestUtil.getPostingsFormat("field"));

View File

@@ -352,7 +352,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
if (d instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)d).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}
-IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
iwc.setMaxBufferedDocs(5);
CountDownLatch atLeastOneMerge = new CountDownLatch(1);
iwc.setMergeScheduler(new TrackingCMS(atLeastOneMerge));

View File

@@ -952,7 +952,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
ReindexingReader reindexer = null;
// TODO: separate refresh thread, search threads, indexing threads
-int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
+int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 200);
int maxID = 0;
Path root = createTempDir();
int refreshEveryNumDocs = 100;
@@ -1037,7 +1037,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
ReindexingReader reindexer = null;
// TODO: separate refresh thread, search threads, indexing threads
-int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
+int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 200);
int maxID = 0;
Path root = createTempDir();
int refreshEveryNumDocs = 100;
@@ -1215,7 +1215,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
ReindexingReader reindexer = null;
// TODO: separate refresh thread, search threads, indexing threads
-int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 1000);
+int numDocs = atLeast(TEST_NIGHTLY ? 20000 : 200);
int maxID = 0;
int refreshEveryNumDocs = 100;
int commitCloseNumDocs = 1000;

View File

@@ -147,7 +147,7 @@ public class TestDuelingCodecs extends LuceneTestCase {
*/
// we use a small amount of docs here, so it works with any codec
public void testEquals() throws IOException {
-int numdocs = atLeast(100);
+int numdocs = atLeast(20);
createRandomIndex(numdocs, leftWriter, seed);
createRandomIndex(numdocs, rightWriter, seed);
@@ -158,7 +158,7 @@ public class TestDuelingCodecs extends LuceneTestCase {
}
public void testCrazyReaderEquals() throws IOException {
-int numdocs = atLeast(100);
+int numdocs = atLeast(20);
createRandomIndex(numdocs, leftWriter, seed);
createRandomIndex(numdocs, rightWriter, seed);

View File

@@ -1691,7 +1691,7 @@ public class TestIndexSorting extends LuceneTestCase {
Sort indexSort = new Sort(new SortField("foo", SortField.Type.LONG));
iwc.setIndexSort(indexSort);
IndexWriter w = new IndexWriter(dir, iwc);
-final int numDocs = atLeast(1000);
+final int numDocs = atLeast(200);
final FixedBitSet deleted = new FixedBitSet(numDocs);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
@@ -1762,7 +1762,7 @@ public class TestIndexSorting extends LuceneTestCase {
Sort indexSort = new Sort(new SortedNumericSortField("foo", SortField.Type.LONG));
iwc.setIndexSort(indexSort);
IndexWriter w = new IndexWriter(dir, iwc);
-final int numDocs = atLeast(1000);
+final int numDocs = atLeast(200);
final FixedBitSet deleted = new FixedBitSet(numDocs);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();

View File

@@ -486,6 +486,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
doTestOperationsOnDiskFull(false);
}
+@Slow
public void testUpdatesOnDiskFull() throws IOException {
doTestOperationsOnDiskFull(true);
}
@@ -978,6 +979,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
dir.close();
}
+@Slow
public void testIndexingThenDeleting() throws Exception {
// TODO: move this test to its own class and just @SuppressCodecs?
// TODO: is it enough to just use newFSDirectory?
@@ -998,7 +1000,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH));
Document doc = new Document();
doc.add(newTextField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO));
-int num = atLeast(3);
+int num = atLeast(1);
for (int iter = 0; iter < num; iter++) {
int count = 0;

View File

@@ -99,7 +99,7 @@ public class TestIndexWriterExceptions2 extends LuceneTestCase {
conf.setMergeScheduler(new SerialMergeScheduler());
conf.setCodec(codec);
-int numDocs = atLeast(500);
+int numDocs = atLeast(100);
IndexWriter iw = new IndexWriter(dir, conf);
try {

View File

@@ -383,7 +383,7 @@ public class TestIndexWriterMerging extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, conf);
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
-for(int iter=0;iter<10;iter++) {
+for(int iter=0;iter<atLeast(3);iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}

View File

@@ -72,7 +72,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
dir.close();
}
-@Slow
+@Nightly
public void testStressUpdateSameID() throws Exception {
int iters = atLeast(100);
for(int iter=0;iter<iters;iter++) {
@@ -143,7 +143,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
long seqNo;
}
-@Slow
+@Nightly
public void testStressConcurrentCommit() throws Exception {
final int opCount = atLeast(10000);
final int idCount = TestUtil.nextInt(random(), 10, 1000);
@@ -303,7 +303,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
dir.close();
}
-@Slow
+@Nightly
public void testStressConcurrentDocValuesUpdatesCommit() throws Exception {
final int opCount = atLeast(10000);
final int idCount = TestUtil.nextInt(random(), 10, 1000);
@@ -462,7 +462,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
dir.close();
}
-@Slow
+@Nightly
public void testStressConcurrentAddAndDeleteAndCommit() throws Exception {
final int opCount = atLeast(10000);
final int idCount = TestUtil.nextInt(random(), 10, 1000);

View File

@@ -78,7 +78,7 @@ public class TestLongPostings extends LuceneTestCase {
// randomness (ie same seed will point to same dir):
Directory dir = newFSDirectory(createTempDir("longpostings" + "." + random().nextLong()));
-final int NUM_DOCS = atLeast(2000);
+final int NUM_DOCS = atLeast(1000);
if (VERBOSE) {
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
@@ -275,7 +275,7 @@ public class TestLongPostings extends LuceneTestCase {
// randomness (ie same seed will point to same dir):
Directory dir = newFSDirectory(createTempDir("longpostings" + "." + random().nextLong()));
-final int NUM_DOCS = atLeast(2000);
+final int NUM_DOCS = atLeast(1000);
if (VERBOSE) {
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);

View File

@@ -972,7 +972,7 @@ public class TestTermsEnum extends LuceneTestCase {
}
// Stresses out many-terms-in-root-block case:
-@Slow
+@Nightly
public void testVaryingTermsPerSegment() throws Exception {
Directory dir = newDirectory();
Set<BytesRef> terms = new HashSet<BytesRef>();

View File

@@ -710,7 +710,7 @@ public class TestTieredMergePolicy extends BaseMergePolicyTestCase {
doTestSimulateAppendOnly(mergePolicy, 100_000_000, 10_000);
}
-@Override
+@Override @Slow
public void testSimulateUpdates() throws IOException {
TieredMergePolicy mergePolicy = mergePolicy();
// Avoid low values of the max merged segment size which prevent this merge policy from scaling well

View File

@@ -367,7 +367,7 @@ public class TestBoolean2 extends LuceneTestCase {
try {
// increase number of iterations for more complete testing
-int num = atLeast(20);
+int num = atLeast(3);
for (int i=0; i<num; i++) {
int level = random().nextInt(3);
q1 = randBoolQuery(new Random(random().nextLong()), random().nextBoolean(), level, field, vals, null).build();

View File

@@ -332,7 +332,7 @@ public class TestPointQueries extends LuceneTestCase {
}
public void testAllEqual() throws Exception {
-int numValues = atLeast(10000);
+int numValues = atLeast(1000);
long value = randomValue();
long[] values = new long[numValues];
@@ -350,7 +350,7 @@ public class TestPointQueries extends LuceneTestCase {
}
public void testRandomLongsMedium() throws Exception {
-doTestRandomLongs(10000);
+doTestRandomLongs(1000);
}
private void doTestRandomLongs(int count) throws Exception {
@@ -602,7 +602,7 @@ public class TestPointQueries extends LuceneTestCase {
}
public void testRandomBinaryMedium() throws Exception {
-doTestRandomBinary(10000);
+doTestRandomBinary(1000);
}
private void doTestRandomBinary(int count) throws Exception {

View File

@@ -551,6 +551,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
dir.close();
}
+@Slow
public void testConcurrentIndexCloseSearchAndRefresh() throws Exception {
final Directory dir = newFSDirectory(createTempDir());
AtomicReference<IndexWriter> writerRef = new AtomicReference<>();
@@ -567,7 +568,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
public void run() {
try {
LineFileDocs docs = new LineFileDocs(random());
-long runTimeSec = TEST_NIGHTLY ? atLeast(10) : atLeast(2);
+long runTimeSec = TEST_NIGHTLY ? atLeast(10) : 1;
long endTime = System.nanoTime() + runTimeSec * 1000000000;
while (System.nanoTime() < endTime) {
IndexWriter w = writerRef.get();

View File

@@ -117,7 +117,7 @@ public class TestTopFieldCollectorEarlyTermination extends LuceneTestCase {
}
private void doTestEarlyTermination(boolean paging) throws IOException {
-final int iters = atLeast(8);
+final int iters = atLeast(1);
for (int i = 0; i < iters; ++i) {
createRandomIndex(false);
int maxSegmentSize = 0;

View File

@@ -148,8 +148,9 @@ public class TestArrayUtil extends LuceneTestCase {
}
// This is a test for LUCENE-3054 (which fails without the merge sort fall back with stack overflow in most cases)
+@Slow
public void testQuickToHeapSortFallback() {
-int num = atLeast(50);
+int num = atLeast(10);
for (int i = 0; i < num; i++) {
Integer[] a1 = createSparseRandomArray(40000), a2 = a1.clone();
ArrayUtil.introSort(a1);

View File

@@ -36,7 +36,7 @@ public class TestCollectionUtil extends LuceneTestCase {
}
public void testIntroSort() {
-for (int i = 0, c = atLeast(500); i < c; i++) {
+for (int i = 0, c = atLeast(100); i < c; i++) {
List<Integer> list1 = createRandomList(2000), list2 = new ArrayList<>(list1);
CollectionUtil.introSort(list1);
Collections.sort(list2);
@@ -55,7 +55,7 @@ public class TestCollectionUtil extends LuceneTestCase {
}
public void testTimSort() {
-for (int i = 0, c = atLeast(500); i < c; i++) {
+for (int i = 0, c = atLeast(100); i < c; i++) {
List<Integer> list1 = createRandomList(2000), list2 = new ArrayList<>(list1);
CollectionUtil.timSort(list1);
Collections.sort(list2);

View File

@@ -87,6 +87,7 @@ public class TestOfflineSorter extends LuceneTestCase {
}
}
+@Slow
public void testIntermediateMerges() throws Exception {
// Sort 20 mb worth of data with 1mb buffer, binary merging.
try (Directory dir = newDirectory()) {
@@ -100,6 +101,7 @@ public class TestOfflineSorter extends LuceneTestCase {
}
}
+@Slow
public void testSmallRandom() throws Exception {
// Sort 20 mb worth of data with 1mb buffer.
try (Directory dir = newDirectory()) {
@@ -255,7 +257,7 @@ public class TestOfflineSorter extends LuceneTestCase {
public void testThreadSafety() throws Exception {
Thread[] threads = new Thread[TestUtil.nextInt(random(), 4, 10)];
final AtomicBoolean failed = new AtomicBoolean();
-final int iters = atLeast(1000);
+final int iters = atLeast(200);
try (Directory dir = newDirectory()) {
for(int i=0;i<threads.length;i++) {
final int threadID = i;
@@ -442,6 +444,7 @@ public class TestOfflineSorter extends LuceneTestCase {
}
}
+@Slow
public void testFixedLengthHeap() throws Exception {
// Make sure the RAM accounting is correct, i.e. if we are sorting fixed width
// ints (4 bytes) then the heap used is really only 4 bytes per value:

View File

@@ -72,25 +72,29 @@ public class TestStringMSBRadixSorter extends LuceneTestCase {
}
public void testRandom() {
-for (int iter = 0; iter < 10; ++iter) {
+int numIters = atLeast(3);
+for (int iter = 0; iter < numIters; ++iter) {
testRandom(0, 10);
}
}
public void testRandomWithLotsOfDuplicates() {
-for (int iter = 0; iter < 10; ++iter) {
+int numIters = atLeast(3);
+for (int iter = 0; iter < numIters; ++iter) {
testRandom(0, 2);
}
}
public void testRandomWithSharedPrefix() {
-for (int iter = 0; iter < 10; ++iter) {
+int numIters = atLeast(3);
+for (int iter = 0; iter < numIters; ++iter) {
testRandom(TestUtil.nextInt(random(), 1, 30), 10);
}
}
public void testRandomWithSharedPrefixAndLotsOfDuplicates() {
-for (int iter = 0; iter < 10; ++iter) {
+int numIters = atLeast(3);
+for (int iter = 0; iter < numIters; ++iter) {
testRandom(TestUtil.nextInt(random(), 1, 30), 2);
}
}

View File

@@ -20,12 +20,12 @@ package org.apache.lucene.util;
import java.util.LinkedList;
import java.util.List;
-import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.packed.PackedInts;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
-@Slow
+@Nightly
public class TestTimSorterWorstCase extends LuceneTestCase {
public void testWorstCaseStackSize() {

View File

@@ -64,6 +64,7 @@ public class TestMinimize extends LuceneTestCase {
}
/** n^2 space usage in Hopcroft minimization? */
+@Slow
public void testMinimizeHuge() {
new RegExp("+-*(A|.....|BC)*]", RegExp.NONE).toAutomaton(1000000);
}

View File

@@ -31,8 +31,9 @@ import org.apache.lucene.util.TestUtil;
public class TestDocIdsWriter extends LuceneTestCase {
public void testRandom() throws Exception {
+int numIters = atLeast(100);
try (Directory dir = newDirectory()) {
-for (int iter = 0; iter < 1000; ++iter) {
+for (int iter = 0; iter < numIters; ++iter) {
int[] docIDs = new int[random().nextInt(5000)];
final int bpv = TestUtil.nextInt(random(), 1, 32);
for (int i = 0; i < docIDs.length; ++i) {
@@ -44,8 +45,9 @@
}
public void testSorted() throws Exception {
+int numIters = atLeast(100);
try (Directory dir = newDirectory()) {
-for (int iter = 0; iter < 1000; ++iter) {
+for (int iter = 0; iter < numIters; ++iter) {
int[] docIDs = new int[random().nextInt(5000)];
final int bpv = TestUtil.nextInt(random(), 1, 32);
for (int i = 0; i < docIDs.length; ++i) {

View File

@@ -67,6 +67,7 @@ public class TestFSTDirectAddressing extends LuceneTestCase {
assertTrue("FST size = " + size + " B", size <= 1648 * 1.01d);
}
+@Nightly
public void testWorstCaseForDirectAddressing() throws Exception {
// This test will fail if there is more than 1% memory increase with direct addressing in this worst case.
final double MEMORY_INCREASE_LIMIT_PERCENT = 1d;

View File

@@ -64,7 +64,6 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -82,7 +81,6 @@ import static org.apache.lucene.util.fst.FSTTester.simpleRandomString;
import static org.apache.lucene.util.fst.FSTTester.toIntsRef;
@SuppressCodecs({ "SimpleText", "Direct" })
-@Slow
public class TestFSTs extends LuceneTestCase {
private MockDirectoryWrapper dir;
@@ -306,10 +304,11 @@ public class TestFSTs extends LuceneTestCase {
// Build FST for all unique terms in the test line docs
// file, up until a doc limit
+@Slow
public void testRealTerms() throws Exception {
final LineFileDocs docs = new LineFileDocs(random());
-final int numDocs = TEST_NIGHTLY ? atLeast(1000) : atLeast(100);
+final int numDocs = TEST_NIGHTLY ? atLeast(1000) : atLeast(50);
MockAnalyzer analyzer = new MockAnalyzer(random());
analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));

View File

@@ -617,13 +617,13 @@ public class TestGrouping extends LuceneTestCase {
}
public void testRandom() throws Exception {
-int numberOfRuns = TestUtil.nextInt(random(), 3, 6);
+int numberOfRuns = atLeast(1);
for (int iter=0; iter<numberOfRuns; iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
-final int numDocs = TestUtil.nextInt(random(), 100, 1000) * RANDOM_MULTIPLIER;
+final int numDocs = atLeast(100);
//final int numDocs = _TestUtil.nextInt(random, 5, 20);
final int numGroups = TestUtil.nextInt(random(), 1, numDocs);

View File

@@ -59,9 +59,9 @@ public class TestUnifiedHighlighterRanking extends LuceneTestCase {
// TODO: this only tests single-valued fields. we should also index multiple values per field!
public void testRanking() throws Exception {
// number of documents: we will check each one
-final int numDocs = atLeast(100);
+final int numDocs = atLeast(20);
// number of top-N snippets, we will check 1 .. N
-final int maxTopN = 5;
+final int maxTopN = 3;
// maximum number of elements to put in a sentence.
final int maxSentenceLength = 10;
// maximum number of sentences in a document

View File

@@ -494,7 +494,7 @@ public class TestBlockJoin extends LuceneTestCase {
final Directory joinDir = newDirectory();
final int maxNumChildrenPerParent = 20;
-final int numParentDocs = TestUtil.nextInt(random(), 100 * RANDOM_MULTIPLIER, 300 * RANDOM_MULTIPLIER);
+final int numParentDocs = TestUtil.nextInt(random(), 10* RANDOM_MULTIPLIER, 30 * RANDOM_MULTIPLIER);
//final int numParentDocs = 30;
// Values for parent fields:

View File

@@ -383,8 +383,8 @@ public class TestJoinUtil extends LuceneTestCase {
}
public void testRandomOrdinalsJoin() throws Exception {
-IndexIterationContext context = createContext(512, false, true);
-int searchIters = 10;
+IndexIterationContext context = createContext(128, false, true);
+int searchIters = atLeast(1);
IndexSearcher indexSearcher = context.searcher;
for (int i = 0; i < searchIters; i++) {
if (VERBOSE) {
@@ -1162,8 +1162,8 @@
@Test
@Slow
public void testSingleValueRandomJoin() throws Exception {
-int maxIndexIter = TestUtil.nextInt(random(), 6, 12);
-int maxSearchIter = TestUtil.nextInt(random(), 13, 26);
+int maxIndexIter = atLeast(1);
+int maxSearchIter = atLeast(1);
executeRandomJoin(false, maxIndexIter, maxSearchIter, TestUtil.nextInt(random(), 87, 764));
}
@@ -1171,8 +1171,8 @@
@Slow
// This test really takes more time, that is why the number of iterations are smaller.
public void testMultiValueRandomJoin() throws Exception {
-int maxIndexIter = TestUtil.nextInt(random(), 3, 6);
-int maxSearchIter = TestUtil.nextInt(random(), 6, 12);
+int maxIndexIter = atLeast(1);
+int maxSearchIter = atLeast(1);
executeRandomJoin(true, maxIndexIter, maxSearchIter, TestUtil.nextInt(random(), 11, 57));
}

View File

@@ -72,6 +72,8 @@ public class SweetSpotSimilarityTest extends LuceneTestCase {
return null;
}
+// TODO: rewrite this test to not make thosuands of indexes.
+@Nightly
public void testSweetSpotComputeNorm() throws IOException {
final SweetSpotSimilarity ss = new SweetSpotSimilarity();

View File

@@ -86,7 +86,7 @@ public class TestCoveringQuery extends LuceneTestCase {
public void testRandom() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
-int numDocs = atLeast(200);
+int numDocs = atLeast(50);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
if (random().nextBoolean()) {

View File

@@ -146,7 +146,7 @@ public class TestNearest extends LuceneTestCase {
public void testNearestNeighborRandom() throws Exception {
-int numPoints = atLeast(5000);
+int numPoints = atLeast(1000);
Directory dir;
if (numPoints > 100000) {
dir = newFSDirectory(createTempDir(getClass().getSimpleName()));

View File

@@ -406,10 +406,10 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
}
public void testRandom() throws Exception {
-int numDocs = atLeast(100);
+int numDocs = atLeast(50);
Directory dir = newDirectory();
-// Adds occassional random synonyms:
+// Adds occasional random synonyms:
Analyzer analyzer = new Analyzer() {
@Override
public TokenStreamComponents createComponents(String fieldName) {

View File

@@ -18,7 +18,6 @@ package org.apache.lucene.spatial.bbox;
import java.io.IOException;
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.search.Query;
@@ -80,7 +79,6 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
}
@Test
-@Repeat(iterations = 15)
public void testOperations() throws IOException {
//setup
if (random().nextInt(4) > 0) {//75% of the time choose geo (more interesting to test)

View File

@@ -18,7 +18,6 @@ package org.apache.lucene.spatial.composite;
import java.io.IOException;
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
@@ -74,7 +73,6 @@ public class CompositeStrategyTest extends RandomSpatialOpStrategyTestCase {
}
@Test
-@Repeat(iterations = 20)
public void testOperations() throws IOException {
//setup
if (randomBoolean()) {

View File

@@ -21,7 +21,6 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.apache.lucene.spatial.SpatialTestData;
import org.apache.lucene.spatial.composite.CompositeSpatialStrategy;
import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase;
@@ -123,7 +122,6 @@ public class Geo3dRptTest extends RandomSpatialOpStrategyTestCase {
}
@Test
-@Repeat(iterations = 30)
public void testOperations() throws IOException {
setupStrategy();

View File

@@ -539,6 +539,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
}
}
+@Slow
public void testRandomNRT() throws Exception {
final Path tempDir = createTempDir("AnalyzingInfixSuggesterTest");
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
@@ -555,7 +556,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
LookupThread lookupThread = new LookupThread(suggester);
lookupThread.start();
-int iters = atLeast(1000);
+int iters = atLeast(100);
int visibleUpto = 0;
Set<Long> usedWeights = new HashSet<>();

View File

@@ -669,9 +669,10 @@ public class AnalyzingSuggesterTest extends LuceneTestCase {
private static char SEP = '\u001F';
+@Slow
public void testRandom() throws Exception {
-int numQueries = atLeast(1000);
+int numQueries = atLeast(200);
final List<TermFreq2> slowCompletor = new ArrayList<>();
final TreeSet<String> allPrefixes = new TreeSet<>();

View File

@@ -541,9 +541,10 @@ public class FuzzySuggesterTest extends LuceneTestCase {
}
}
+@Slow
public void testRandom() throws Exception {
-int numQueries = atLeast(100);
+int numQueries = atLeast(20);
final List<TermFreqPayload2> slowCompletor = new ArrayList<>();
final TreeSet<String> allPrefixes = new TreeSet<>();

View File

@@ -158,11 +158,11 @@ public class TestSuggestField extends LuceneTestCase {
assertTokenStreamContents(stream, new String[] {"input"}, null, null, new String[]{payload.utf8ToString()}, new int[]{1}, null, null);
}
-@Test
+@Test @Slow
public void testDupSuggestFieldValues() throws Exception {
Analyzer analyzer = new MockAnalyzer(random());
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-final int num = Math.min(1000, atLeast(300));
+final int num = Math.min(1000, atLeast(100));
int[] weights = new int[num];
for(int i = 0; i < num; i++) {
Document document = new Document();
@@ -246,10 +246,11 @@ public class TestSuggestField extends LuceneTestCase {
iw.close();
}
+@Slow
public void testExtremeDeduplication() throws Exception {
Analyzer analyzer = new MockAnalyzer(random());
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-final int num = atLeast(5000);
+final int num = atLeast(500);
int bestWeight = Integer.MIN_VALUE;
for(int i = 0; i < num; i++) {
Document document = new Document();
@@ -678,7 +679,7 @@ public class TestSuggestField extends LuceneTestCase {
Analyzer analyzer = new MockAnalyzer(random());
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
-int num = Math.min(1000, atLeast(100));
+int num = Math.min(1000, atLeast(50));
String[] prefixes = {"abc", "bac", "cab"};
Map<String, Integer> mappings = new HashMap<>();
for (int i = 0; i < num; i++) {
@@ -721,7 +722,7 @@ public class TestSuggestField extends LuceneTestCase {
Analyzer analyzer = new MockAnalyzer(random());
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
LineFileDocs lineFileDocs = new LineFileDocs(random());
-int num = Math.min(1000, atLeast(100));
+int num = Math.min(1000, atLeast(50));
Map<String, Integer> mappings = new HashMap<>();
for (int i = 0; i < num; i++) {
Document document = lineFileDocs.nextDoc();

View File

@@ -180,6 +180,7 @@ public class FSTCompletionTest extends LuceneTestCase {
tempDir.close();
}
+@Slow
public void testMultilingualInput() throws Exception {
List<Input> input = LookupBenchmarkTest.readTop50KWiki();

View File

@@ -438,7 +438,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
}
public void testAllLatEqual() throws Exception {
-int numPoints = atLeast(10000);
+int numPoints = atLeast(1000);
double lat = nextLatitude();
double[] lats = new double[numPoints];
double[] lons = new double[numPoints];
@@ -484,7 +484,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
}
public void testAllLonEqual() throws Exception {
-int numPoints = atLeast(10000);
+int numPoints = atLeast(1000);
double theLon = nextLongitude();
double[] lats = new double[numPoints];
double[] lons = new double[numPoints];
@@ -532,7 +532,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
}
public void testMultiValued() throws Exception {
-int numPoints = atLeast(10000);
+int numPoints = atLeast(1000);
// Every doc has 2 points:
double[] lats = new double[2*numPoints];
double[] lons = new double[2*numPoints];
@@ -644,7 +644,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
}
public void testRandomMedium() throws Exception {
-doTestRandom(10000);
+doTestRandom(1000);
}
@Nightly
@@ -1256,7 +1256,8 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
/** Run a few iterations with just 10 docs, hopefully easy to debug */
public void testRandomDistance() throws Exception {
-for (int iters = 0; iters < 100; iters++) {
+int numIters = atLeast(1);
+for (int iters = 0; iters < numIters; iters++) {
doRandomDistanceTest(10, 100);
}
}

View File

@@ -2637,7 +2637,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
}
/** Tests dv against stored fields with threads (all types + missing) */
-@Slow
+@Nightly
public void testThreads2() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -2786,7 +2786,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
dir.close();
}
-@Slow
+@Nightly
public void testThreads3() throws Exception {
Directory dir = newFSDirectory(createTempDir());
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -3456,7 +3456,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
}
}
-for(int iter=0;iter<100;iter++) {
+int numIters = atLeast(10);
+for(int iter=0;iter<numIters;iter++) {
DocIdSetIterator values = fieldCreator.iterator(r);
assertEquals(-1, values.docID());

View File

@@ -262,7 +262,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
}
/** Test the accuracy of the ramBytesUsed estimations. */
-@Slow
+@Nightly
public void testRamBytesUsed() throws IOException {
if (Codec.getDefault() instanceof RandomCodec) {
// this test relies on the fact that two segments will be written with

View File

@@ -630,7 +630,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
public void testRandom() throws IOException {
final RandomDocumentFactory docFactory = new RandomDocumentFactory(5, 20);
-final int numDocs = atLeast(100);
+final int numDocs = atLeast(50);
final RandomDocument[] docs = new RandomDocument[numDocs];
for (int i = 0; i < numDocs; ++i) {
docs[i] = docFactory.newDocument(TestUtil.nextInt(random(), 1, 3), TestUtil.nextInt(random(), 10, 50), randomOptions());
@@ -693,7 +693,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
// don't share mutable data
public void testClone() throws IOException, InterruptedException {
final RandomDocumentFactory docFactory = new RandomDocumentFactory(5, 20);
-final int numDocs = atLeast(100);
+final int numDocs = atLeast(50);
for (Options options : validOptions()) {
final RandomDocument[] docs = new RandomDocument[numDocs];
for (int i = 0; i < numDocs; ++i) {

View File

@@ -69,7 +69,7 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase {
}
public void testRandomMedium() throws Exception {
-doTestRandom(10000, false);
+doTestRandom(1000, false);
}
@Nightly
@@ -78,11 +78,11 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase {
}
public void testMultiValued() throws Exception {
-doTestRandom(10000, true);
+doTestRandom(1000, true);
}
public void testAllEqual() throws Exception {
-int numDocs = atLeast(10000);
+int numDocs = atLeast(1000);
int dimensions = dimension();
Range[][] ranges = new Range[numDocs][];
Range[] theRange = new Range[] {nextRange(dimensions)};
@@ -92,7 +92,7 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase {
// Force low cardinality leaves
public void testLowCardinality() throws Exception {
-int numDocs = atLeast(10000);
+int numDocs = atLeast(1000);
int dimensions = dimension();
int cardinality = TestUtil.nextInt(random(), 2, 20);

View File

@@ -251,11 +251,11 @@ public abstract class BaseSimilarityTestCase extends LuceneTestCase {
*/
public void testRandomScoring() throws Exception {
Random random = random();
-final int iterations = atLeast(10);
+final int iterations = atLeast(3);
for (int i = 0; i < iterations; i++) {
// pull a new similarity to switch up parameters
Similarity similarity = getSimilarity(random);
-for (int j = 0; j < 10; j++) {
+for (int j = 0; j < 3; j++) {
// for each norm value...
for (int k = 1; k < 256; k++) {
CollectionStatistics corpus = newCorpus(random, k);

View File

@@ -947,7 +947,7 @@ public abstract class LuceneTestCase extends Assert {
c.setInfoStream(new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out));
}
-if (r.nextBoolean()) {
+if (rarely(r)) {
c.setMergeScheduler(new SerialMergeScheduler());
} else if (rarely(r)) {
ConcurrentMergeScheduler cms;
@@ -1063,7 +1063,7 @@ public abstract class LuceneTestCase extends Assert {
return new MockRandomMergePolicy(r);
} else if (r.nextBoolean()) {
return newTieredMergePolicy(r);
-} else if (r.nextInt(5) == 0) {
+} else if (rarely(r) ) {
return newAlcoholicMergePolicy(r, classEnvRule.timeZone);
}
return newLogMergePolicy(r);
@@ -1095,7 +1095,7 @@ public abstract class LuceneTestCase extends Assert {
if (rarely(r)) {
logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9));
} else {
-logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50));
+logmp.setMergeFactor(TestUtil.nextInt(r, 100, 500));
}
configureRandom(r, logmp);
return logmp;
@@ -1127,7 +1127,7 @@ public abstract class LuceneTestCase extends Assert {
if (rarely(r)) {
tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
} else {
-tmp.setMaxMergedSegmentMB(r.nextDouble() * 100);
+tmp.setMaxMergedSegmentMB(10 + r.nextDouble() * 100);
}
tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);

View File

@@ -17,10 +17,10 @@
package org.apache.lucene.codecs.asserting;
import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
+import org.apache.lucene.index.BaseStoredFieldsFormatTestCase;
-/** Test AssertingTermVectorsFormat directly */
-public class TestAssertingStoredFieldsFormat extends BaseTermVectorsFormatTestCase {
+/** Test AssertingStoredFieldsFormat directly */
+public class TestAssertingStoredFieldsFormat extends BaseStoredFieldsFormatTestCase {
private final Codec codec = new AssertingCodec();
@Override

View File

@@ -27,7 +27,7 @@ public class TestRamUsageTesterOnWildAnimals extends LuceneTestCase {
}
public void testOverflowMaxChainLength() {
-int UPPERLIMIT = 100000;
+int UPPERLIMIT = atLeast(10000);
int lower = 0;
int upper = UPPERLIMIT;